My problem is that I want to compute some specific areas inside the graph of a time series.
This is the graph (not reproduced here; it is the plot produced by the code below), and this is the code I use to generate my time series:
x1<-rnorm(250,0.4,0.9)
x <- as.matrix(x1)
t <- ts(x[,1], start=c(1,1), frequency=30)
plot(t,xlim=c(2,4),main="Daily closing price of Walterenergie",ylab="Adjusted close Returns",xlab="Times",col="blue")
abline(0,0)
So my question is how to compute the area in green and then how to compute the area in red separately.
If you need the code for the graph:
###### Graph of the performance of my portfolio
num.points <- 250
x.vals <- 1:num.points
values <- x1
plot(x.vals, values, t="l", pch=20, main="Daily closing price", xlab="Times", ylab="Adjusted close Returns", las=1)
abline(h=0, col="darkgray", lwd=2)
crossings <- values[-length(values)] * values[-1]
crossings <- which(crossings < 0)
# You can draw the points to check (uncomment following line)
# points(x.vals[crossings], values[crossings], col="red", pch="X")
intersections <- NULL
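# Each crossing lies between x.vals[cr] and x.vals[cr+1]; linear interpolation
# places it at cr + |values[cr]| / (|values[cr]| + |values[cr+1]|).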
for (cr in crossings)
{
new.int <- cr + abs(values[cr])/(abs(values[cr])+abs(values[cr+1]))
intersections <- c(intersections, new.int)
}
# Again, let's check the intersections
#points(intersections, rep(0, length(intersections)), pch=20, col="red", cex=0.7)
last.intersection <- 0
for (i in intersections)
{
ids <- which(x.vals<=i & x.vals>last.intersection)
poly.x <- c(last.intersection, x.vals[ids], i)
poly.y <- c(0, values[ids], 0)
if (max(poly.y) > 0)
{
col="green"
}
else
{
col="red"
}
polygon(x=poly.x, y=poly.y, col=col)
last.intersection <- i
}
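To get the numbers themselves (my addition, a minimal sketch under the same linear-interpolation assumption as the crossing code above): run the same segmentation, but accumulate each polygon's area by sign instead of drawing it. I append num.points as a final cut point so the tail after the last crossing is included.
green.area <- 0  # total area above the zero line
red.area <- 0    # total area below the zero line
last.intersection <- 0
for (i in c(intersections, num.points))
{
  ids <- which(x.vals<=i & x.vals>last.intersection)
  poly.x <- c(last.intersection, x.vals[ids], i)
  poly.y <- c(0, values[ids], 0)
  # shoelace formula; the polygon closes along the zero line
  area <- 0.5 * abs(sum(poly.x * c(poly.y[-1], poly.y[1]) -
                        poly.y * c(poly.x[-1], poly.x[1])))
  if (max(poly.y) > 0)
  {
    green.area <- green.area + area
  }
  else
  {
    red.area <- red.area + area
  }
  last.intersection <- i
}
green.area
red.area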
This question probably has a simple solution, but I cannot think of how to do it...
So I have a script as follows:
# ------------------ MODEL SETUP ----------------------------------------
# simulation length
t_max <- 50
# arena
arena_x <- 100
arena_y <- 100
# plant parameters
a <- 0.1
b <- 0.1
g <- 1
iterations <- 5
totalBiomass <- matrix(0, nrow=iterations, ncol=1)
# plant locations on a regular grid
sep <- 10
# seq(0, arena_x, sep) has 1 + arena_x/sep values, so repeat it 1 + arena_y/sep times
plantLocsX <- matrix(rep(seq(0, arena_x, sep), 1 + arena_y/sep),
                     nrow=1+arena_x/sep,
                     ncol=1+arena_y/sep)
plantLocsY <- t(plantLocsX)
# number of plants and initial sizes (nplants depends on plantLocsX,
# so it is computed after the grid is built)
nplants <- dim(plantLocsX)[1]*dim(plantLocsX)[2]
plantSizes <- matrix(1, nrow=nplants, ncol=1)
# Plot the plants
radius <- sqrt( plantSizes/ pi )
symbols(plantLocsX, plantLocsY, radius, xlim = c(0,100), ylim=c(0,100), inches=0.05, fg = "green",
xlab = "x domain (m)", ylab = "y domain (m)", main = "Random Plant Locations", col.main = 51)
# Calculate distances between EACH POSSIBLE PAIR of plants
distances <- matrix(0,nrow=nplants,ncol=nplants)
for (i in 1:nplants){
for (j in 1:nplants){
distances[i,j] <- sqrt( (plantLocsX[i]-plantLocsX[j])^2 + (plantLocsY[i]-plantLocsY[j])^2 )
}
}
# ------------------ MODEL RUNNING ---------------------------------------
I need to alter the arrays containing plant locations and plant sizes so that at each time step entries are removed and added (simulating mortality and reproduction, respectively). The distances matrix must be updated with the new plant locations and sizes after each iteration. I can only think of complex ways to do this, destroying and constructing new matrices at each time step to fit the new number of elements, but there must be functions that make this simpler... any advice?
Many thanks!!
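One possible approach, as a sketch (my addition, not from the original thread; the mortality and reproduction rules below are hypothetical placeholders): keep the plants in a data frame so that rows can be dropped and added in one step, and recompute the distances with dist() instead of the double loop.
plants <- data.frame(x = as.vector(plantLocsX),
                     y = as.vector(plantLocsY),
                     size = as.vector(plantSizes))
step <- function(plants) {
  alive <- runif(nrow(plants)) > 0.1          # hypothetical mortality rule
  plants <- plants[alive, ]
  n.new <- rpois(1, 2)                        # hypothetical reproduction rule
  if (n.new > 0) {
    offspring <- data.frame(x = runif(n.new, 0, arena_x),
                            y = runif(n.new, 0, arena_y),
                            size = 1)
    plants <- rbind(plants, offspring)
  }
  plants
}
plants <- step(plants)
distances <- as.matrix(dist(plants[, c("x", "y")]))  # rebuilt each iteration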
I'm doing some optimization in R and in connection with that I need to write a function that returns a jacobian. It's a very simple jacobian -- just zeros and ones -- but I'd like to populate it quickly and cleanly. My current code works but is very sloppy.
I have a four-dimensional array of probabilities. Index the dimensions by i, j, k, l. My constraint is that, for each i, j, k, the sum of probabilities over index l must equal 1.
I compute my constraint vector like this:
get_prob_array_from_vector <- function(prob_vector, array_dim) {
return(array(prob_vector, array_dim))
}
constraint_function <- function(prob_vector, array_dim) {
prob_array <- get_prob_array_from_vector(prob_vector, array_dim)
prob_array_sums <- apply(prob_array, MARGIN=c(1, 2, 3), FUN=sum)
return(as.vector(prob_array_sums) - 1) # Should equal zero
}
My question is: what is a clean, fast way of computing the jacobian of as.vector(apply(array(my_input_vector, array_dim), MARGIN=c(1, 2, 3), FUN=sum)) -- i.e., my constraint_function in the code above -- with respect to my_input_vector?
Here is my sloppy solution (which I check for correctness against the jacobian function from the numDeriv package):
library(numDeriv)
array_dim <- c(5, 4, 3, 3)
get_prob_array_from_vector <- function(prob_vector, array_dim) {
return(array(prob_vector, array_dim))
}
constraint_function <- function(prob_vector, array_dim) {
prob_array <- get_prob_array_from_vector(prob_vector, array_dim)
prob_array_sums <- apply(prob_array, MARGIN=c(1, 2, 3), FUN=sum)
return(as.vector(prob_array_sums) - 1)
}
constraint_function_jacobian <- function(prob_vector, array_dim) {
prob_array <- get_prob_array_from_vector(prob_vector, array_dim)
jacobian <- matrix(0, Reduce("*", dim(prob_array)[1:3]), length(prob_vector))
## Must be a faster, cleaner way of populating the jacobian
for(i in seq_along(prob_vector)) {
dummy_vector <- rep(0, length(prob_vector))
dummy_vector[i] <- 1
dummy_array <- get_prob_array_from_vector(dummy_vector, array_dim)
dummy_array_sums <- apply(dummy_array, MARGIN=c(1, 2, 3), FUN=sum)
jacobian_row_idx <- which(dummy_array_sums != 0, arr.ind=FALSE)
stopifnot(length(jacobian_row_idx) == 1)
jacobian[jacobian_row_idx, i] <- 1
} # Is there a fast, readable one-liner that does the same as this for loop?
stopifnot(sum(jacobian) == length(prob_vector))
stopifnot(all(jacobian == 0 | jacobian == 1))
return(jacobian)
}
## Example of a probability array satisfying my constraint
my_prob_array <- array(0, array_dim)
for(i in seq_len(array_dim[1])) {
for(j in seq_len(array_dim[2])) {
my_prob_array[i, j, , ] <- diag(array_dim[3])
}
}
my_prob_array[1, 1, , ] <- 1 / array_dim[3]
my_prob_array[2, 1, , ] <- 0.25 * (1 / array_dim[3]) + 0.75 * diag(array_dim[3])
my_prob_vector <- as.vector(my_prob_array) # Flattened representation of my_prob_array
should_be_zero_vector <- constraint_function(my_prob_vector, array_dim)
is.vector(should_be_zero_vector)
all(should_be_zero_vector == 0) # Constraint is satisfied
## Check constraint_function_jacobian for correctness using numDeriv
jacobian_analytical <- constraint_function_jacobian(my_prob_vector, array_dim)
jacobian_numerical <- jacobian(constraint_function, my_prob_vector, array_dim=array_dim)
max(abs(jacobian_analytical - jacobian_numerical)) # Very small
My functions take prob_vector as input -- i.e., a flattened representation of my probability array -- because optimization functions require vector arguments.
It took me some time to understand what you were trying to do, but here is a proposition to replace your constraint_function_jacobian:
enhanced <- function(prob_vector, array_dim) {
firstdim <- Reduce("*", array_dim[1:3])
seconddim <- length(prob_vector)
jacobian <- matrix(0, firstdim, seconddim)
idxs <- split(1:seconddim, cut(1:seconddim, array_dim[4], labels=FALSE))
for (i in seq_along(idxs)) {
diag(jacobian[, idxs[[i]] ]) <- 1
}
stopifnot(sum(jacobian) == length(prob_vector))
stopifnot(all(jacobian == 0 | jacobian == 1))
jacobian
}
Unless I'm wrong, the jacobian construction amounts to filling diagonals with 1; since the matrix is not square, we have to split it into array_dim[4] square blocks and fill each block's diagonal with 1.
I also got rid of transforming prob_vector into an array just to take its dim, as that is the same as array_dim; skipping this step is not a huge improvement, but it simplifies the code IMO.
The results are correct according to this test:
identical(constraint_function_jacobian(my_prob_vector, array_dim),
enhanced(my_prob_vector, array_dim))
# [1] TRUE
According to a benchmark, it gives a great speedup:
microbenchmark::microbenchmark(
original=constraint_function_jacobian(my_prob_vector, array_dim),
enhanced=enhanced(my_prob_vector, array_dim), times=100)
# Unit: microseconds
# expr min lq mean median uq max neval cld
# original 16946.979 18466.491 20150.304 19066.7410 19671.4100 28148.035 100 b
# enhanced 678.222 737.948 799.005 796.3905 834.5925 1141.773 100 a
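A further thought (my addition, assuming the standard column-major flattening that as.vector performs): summing over the fourth index means row r of the jacobian has a 1 in columns r + P*(l-1), where P = prod(array_dim[1:3]). The jacobian is therefore just array_dim[4] identity blocks of size P side by side, which kronecker builds in one line:
one_liner <- function(prob_vector, array_dim) {
  P <- prod(array_dim[1:3])
  # cbind of array_dim[4] identity matrices: J[r, r + P*(l-1)] = 1
  kronecker(matrix(1, 1, array_dim[4]), diag(P))
}
identical(enhanced(my_prob_vector, array_dim),
          one_liner(my_prob_vector, array_dim))
# should be TRUE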
I use a netCDF file which stores one variable and has the following dimensions: lon, lat, time.
Generally speaking, I wish to compare it against other data that I already have in R, stored as a data frame: the first two columns are coordinates in WGS84, and the remaining columns are values for specific times.
So I wrote the following code.
# ncFile$dim$time$units says: [1] "days since 1900-1-1"
daysFromDate <- function(data1, data2="1900-01-01")
{
round(as.numeric(difftime(data1,data2,units = "days")))
}
#study area:
lon <- c(40.25, 48)
lat <- c(16, 24.25)
myTime <- c(daysFromDate("2008-01-16"), daysFromDate("2011-12-31"))
varName <- "spei"
require(ncdf4)
require(RCurl)
x <- getBinaryURL("http://digital.csic.es/bitstream/10261/104742/3/SPEI_01.nc")
ncFile <- nc_open(x)
LonIdx <- which( ncFile$dim$lon$vals >= lon[1] | ncFile$dim$lon$vals <= lon[2])
LatIdx <- which( ncFile$dim$lat$vals >= lat[1] & ncFile$dim$lat$vals <= lat[2])
TimeIdx <- which( ncFile$dim$time$vals >= myTime[1] & ncFile$dim$time$vals <= myTime[2])
MyVariable <- ncvar_get( ncFile, varName)[ LonIdx, LatIdx, TimeIdx]
I thought that a data frame would be returned, so that I could easily manipulate the data (for example, check correlations or create plots).
Unfortunately, a 3-dimensional array was returned instead.
How can I reformat this to a data frame with the following columns: X-Y-Time1-Time2-...?
So example data would look as follows,
X   Y   2014-01-01  2014-01-02  2014-01-03
50  17         0.5         0.4         0.3
where 0.5, 0.4 and 0.3 are example variable values.
Or maybe there is a different solution?
OK, try the following code, but note that it assumes the ranges are densely filled. Also, I changed the lon test from or (|) to and (&):
require(ncdf4)
nc <- nc_open("SPEI_01.nc")
print(nc)
lon <- ncvar_get(nc, "lon")
lat <- ncvar_get(nc, "lat")
time <- ncvar_get(nc, "time")
lonIdx <- which( lon >= 40.25 & lon <= 48.00)
latIdx <- which( lat >= 16.00 & lat <= 24.25)
myTime <- c(daysFromDate("2008-01-16"), daysFromDate("2011-12-31"))
timeIdx <- which(time >= myTime[1] & time <= myTime[2])
data <- ncvar_get(nc, "spei")[lonIdx, latIdx, timeIdx]
indices <- expand.grid(lon[lonIdx], lat[latIdx], time[timeIdx])
print(length(indices))
class(indices)
summary(indices)
str(indices)
df <- data.frame(cbind(indices, as.vector(data)))
summary(df)
str(df)
UPDATE
OK, it looks like I got the idea of what you want, but I don't have a direct solution. What I've got so far is this: split the data frame using either the split() function or the data.table package. After splitting by X and Y, you'll get a list of small data frames in which X and Y are constant within each frame. It is probably possible to transpose and recombine them, but I don't know how; it might be a good idea to keep working with the data as columns. The lists are nested but could be flattened. Here is a link about splitting in R: http://www.uni-kiel.de/psychologie/rexrepos/posts/dfSplitMerge.html
Code, continued from the previous example:
require(data.table)
colnames(df) <- c("X","Y","Time","spei")
df$Time <- as.Date(df$Time, origin="1900-01-01")
dt <- as.data.table(df)
summary(dt)
# Taken from https://github.com/Rdatatable/data.table/issues/1389
# x data.table
# f use `by` argument instead - unlike data.frame
# drop logical default FALSE will include `by` columns in resulting data.tables - unlike data.frame
# by character column names on which split into lists
# flatten logical default FALSE will result in recursive nested list having data.table as leafs
# ... ignored
split.data.table <- function(x, f, drop = FALSE, by, flatten = FALSE, ...){
if(missing(by) && !missing(f)) by = f
stopifnot(!missing(by), is.character(by), is.logical(drop), is.logical(flatten), !".ll" %in% names(x), by %in% names(x), !"nm" %in% by)
if(!flatten){
.by = by[1L]
tmp = x[, list(.ll=list(.SD)), by = .by, .SDcols = if(drop) setdiff(names(x), .by) else names(x)]
setattr(ll <- tmp$.ll, "names", tmp[[.by]])
if(length(by) > 1L) return(lapply(ll, split.data.table, drop = drop, by = by[-1L])) else return(ll)
} else {
tmp = x[, list(.ll=list(.SD)), by=by, .SDcols = if(drop) setdiff(names(x), by) else names(x)]
setattr(ll <- tmp$.ll, 'names', tmp[, .(nm = paste(.SD, collapse = ".")), by = by, .SDcols = by]$nm)
return(ll)
}
}
# here is data.table split
q <- split.data.table(dt, by = c("X","Y"), drop=FALSE)
str(q)
# here is data frame split
qq <- split(df, list(df$X, df$Y))
str(qq)
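If the wide X-Y-Time1-Time2-... layout from the question is still wanted, here is a sketch using data.table's dcast (my addition, assuming the long dt built above):
wide <- dcast(dt, X + Y ~ Time, value.var = "spei")
str(wide)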
I have 100 points representing the boundary of a sphere of radius 0.1 and center (0.5, 0.5, 0.5). I would like to represent this sphere as an array of points on a 3-D grid over the unit cube. The values of the array would be binary: 1 to represent inside the sphere and 0 to represent outside it.
I foresee the array looking something like this: for a 100 by 100 by 100 array, the (1,1,1) value would be 0, while the (50,50,50) value would be 1, since that grid point is inside the sphere.
Here is the code that creates and plots the 100 (or more) points. These points all lie on the boundary of the sphere.
library(scatterplot3d)
n <- 100
r <- rep(.1,n)
theta <- runif(n,0,pi)
phi <- runif(n,0,2*pi)
x <- r*sin(theta)*cos(phi)+.5
y <- r*sin(theta)*sin(phi)+.5
z <- r*cos(theta)+.5
graphic <- scatterplot3d(x,y,z,xlim=c(0,1),ylim=c(0,1),zlim=c(0,1))
Maybe this helps you on your way. I'm guessing that visualization is not the main objective here, but I've included a series of image plots to show the sections of the 3d array.
Example:
n=25
cen <- c(0.5,0.5,0.5)
rad <- 1
xs <- seq(cen[1]-rad,cen[1]+rad,,n)
ys <- seq(cen[2]-rad,cen[2]+rad,,n)
zs <- seq(cen[3]-rad,cen[3]+rad,,n)
grd <- expand.grid(x=xs, y=ys, z=zs)
a <- array(0, dim=c(n,n,n))
for(i in seq(a)){
a[i] <- as.numeric(dist(rbind(grd[i,], cen)) <= rad)
}
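A vectorized alternative to the loop above (my note: expand.grid varies its first column fastest, which matches R's column-major array filling, so the reshape lines up):
d2 <- (grd$x - cen[1])^2 + (grd$y - cen[2])^2 + (grd$z - cen[3])^2
a <- array(as.numeric(d2 <= rad^2), dim=c(n,n,n))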
png("sections.png", units="in", width=10, height=4, res=400)
op <- par(mfrow=c(1,n), mar=rep(0.1,4))
for(i in seq(n)){
image(x=xs, y=ys, z=a[,,i], col=c("white", "black"), axes=FALSE, xlab="", ylab="")
abline(h=xs, col=8, lwd=0.2)
abline(v=ys, col=8, lwd=0.2)
#box()
}
par(op)
dev.off()
n <- 3
x <- 1:n
y <- 1:n
z <- 1:n
grid <- expand.grid(x,y,z)
vec <- ((grid[,1]-rep(n/2+.5,n^3))^2 +
(grid[,2]-rep(n/2+.5,n^3))^2 +
(grid[,3]-rep(n/2+.5,n^3))^2)^.5
a <- array(round(vec,3),dim=c(n,n,n)) #in array (pixel) scale
a.metric <- a*(1/n) #in "metric" scale
a
a.metric
STACK <- array(as.numeric(a.metric <=.1),dim=c(n,n,n))
STACK
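As a quick check (my note, not from the original post): with n = 3 the grid points sit at unit spacing with the center at (2,2,2), and after rescaling by 1/n only the center itself falls within metric radius 0.1, so sum(STACK) returns 1.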
SimNo <- 10
for (i in 1:SimNo){
z1<-rnorm(1000,0,1)
z2<-rnorm(1000,0,1)
z3<-rnorm(1000,0,1)
z4<-rnorm(1000,0,1)
z5<-rnorm(1000,0,1)
z6<-rnorm(1000,0,1)
X<-cbind(z1,z2,z3,z4,z5,z6)
sx<-scale(X)/sqrt(999)
det1<-det(t(sx)%*%sx)
detans<-do.call(rbind,lapply(1:SimNo, function(x) ifelse(det1<1,det1,0)))
}
When I run all the commands within the loop except the last one, I get different values of the determinant, but when I run the code with the loop all at once, I get the last value of the determinant repeated for all iterations.
Please help me understand how to handle situations like this.
Is there a short and efficient way to write this code, so that each individual variable can also be accessed?
Whenever you are repeating the same operation multiple times, without varying inputs, think about using replicate. Here you can use it twice:
SimNo <- 10
det1 <- replicate(SimNo, {
X <- replicate(6, rnorm(1000, 0, 1))
sx <- scale(X) / sqrt(999)
det(t(sx) %*% sx)
})
detans <- ifelse(det1 < 1, det1, 0)
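(A side note, my addition: because sx is the centered, scaled data divided by sqrt(999), t(sx) %*% sx is the 6 by 6 sample correlation matrix of X, whose determinant always lies in [0, 1]; so the ifelse(det1 < 1, ...) condition will essentially always keep det1.)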
Otherwise, this is what your code should have looked like with your for loop. You needed to create a vector for storing your output at each loop iteration:
SimNo <- 10
detans <- numeric(SimNo)
for (i in 1:SimNo) {
z1<-rnorm(1000,0,1)
z2<-rnorm(1000,0,1)
z3<-rnorm(1000,0,1)
z4<-rnorm(1000,0,1)
z5<-rnorm(1000,0,1)
z6<-rnorm(1000,0,1)
X<-cbind(z1,z2,z3,z4,z5,z6)
sx<-scale(X)/sqrt(999)
det1<-det(t(sx)%*%sx)
detans[i] <- ifelse(det1<1,det1,0)
}
Edit: you asked in the comments how to access X using replicate. You would have to make replicate create and store all your X matrices in a list. Then use the *apply family of functions to loop throughout that list to finish the computations:
X <- replicate(SimNo, replicate(6, rnorm(1000, 0, 1)), simplify = FALSE)
det1 <- sapply(X, function(x) {
sx <- scale(x) / sqrt(999)
det(t(sx) %*% sx)
})
detans <- ifelse(det1 < 1, det1, 0)
Here, X is now a list of matrices, so you can get e.g. the matrix for the second simulation by doing X[[2]].
SimNo <- 10
matdet <- matrix(data=NA, nrow=SimNo, ncol=1, byrow=TRUE)
for (i in 1:SimNo){
z1<-rnorm(1000,0,1)
z2<-rnorm(1000,0,1)
z3<-rnorm(1000,0,1)
z4<-rnorm(1000,0,1)
z5<-rnorm(1000,0,1)
z6<-rnorm(1000,0,1)
X<-cbind(z1,z2,z3,z4,z5,z6)
sx<-scale(X)/sqrt(999)
det1<-det(t(sx)%*%sx)
matdet[i] <- ifelse(det1<1,det1,0)
}
matdet