I am trying to implement the Metropolis-Hastings algorithm for a simple linear regression in C (without external libraries such as Boost or Eigen, and without two-dimensional arrays)*. To make testing the code and evaluating the trace plots easier, I have rewritten it in R (see below), keeping as much of the C code as possible.
Unfortunately, the chains don't converge. I am wondering whether
1. there is a mistake in the implementation itself, or
2. it is "just" a bad choice of proposal distributions.
Assuming the latter, how does one find good parameters for the proposal distributions (currently I have picked arbitrary values) so that the algorithm works? Even with only three parameters, as in this case, it is quite hard to find suitable values. How is this normally handled when, say, Gibbs sampling is not an alternative?
*I want to use this code for CUDA.
#### posterior distribution
logPostDensity <- function(x, y, a, b, s2, N)
{
sumSqError = 0.0
for(i in 1:N)
{
sumSqError = sumSqError + (y[i] - (a + b*x[i]))^2
}
return((-((N/2)+1) * log(s2)) + ((-0.5/s2) * sumSqError))
}
# x = x values
# y = actual datapoints
# N = sample size
# m = length of chain
# sigmaProp = half-width of the uniform proposal for sigma squared
# paramAProp = half-width of the uniform proposal for the intercept
# paramBProp = half-width of the uniform proposal for the slope
mcmcSampling <- function(x,y,N,m,sigmaProp,paramAProp,paramBProp)
{
paramsA = vector("numeric",length=m) # intercept
paramsB = vector("numeric",length=m) # slope
s2 = vector("numeric",length=m) # sigma squared
paramsA[1] = 0
paramsB[1] = 0
s2[1] = 1
for(i in 2:m)
{
paramsA[i] = paramsA[i-1] + runif(1,-paramAProp,paramAProp)
if((logPostDensity(x,y,paramsA[i],paramsB[i],s2[i-1],N)
- logPostDensity(x,y,paramsA[i-1],paramsB[i-1],s2[i-1],N))
< log(runif(1)))
{
paramsA[i] = paramsA[i-1]
}
paramsB[i] = paramsB[i-1] + runif(1,-paramBProp,paramBProp)
if((logPostDensity(x,y,paramsA[i],paramsB[i],s2[i-1],N)
- logPostDensity(x,y,paramsA[i-1],paramsB[i-1],s2[i-1],N))
< log(runif(1)))
{
paramsB[i] = paramsB[i-1]
}
s2[i] = s2[i-1] + runif(1,-sigmaProp,sigmaProp)
if((s2[i] < 0) || (logPostDensity(x,y,paramsA[i],paramsB[i],s2[i],N)
- logPostDensity(x,y,paramsA[i],paramsB[i],s2[i-1],N))
< log(runif(1)))
{
s2[i] = s2[i-1]
}
}
res = data.frame(paramsA,paramsB,s2)
return(res)
}
#########################################
set.seed(321)
x <- runif(100)
y <- 2 + 5*x + rnorm(100)
summary(lm(y~x))
df <- mcmcSampling(x,y,10,5000,0.05,0.05,0.05)
par(mfrow=c(3,1))
plot(df$paramsA[3000:5000],type="l",main="intercept")
plot(df$paramsB[3000:5000],type="l",main="slope")
plot(df$s2[3000:5000],type="l",main="sigma")
There was one mistake in the intercept section (paramsA): the log-density for the proposed intercept was evaluated with paramsB[i], which has not been set yet at that point of the loop, instead of paramsB[i-1]. Everything else was fine. I've also implemented what Alexey suggested in his comments, i.e. adapting the proposal widths towards a target acceptance rate. Here's the solution:
# pow() mirrors the C library function so the R code stays close to the C code
pow <- function(x,y)
{
  return(x^y)
}
#### posterior distribution (log scale; flat priors on a and b, p(s2) proportional to 1/s2)
posteriorDistribution <- function(x, y, a, b,s2,N)
{
sumSqError <- 0.0
for(i in 1:N)
{
sumSqError <- sumSqError + pow(y[i] - (a + b*x[i]),2)
}
return((-((N/2)+1) * log(s2)) + ((-0.5/s2) * sumSqError))
}
# x <- x values
# y <- actual datapoints
# N <- sample size
# m <- length of chain
# sigmaProposalWidth <- half-width of the uniform proposal dist for sigma squared
# paramAProposalWidth <- half-width of the uniform proposal dist for the intercept
# paramBProposalWidth <- half-width of the uniform proposal dist for the slope
mcmcSampling <- function(x,y,N,m,sigmaProposalWidth,paramAProposalWidth,paramBProposalWidth)
{
desiredAcc <- 0.44 # target acceptance rate for componentwise (1-D) Metropolis updates
paramsA <- vector("numeric",length=m) # intercept
paramsB <- vector("numeric",length=m) # slope
s2 <- vector("numeric",length=m) # sigma squared
paramsA[1] <- 0
paramsB[1] <- 0
s2[1] <- 1
accATot <- 0
accBTot <- 0
accS2Tot <- 0
for(i in 2:m)
{
paramsA[i] <- paramsA[i-1] + runif(1,-paramAProposalWidth,paramAProposalWidth)
accA <- 1
if((posteriorDistribution(x,y,paramsA[i],paramsB[i-1],s2[i-1],N) -
posteriorDistribution(x,y,paramsA[i-1],paramsB[i-1],s2[i-1],N)) < log(runif(1)))
{
paramsA[i] <- paramsA[i-1]
accA <- 0
}
accATot <- accATot + accA
paramsB[i] <- paramsB[i-1] + runif(1,-paramBProposalWidth,paramBProposalWidth)
accB <- 1
# NB: the denominator must condition on the already-updated paramsA[i], not paramsA[i-1]
if((posteriorDistribution(x,y,paramsA[i],paramsB[i],s2[i-1],N) -
posteriorDistribution(x,y,paramsA[i],paramsB[i-1],s2[i-1],N)) < log(runif(1)))
{
paramsB[i] <- paramsB[i-1]
accB <- 0
}
accBTot <- accBTot + accB
s2[i] <- s2[i-1] + runif(1,-sigmaProposalWidth,sigmaProposalWidth)
accS2 <- 1
if((s2[i] < 0) || (posteriorDistribution(x,y,paramsA[i],paramsB[i],s2[i],N) -
posteriorDistribution(x,y,paramsA[i],paramsB[i],s2[i-1],N)) < log(runif(1)))
{
s2[i] <- s2[i-1]
accS2 <- 0
}
accS2Tot <- accS2Tot + accS2
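# Rescale the proposal widths every 100 iterations towards the target
# acceptance rate. NB: strictly speaking, adapting for the entire run breaks
# detailed balance; in practice one would adapt only during burn-in and then
# fix the widths.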
if(i%%100==0)
{
paramAProposalWidth <- paramAProposalWidth * ((accATot/100)/desiredAcc)
paramBProposalWidth <- paramBProposalWidth * ((accBTot/100)/desiredAcc)
sigmaProposalWidth <- sigmaProposalWidth * ((accS2Tot/100)/desiredAcc)
accATot <- 0
accBTot <- 0
accS2Tot <- 0
}
}
res <- data.frame(paramsA,paramsB,s2)
return(res)
}
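To sanity-check the fix, the driver from the question can be reused. One assumption on my part: I pass N = 100 rather than the original N = 10, since x and y contain 100 points and the likelihood should use all of them:
set.seed(321)
x <- runif(100)
y <- 2 + 5*x + rnorm(100)
df <- mcmcSampling(x, y, 100, 5000, 0.05, 0.05, 0.05)
par(mfrow = c(3,1))
plot(df$paramsA[3000:5000], type = "l", main = "intercept")
plot(df$paramsB[3000:5000], type = "l", main = "slope")
plot(df$s2[3000:5000], type = "l", main = "sigma squared")
The traces should now fluctuate around the true values (intercept 2, slope 5, sigma squared 1).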
Related
I am having difficulty translating an algorithm from C to R. It is the Kolmogorov-Smirnov test, and more specifically the KS probability function.
In 'Numerical Recipes in C' it is coded as probks:
#include <math.h>
#define EPS1 0.001
#define EPS2 1.0e-8
float probks(float alam)
/*Kolmogorov-Smirnov probability function.*/
{
int j;
float a2,fac=2.0,sum=0.0,term,termbf=0.0;
a2 = -2.0*alam*alam;
for (j=1;j<=100;j++) {
term=fac*exp(a2*j*j);
sum += term;
if (fabs(term) <= EPS1*termbf || fabs(term) <= EPS2*sum) return sum;
fac = -fac; /*Alternating signs in sum.*/
termbf=fabs(term);
}
return 1.0; /* Get here only by failing to converge. */
}
I don't know how to handle the translation into R of the last few lines; all I have now is
PROBKS <- function(lambda) {
EPS1 <- 0.001; EPS2 <- 1.0e-8;
sum <- 0.0; fac <- 2.0; termbf <- 0.0;
a2 <- -2*lambda*lambda
for (j in 1:100) {
term <- fac * exp(a2*j*j)
sum <- sum + term
if ( (abs(term) <= EPS1*termbf) || (abs(term) <= EPS2*sum) ) {
break
} else {
fac <- -fac
}
}
termbf <- abs(term)
return(sum)
}
but this produces a non-monotonic probability function, whereas it should satisfy $Q_{KS}(0) = 1$ and $Q_{KS}(\infty) = 0$.
Obviously, it's about how to interpret/encode the last 'if' statement.
Any help would be much appreciated. M
EDIT 1:
Here is my session info:
> sessionInfo()
R version 3.4.4 (2018-03-15)
Platform: i386-w64-mingw32/i386 (32-bit)
Running under: Windows >= 8 x64 (build 9200)
...
EDIT 2
Using Konrad's function ks_cdf and
x = seq(0, 1, by = 0.01)
plot(x, ks_cdf(x))
still gives 0 at 0
EDIT 3
After upgrading to 3.6.1
> sessionInfo()
R version 3.6.1 (2019-07-05)
Platform: x86_64-w64-mingw32/x64 (64-bit)
Running under: Windows >= 8 x64 (build 9200)
...
I still get the same plot as above, i.e. ks_cdf(0) = 0 while it should be ks_cdf(0) = 1.
The code can be translated into R almost literally; it's not clear why you diverged from the C code. Here's a literal, slightly cleaned-up translation:
ks_cdf = function (lambda) {
EPS1 = 0.001
EPS2 = 1.0e-8
sum = 0
fac = 2
termbf = 0
a2 = -2 * lambda ^ 2
for (j in 1 : 100) {
term = fac * exp(a2 * j ^ 2)
sum = sum + term
if ((abs(term) <= EPS1 * termbf) || (abs(term) <= EPS2 * sum)) {
return(sum)
} else {
fac = -fac
termbf = abs(term)
}
}
1 # Failed to converge.
}
This code works but isn’t vectorised, which is something I’d change for a real implementation (but, by doing so, we’d lose the early exit).
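(If all you need is to evaluate it on vector input, a minimal stopgap, my suggestion, keeping the scalar function as-is, is to map it over the input:)
ks_cdf_v <- Vectorize(ks_cdf)  # or: sapply(x, ks_cdf)
x <- seq(0, 1, by = 0.01)
plot(x, ks_cdf_v(x))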
Here’s an idiomatic R implementation using vectorised arithmetic and matrix multiplication:
ks_cdf = function (λ) {
eps1 = 0.001
eps2 = 1E-8
range = seq(1, 100)
terms = (-1) ^ (range - 1) * exp(-2 * range ^ 2 %*% t(λ ^ 2))
sums = 2 * colSums(terms)
pterms = abs(terms)
prev_pterms = rbind(0, pterms[-nrow(pterms), , drop = FALSE])
converged = apply(pterms <= eps1 * prev_pterms | pterms <= eps2 * sums, 2L, any)
sums[! converged] = 1
sums
}
And to show how nicely it vectorises, and that this is in fact a big deal:
x = seq(0, 1, by = 0.01)
plot(x, ks_cdf(x))
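(Regarding the edits above: for λ = 0 the alternating series meets neither tolerance within 100 terms, so it is the non-convergence fallback of 1 that yields ks_cdf(0) = 1, in agreement with $Q_{KS}(0) = 1$.)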
I have the following objects:
A: an array with x, y, z dimensions -> containing a variable (Temperature)
B & C: two arrays with x, y dimensions -> containing the indices of vectors along A's z dimension
A <- array(rnorm(n = 12*4*3*5), dim = c(4,3,5))
dimnames(A) <- list("x" = c(1:4), "y" = c(1:3), "z" = c(1:5))
B <- matrix(rep(c(2:1), 6), nrow = 4)
dimnames(B) <- list("x" = c(1:4), "y" = c(1:3))
C <- matrix(rep(c(4:5), 6), nrow = 4)
dimnames(C) <- list("x" = c(1:4), "y" = c(1:3))
I'm looking for a way to sum A across the z dimension, but only between the indices indicated by B and C.
If instead of the 3D-array I had a vector I would solve it like this:
> A <- round(c(rnorm(5)), 1)
> B <- 2 #index of first value to sum
> C <- 4 #index of last value to sum
> vindex <- seq(B,C,1)
> A
[1] 0.0 -0.9 -1.1 -1.7 -0.4
> vindex
[1] 2 3 4
> sum(A[vindex])
[1] -3.7
>
# or better with a function
> foo <- function(x, start_idx, end_idx) {
+ vidx <- seq(start_idx, end_idx, 1)
+ return(sum(x[vidx]))
+ }
>
> foo(A,B,C)
[1] -3.7
Unfortunately seq() does not accept vectors as arguments, and therefore it's not straightforward to use the apply function. If, as before, A is A[x,y,z] and B and C are B[x,y] and C[x,y]:
> apply(A,c(1,2),foo,B,C)
Error in seq.default(start_idx, end_idx, 1) : 'from' must be of length 1
Called from: seq.default(start_idx, end_idx, 1)
It would be great if anybody knew how to make this function work with apply, or knew another clean solution.
Thanks a lot!
This is not a very nice task for base R, and in the absence of a package that already does it, I would prefer to implement it in C++.
Logically speaking, a plain but vectorized solution to your problem could be structured as:
# initialize index array
D <- array(
1,
dim = c(4,3,5),
dimnames = list(x = letters[1:4], y = letters[1:3], z = letters[1:5])
)
# set indices out of bounds to zero
E <- rep(1:5, each = 4*3)
BB <- rep(B, times = 5)
D[E < BB] <- 0
CC <- rep(C, times = 5)
D[E > CC] <- 0
# multiply with index array and sum
apply(A * D, c(1,2), sum)
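A quick cross-check of this approach (my addition, not part of the original answer) against a direct mapply over the x,y grid:
ref <- matrix(
  mapply(function(i, j) sum(A[i, j, B[i, j]:C[i, j]]), row(B), col(B)),
  nrow = nrow(B)
)
all.equal(unname(apply(A * D, c(1,2), sum)), ref)
# [1] TRUE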
I'm doing some optimization in R and in connection with that I need to write a function that returns a jacobian. It's a very simple jacobian -- just zeros and ones -- but I'd like to populate it quickly and cleanly. My current code works but is very sloppy.
I have a four-dimensional array of probabilities. Index the dimensions by i, j, k, l. My constraint is that, for each i, j, k, the sum of probabilities over index l must equal 1.
I compute my constraint vector like this:
get_prob_array_from_vector <- function(prob_vector, array_dim) {
return(array(prob_vector, array_dim))
}
constraint_function <- function(prob_vector, array_dim) {
prob_array <- get_prob_array_from_vector(prob_vector, array_dim)
prob_array_sums <- apply(prob_array, MARGIN=c(1, 2, 3), FUN=sum)
return(as.vector(prob_array_sums) - 1) # Should equal zero
}
My question is: what is a clean, fast way of computing the jacobian of as.vector(apply(array(my_input_vector, array_dim), MARGIN=c(1, 2, 3), FUN=sum)) -- i.e., my constraint_function in the code above -- with respect to my_input_vector?
Here is my sloppy solution (which I check for correctness against the jacobian function from the numDeriv package):
library(numDeriv)
array_dim <- c(5, 4, 3, 3)
get_prob_array_from_vector <- function(prob_vector, array_dim) {
return(array(prob_vector, array_dim))
}
constraint_function <- function(prob_vector, array_dim) {
prob_array <- get_prob_array_from_vector(prob_vector, array_dim)
prob_array_sums <- apply(prob_array, MARGIN=c(1, 2, 3), FUN=sum)
return(as.vector(prob_array_sums) - 1)
}
constraint_function_jacobian <- function(prob_vector, array_dim) {
prob_array <- get_prob_array_from_vector(prob_vector, array_dim)
jacobian <- matrix(0, Reduce("*", dim(prob_array)[1:3]), length(prob_vector))
## There must be a faster, cleaner way of populating the jacobian
for(i in seq_along(prob_vector)) {
dummy_vector <- rep(0, length(prob_vector))
dummy_vector[i] <- 1
dummy_array <- get_prob_array_from_vector(dummy_vector, array_dim)
dummy_array_sums <- apply(dummy_array, MARGIN=c(1, 2, 3), FUN=sum)
jacobian_row_idx <- which(dummy_array_sums != 0, arr.ind=FALSE)
stopifnot(length(jacobian_row_idx) == 1)
jacobian[jacobian_row_idx, i] <- 1
} # Is there a fast, readable one-liner that does the same as this for loop?
stopifnot(sum(jacobian) == length(prob_vector))
stopifnot(all(jacobian == 0 | jacobian == 1))
return(jacobian)
}
## Example of a probability array satisfying my constraint
my_prob_array <- array(0, array_dim)
for(i in seq_len(array_dim[1])) {
for(j in seq_len(array_dim[2])) {
my_prob_array[i, j, , ] <- diag(array_dim[3])
}
}
my_prob_array[1, 1, , ] <- 1 / array_dim[3]
my_prob_array[2, 1, , ] <- 0.25 * (1 / array_dim[3]) + 0.75 * diag(array_dim[3])
my_prob_vector <- as.vector(my_prob_array) # Flattened representation of my_prob_array
should_be_zero_vector <- constraint_function(my_prob_vector, array_dim)
is.vector(should_be_zero_vector)
all(should_be_zero_vector == 0) # Constraint is satisfied
## Check constraint_function_jacobian for correctness using numDeriv
jacobian_analytical <- constraint_function_jacobian(my_prob_vector, array_dim)
jacobian_numerical <- jacobian(constraint_function, my_prob_vector, array_dim=array_dim)
max(abs(jacobian_analytical - jacobian_numerical)) # Very small
My functions take prob_vector as input -- i.e., a flattened representation of my probability array -- because optimization functions require vector arguments.
It took me some time to understand what you were trying to do, but here is a proposed replacement for your constraint_function_jacobian:
enhanced <- function(prob_vector, array_dim) {
firstdim <- Reduce("*", array_dim[1:3])
seconddim <- length(prob_vector)
jacobian <- matrix(0, firstdim, seconddim)
idxs <- split(1:seconddim, cut(1:seconddim, array_dim[4], labels=FALSE))
for (i in seq_along(idxs)) {
diag(jacobian[, idxs[[i]] ]) <- 1
}
stopifnot(sum(jacobian) == length(prob_vector))
stopifnot(all(jacobian == 0 | jacobian == 1))
jacobian
}
Unless I'm wrong, the jacobian construction amounts to filling diagonals with 1; since the matrix is not square, we have to split it into array_dim[4] square blocks and fill each block's diagonal with 1.
I also got rid of converting prob_vector to an array just to read its dim, since that is the same as array_dim; skipping this step is not a huge improvement, but it simplifies the code IMO.
Results are OK according to this test:
identical(constraint_function_jacobian(my_prob_vector, array_dim),
enhanced(my_prob_vector, array_dim))
# [1] TRUE
According to a benchmark it gives a great speedup:
microbenchmark::microbenchmark(
original=constraint_function_jacobian(my_prob_vector, array_dim),
enhanced=enhanced(my_prob_vector, array_dim), times=100)
# Unit: microseconds
# expr min lq mean median uq max neval cld
# original 16946.979 18466.491 20150.304 19066.7410 19671.4100 28148.035 100 b
# enhanced 678.222 737.948 799.005 796.3905 834.5925 1141.773 100 a
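Note that, with R's column-major flattening, this jacobian is just array_dim[4] identity blocks side by side, so it can also be built directly (a sketch of mine, not from the original answer; one_liner is a hypothetical name, and prob_vector is kept only for signature parity):
one_liner <- function(prob_vector, array_dim) {
  firstdim <- prod(array_dim[1:3])
  # cbind array_dim[4] identity blocks: J = [I | I | ... | I]
  do.call(cbind, replicate(array_dim[4], diag(firstdim), simplify = FALSE))
}
identical(one_liner(my_prob_vector, array_dim), enhanced(my_prob_vector, array_dim))
# [1] TRUE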
I have the following function, which takes 4 vectors. The T vector has a given length, and the 3 other vectors (pga, Sa5Hz and Sa1Hz) share a common length (identical to each other, but not necessarily equal to the length of T).
The output is a matrix with length(T) rows and length(pga) columns.
My code below seems like the perfect example of what NOT to do; however, I could not figure out a way to optimize it using an apply function. Can anyone help?
designSpectrum <- function (T, pga, Sa5Hz, Sa1Hz){
Ts <- Sa1Hz / Sa5Hz
#By convention, if Sa5Hz is zero (making Ts NaN), set Ts to 0.
Ts[is.nan(Ts)] <- 0
res <- matrix(NA, nrow = length(T), ncol = length(pga))
for (i in 1:nrow(res))
{
for (j in 1:ncol(res))
{
res[i,j] <- if(T[i] <= 0) {pga[j]}
else if (T[i] <= 0.2 * Ts[j]) {pga[j] + T[i] * (Sa5Hz[j] - pga[j]) / (0.2 * Ts[j])}
else if (T[i] <= Ts[j]) {Sa5Hz[j]}
else Sa1Hz[j] / T[i]
}
}
return(res)
}
Instead of doing a double for loop and processing each i and j value separately, you can use the outer function to process all of them in one shot. Since you're now processing multiple i and j values simultaneously, you need to switch to the vectorized ifelse statement instead of the non-vectorized if and else statements:
designSpectrum2 <- function (T, pga, Sa5Hz, Sa1Hz) {
Ts <- Sa1Hz / Sa5Hz
Ts[is.nan(Ts)] <- 0
outer(1:length(T), 1:length(pga), function(i, j) {
ifelse(T[i] <= 0, pga[j],
ifelse(T[i] <= 0.2 * Ts[j], pga[j] + T[i] * (Sa5Hz[j] - pga[j]) / (0.2 * Ts[j]),
ifelse(T[i] <= Ts[j], Sa5Hz[j], Sa1Hz[j] / T[i])))
})
}
identical(designSpectrum(T, pga, Sa5Hz, Sa1Hz), designSpectrum2(T, pga, Sa5Hz, Sa1Hz))
# [1] TRUE
Data:
T <- -1:3
pga <- 1:3
Sa5Hz <- 2:4
Sa1Hz <- 3:5
You can see the efficiency gains by testing on rather large vectors (here I'll use an output matrix with 1 million entries):
# Larger vectors
set.seed(144)
T2 <- runif(1000, -1, 3)
pga2 <- runif(1000, -1, 3)
Sa5Hz2 <- runif(1000, -1, 3)
Sa1Hz2 <- runif(1000, -1, 3)
# Runtime comparison
all.equal(designSpectrum(T2, pga2, Sa5Hz2, Sa1Hz2), designSpectrum2(T2, pga2, Sa5Hz2, Sa1Hz2))
# [1] TRUE
system.time(designSpectrum(T2, pga2, Sa5Hz2, Sa1Hz2))
# user system elapsed
# 4.038 1.011 5.042
system.time(designSpectrum2(T2, pga2, Sa5Hz2, Sa1Hz2))
# user system elapsed
# 0.517 0.138 0.652
The approach with outer is almost 8x faster (5.042 s vs 0.652 s elapsed).
I am aware of the ncdf package for R.
I am trying to plot wind speed and wind direction from a 4-D WRF output netCDF file; the variable is a function of (i, j, k, l).
New variables need to be created, with the calculation shown in the code below.
Looping through 4 nested for loops takes too long. I understand that advanced looping techniques such as plyr, tapply or mapply exist, but the examples for these are only for 2-D arrays/matrices.
Please suggest an optimization approach for a 4-D array (a vectorised sketch is given after the code).
library(ncdf)
ncin <- open.ncdf("wrfout.nc")
imax <- 425 #No of grids in Longitude
jmax <- 200 #No of grids in Latitude
kmax <- 40 #Vertical layers
lmax <- 11 #Time
paiv <- atan(1.0)/45.0
#However, UUin and VVin are on a staggered grid in the input file
#dim(UUin) is (426,200,40,11)
#dim(VVin) is (425,201,40,11)
UUin <- get.var.ncdf(ncin,"U")
VVin <- get.var.ncdf(ncin,"V")
#Initialize arrays in normal grid
UU <- array(0.0, c(imax,jmax,kmax,lmax))
VV <- array(0.0, c(imax,jmax,kmax,lmax))
Wspd <- array(0.0,c(imax,jmax,kmax,lmax))
Wdir <- array(0.0,c(imax,jmax,kmax,lmax))
for (l in 1:lmax) {
for (k in 1:kmax) {
for (j in 1:jmax) {
for (i in 1:imax) {
#Convert U and V from the staggered grid to the regular grid
UU[i,j,k,l] <- 0.5*(UUin[i,j,k,l]+ UUin[i+1,j,k,l])
VV[i,j,k,l] <- 0.5*(VVin[i,j,k,l]+ VVin[i,j+1,k,l])
#Now calculate wind speed and direction in regular grid
Wspd[i,j,k,l] <- sqrt(UU[i,j,k,l]*UU[i,j,k,l] + VV[i,j,k,l]* VV[i,j,k,l])
if (UU[i,j,k,l] == 0.0)
{ # startif
if(VV[i,j,k,l] < 0.0)
Wdir[i,j,k,l] = 0.0
else
Wdir[i,j,k,l] = 180.0
}
else
Wdir[i,j,k,l] = atan2(VV[i,j,k,l], UU[i,j,k,l]) / paiv
if(UU[i,j,k,l] < 0.0) {
Wdir[i,j,k,l] = 90.0 - Wdir[i,j,k,l]
}
else
Wdir[i,j,k,l] = 270.0 - Wdir[i,j,k,l]
if (Wdir[i,j,k,l] < 0.0)
{
Wdir[i,j,k,l] = Wdir[i,j,k,l] + 360.0
}
if (Wdir[i,j,k,l] > 360.0)
{
Wdir[i,j,k,l] = Wdir[i,j,k,l] - 360.0
}
} #end of i
} #end of j
} #end of k
} #end of l
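For reference, here is a sketch of how the four nested loops collapse to whole-array operations. This is an illustration, untested against real WRF output, and it uses the standard meteorological direction formula Wdir = (270 - atan2(V, U) * 180/pi) mod 360; note that this does not reproduce the posted branch logic exactly (in particular for UU <= 0), so check it against your intended convention:
#Destagger U and V by averaging adjacent staggered points (two shifted slices)
UU <- 0.5 * (UUin[1:imax, , , ] + UUin[2:(imax+1), , , ])
VV <- 0.5 * (VVin[, 1:jmax, , ] + VVin[, 2:(jmax+1), , ])
#Speed and meteorological direction, elementwise on the whole 4-D arrays
Wspd <- sqrt(UU^2 + VV^2)
Wdir <- (270 - atan2(VV, UU)/paiv) %% 360  #paiv = pi/180, so atan2(...)/paiv is in degrees
Since atan2() is well-defined for UU == 0, no special-casing is needed there, and %% 360 replaces the manual wrap-around checks.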