I work in R with C libraries. I need to pass a C function an array of numbers between 1 and 10, which could also be "NA". Then, in C, I need to set the output depending on the value.
Here's a simplified version of the code:
dyn.load("ranking.so")
fun <- function(ranking) {
  nrak <- length(ranking)
  out <- .C("ranking", as.integer(nrak), as.character(ranking),
            rr = as.integer(vector("integer", nrak)))
  out$rr
}
ranking <- sample(c(NA,seq(1,10)),10,replace=TRUE)
rr <- fun(ranking)
The C function could simply be something like:
#include <R.h>
void ranking(int *nrak, char *ranking, int *rr) {
    int i;

    for (i = 0; i < *nrak; i++) {
        if (ranking[i] == 'NA')
            rr[i] = 1;
        else
            rr[i] = (int) strtol(&ranking[i], (char **)NULL, 10);
    }
}
Due to the "NA" values I pass ranking as character, but maybe there's another way to do this, using integers and without replacing "NA" with 0 before calling the function?
(The code as it stands always gives me an array of zeros...)
Test for whether the value is an NA using R_NaInt, like
#include <R.h>
void ranking_c(int *nrak, int *ranking, int *rr) {
    for (int i = 0; i < *nrak; i++)
        rr[i] = R_NaInt == ranking[i] ? -1 : ranking[i];
}
Invoke from R by explicitly allowing NAs
> x = c(1:2, NA_integer_)
> .C("ranking_c", length(x), as.integer(x), integer(length(x)), NAOK=TRUE)[[3]]
[1] 1 2 -1
Alternatively, use R's .Call() interface. Each R object is represented as an S-expression. There are C-level functions to manipulate S-expressions, e.g., Rf_length() for length, INTEGER() for data access, and Rf_allocVector() for allocating S-expressions of different types, such as INTSXP for integer vectors.
R memory management uses a garbage collector that can run on any call that allocates memory. It is therefore best practice to PROTECT() any R allocation while in scope.
Your function will accept 0 or more S-expressions as input, and return a single S-expression; it might be implemented as
#include <Rinternals.h>
#include <R_ext/Arith.h>
SEXP ranking_call(SEXP ranking)
{
    /* allocate space for result, PROTECTing from garbage collection */
    SEXP result = PROTECT(Rf_allocVector(INTSXP, Rf_length(ranking)));

    /* assign result */
    for (int i = 0; i < Rf_length(ranking); ++i)
        INTEGER(result)[i] =
            R_NaInt == INTEGER(ranking)[i] ? -1 : INTEGER(ranking)[i];

    UNPROTECT(1); /* no more need to protect */
    return result;
}
And invoked from R with .Call("ranking_call", as.integer(ranking)).
Using .Call is more efficient than .C in terms of speed and memory allocation (.C may copy atomic vectors on the way in), but the primary reason to use it is for the flexibility it offers in terms of working directly with R's data structures. This is especially important when the return values are more complicated than atomic vectors.
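As a concrete illustration of returning something richer than an atomic vector, here is a minimal sketch of a .Call routine that builds a named list; the function name "ranking_summary" and the list layout are my own, not from the question:

#include <Rinternals.h>

/* Sketch only: return a two-element named list (values, n) instead of a
   single vector. Assumes the input is an integer vector. */
SEXP ranking_summary(SEXP ranking)
{
    SEXP result = PROTECT(Rf_allocVector(VECSXP, 2));
    SEXP names = PROTECT(Rf_allocVector(STRSXP, 2));

    SET_VECTOR_ELT(result, 0, ranking);                              /* the input vector */
    SET_VECTOR_ELT(result, 1, Rf_ScalarInteger(Rf_length(ranking))); /* its length */
    SET_STRING_ELT(names, 0, Rf_mkChar("values"));
    SET_STRING_ELT(names, 1, Rf_mkChar("n"));
    Rf_setAttrib(result, R_NamesSymbol, names);

    UNPROTECT(2);
    return result;
}

Invoked as .Call("ranking_summary", as.integer(ranking)), the result behaves like any other R list.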
You are attempting to address a couple of delicate and non-trivial points, not least how to compile code for use with R, and how to test for non-finite values.
You asked for help with C. I would like to suggest C++ -- which you do not need to use in a complicated way. Consider this short file, which contains a function that processes a vector along the lines you suggest: I just test for NA and assign 42 as a marker (for simplicity), and otherwise square the value:
#include <Rcpp.h>
using namespace Rcpp;
// [[Rcpp::export]]
NumericVector foo(NumericVector x) {
    unsigned int n = x.size();
    for (unsigned int i = 0; i < n; i++)
        if (NumericVector::is_na(x[i]))
            x[i] = 42.0;
        else
            x[i] = pow(x[i], 2);
    return x;
}
/*** R
foo( c(1, 3, NA, NaN, 6) )
*/
If I save this on my box as /tmp/foo.cpp, then in order to compile, link, load and even run the embedded R usage example, I only need one line calling sourceCpp():
R> Rcpp::sourceCpp("/tmp/foo.cpp")
R> foo( c(1, 3, NA, NaN, 6))
[1] 1 9 42 42 36
R>
We can do the same with integers:
// [[Rcpp::export]]
IntegerVector bar(IntegerVector x) {
    unsigned int n = x.size();
    for (unsigned int i = 0; i < n; i++)
        if (IntegerVector::is_na(x[i]))
            x[i] = 42;
        else
            x[i] = pow(x[i], 2);
    return x;
}
Related
I'm working on developing sorting algorithms and was surprised to find C's qsort taking 1.6x as long as Julia's default sorting algorithm. I imagine I'm making some sort of benchmarking mistake. Here are my benchmarking programs and their results:
Julia:
# time (julia bench.jl)
using Printf
function main()
    len = 100_000_000
    x = rand(Int64, len)
    t = @elapsed sort!(x)
    @printf "%d elements:\nclaim\t%fs" len t
end
main()
C:
// time (gcc -O3 bench.c && ./a.out)
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
int comp (const void * elem1, const void * elem2)
{
    int f = *((int*)elem1);
    int s = *((int*)elem2);
    if (f > s) return 1;
    if (f < s) return -1;
    return 0;
}

long long utime()
{
    struct timeval now_time;
    gettimeofday(&now_time, NULL);
    return now_time.tv_sec * 1000000LL + now_time.tv_usec;
}

int main(int argc, char* argv[])
{
    long length = 100000000;
    long long *x;

    x = (long long *) malloc(length * sizeof(long long));
    if (x == NULL)
    {
        printf("Malloc failed\n");
        return 1;
    }

    for (long cnt = 0; cnt < length; cnt++)
        x[cnt] = rand();

    long long start = utime();
    qsort (x, length, sizeof(*x), comp);
    long long end = utime();

    //for (long cnt = 0; cnt < length; cnt += length/10)
    //    printf("%lld\n", x[cnt]);

    free(x);
    printf("%ld elements:\nclaim\t%fs", length, (end-start)/1000000.0);
    return 0;
}
Results
bash-3.2$ time (julia bench.jl)
100000000 elements:
claim 12.405531s
real 0m16.560s
user 0m13.883s
sys 0m1.297s
bash-3.2$ time (gcc -O3 bench.c && ./a.out)
100000000 elements:
claim 20.592641s
real 0m24.604s
user 0m21.352s
sys 0m2.479s
Is it true that Julia's algorithm (median-of-3 quicksort with an insertion-sort base case for fewer than 20 elements) is substantially faster than C's qsort? Can I sort faster than qsort in C?
It's easy to sort faster than C's qsort. You could, for example, use C++'s std::sort. The C++ version is not faster because it uses a better algorithm; rather, C++'s generics allow the compiler to avoid both the overhead of calling the comparison function through a pointer and the overhead of qsort's swap, which must handle elements of arbitrary size.
In the following, the only difference between sortbench-c and sortbench-cc is the use of std::sort in the latter:
$ diff sortbench-c.c sortbench-cc.cc
1c1
< // time (gcc -O3 sortbench-c.c && ./a.out)
---
> // time (gcc -O3 sortbench-cc.cc && ./a.out)
2a3
> #include <algorithm>
7,14d7
< int comp (const void * elem1, const void * elem2)
< {
< int f = *((int*)elem1);
< int s = *((int*)elem2);
< if (f > s) return 1;
< if (f < s) return -1;
< return 0;
< }
38c31
< qsort (x, length, sizeof(*x), comp);
---
> std::sort(x, x+length);
The difference is dramatic:
$ time (gcc -O3 sortbench-c.c && ./a.out)
100000000 elements:
claim 16.673827s
real 0m17.774s
user 0m17.387s
sys 0m0.379s
$ time (gcc -O3 sortbench-cc.cc && ./a.out)
100000000 elements:
claim 9.948971s
real 0m11.133s
user 0m10.926s
sys 0m0.204s
There is no performance guarantee for qsort:
Despite the name, neither C nor POSIX standards require this function to be implemented using quicksort or make any complexity or stability guarantees.
To do a proper sorting benchmark between Julia and C, you will need another implementation.
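If you want the comparison to be closer to apples-to-apples, one option is to port the described algorithm to C yourself. Below is a rough, untuned sketch of a median-of-3 quicksort with an insertion-sort base case (cutoff 20, per the question); call it as quicksort_m3(x, 0, length - 1):

static void swap_ll(long long *a, long i, long j)
{
    long long t = a[i]; a[i] = a[j]; a[j] = t;
}

static void insertion_sort(long long *a, long lo, long hi)
{
    for (long i = lo + 1; i <= hi; i++) {
        long long v = a[i];
        long j = i;
        while (j > lo && a[j - 1] > v) {
            a[j] = a[j - 1];
            j--;
        }
        a[j] = v;
    }
}

void quicksort_m3(long long *a, long lo, long hi)
{
    while (hi - lo >= 20) {
        long mid = lo + (hi - lo) / 2;
        /* median-of-3: order a[lo], a[mid], a[hi]; median lands in a[mid] */
        if (a[mid] < a[lo]) swap_ll(a, mid, lo);
        if (a[hi] < a[mid]) swap_ll(a, hi, mid);
        if (a[mid] < a[lo]) swap_ll(a, mid, lo);
        long long pivot = a[mid];

        long i = lo, j = hi;
        while (i <= j) {                 /* Hoare-style partition */
            while (a[i] < pivot) i++;
            while (a[j] > pivot) j--;
            if (i <= j) {
                swap_ll(a, i, j);
                i++;
                j--;
            }
        }
        /* recurse into the smaller side, iterate on the larger */
        if (j - lo < hi - i) {
            quicksort_m3(a, lo, j);
            lo = i;
        } else {
            quicksort_m3(a, i, hi);
            hi = j;
        }
    }
    insertion_sort(a, lo, hi);
}

Dropping this in place of the qsort call removes the per-comparison function-call overhead, though it still won't be identical to Julia's implementation.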
The problem is that the rand functions are [probably] different.
Quicksort is data/order dependent. Mergesort, for example, does essentially the same amount of work regardless of what data it is sorting; quicksort's running time, however, varies with the data.
To do a proper benchmark, do not use rand unless you write it yourself or can guarantee that Julia's version and libc's version are exactly the same.
I'd write an initialization function for both languages, for example for (i = 0; i < length; ++i) array[i] = length - i; or some such, so that the initial data is guaranteed to be the same.
You can still use a random function if you have one program generate the array and save it to a file; the other program can then read in the [same] data. Sometimes I write a separate program that generates the input data and saves it to a file, then pass that file off to both programs. This decouples the test data generation from the programs under test.
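A minimal sketch of such a generator (the file name, element count, and use of rand() here are arbitrary choices, not taken from either benchmark):

#include <stdio.h>
#include <stdlib.h>

/* gen.c -- write n random 64-bit values to a data file; both benchmark
   programs then read the same bytes, so the generator no longer matters. */
int main(void)
{
    const long n = 100000000;
    FILE *f = fopen("testdata.bin", "wb");
    if (f == NULL) {
        perror("fopen");
        return 1;
    }
    for (long i = 0; i < n; i++) {
        /* build a 64-bit value from two rand() calls; any generator works
           here since both programs will read the identical file */
        long long v = ((long long) rand() << 32) ^ (long long) rand();
        if (fwrite(&v, sizeof v, 1, f) != 1) {
            perror("fwrite");
            fclose(f);
            return 1;
        }
    }
    fclose(f);
    return 0;
}

Each benchmark then replaces its rand() loop with one bulk read of the same file (fread in C; on the Julia side something like read!(io, Vector{Int64}(undef, n)) should pull in the same bytes), so both programs sort identical input.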
I am currently working on a project where I would like to optimize some numerical computation in Python by calling C.
In short, I need to compute the value of y[i] = f(x[i]) for each element in a huge array x (typically 10^9 entries or more). Here, x[i] is an integer between -10 and 10 and f is a function that takes x[i] and returns a double. My issue is that f takes a very long time to evaluate in a way that is numerically stable.
To speed things up, I would like to just hard-code all 2*10 + 1 = 21 possible values of f(x[i]) into a constant array such as:
double table_of_values[] = {f(-10), ...., f(10)};
And then just evaluate f using a "lookup table" approach as follows:
for (i = 0; i < N; i++) {
    y[i] = table_of_values[x[i] + 10]; // instead of y[i] = f(x[i])
}
Since I am not really well-versed at writing optimized code in C, I am wondering: since x is really large, is it worth doing second-degree optimization when evaluating the loop, e.g. by sorting x beforehand, or by finding a smart way to deal with the negative indices (aside from just writing x[i] + 10)?
Say x[i] were not between -10 and 10, but between -20 and 20. In this case, I could still use the same approach, but would need to hard-code the lookup table manually. Is there a way to generate the lookup table dynamically in the code, so that I can use the same approach while allowing x[i] to belong to a variable range?
It's fairly easy to generate such a table with dynamic range values.
Here's a simple, single table method:
#include <stdlib.h>
#define VARIABLE_USED(_sym) \
    do { \
        if (1) \
            break; \
        if (!! _sym) \
            break; \
    } while (0)
double *table_of_values;
int table_bias;
// use the smallest of these that can contain the values the x array may have
#if 0
typedef int xval_t;
#endif
#if 0
typedef short xval_t;
#endif
#if 1
typedef char xval_t;
#endif
#define XLEN (1 << 9)
xval_t *x;
// fslow -- your original function
double
fslow(int i)
{
    return 1; // whatever
}

// ftablegen -- generate variable table
void
ftablegen(double (*f)(int), int lo, int hi)
{
    int len;

    table_bias = -lo;
    len = hi - lo;
    len += 1;

    // NOTE: you can do free(table_of_values) when no longer needed
    table_of_values = malloc(sizeof(double) * len);

    for (int i = lo; i <= hi; ++i)
        table_of_values[i + table_bias] = f(i);
}

// fcached -- retrieve cached table data
double
fcached(int i)
{
    return table_of_values[i + table_bias];
}

// fripper -- access x and table arrays
void
fripper(xval_t *x)
{
    double *tptr;
    int bias;
    double val;

    // ensure these go into registers to prevent needless extra memory fetches
    tptr = table_of_values;
    bias = table_bias;

    for (int i = 0; i < XLEN; ++i) {
        val = tptr[x[i] + bias];
        // do stuff with val
        VARIABLE_USED(val);
    }
}

int
main(void)
{
    ftablegen(fslow, -10, 10);
    x = malloc(sizeof(xval_t) * XLEN);
    fripper(x);
    return 0;
}
Here's a slightly more complex way that allows many similar tables to be generated:
#include <stdlib.h>
#define VARIABLE_USED(_sym) \
    do { \
        if (1) \
            break; \
        if (!! _sym) \
            break; \
    } while (0)
// use the smallest of these that can contain the values the x array may have
#if 0
typedef int xval_t;
#endif
#if 1
typedef short xval_t;
#endif
#if 0
typedef char xval_t;
#endif
#define XLEN (1 << 9)
xval_t *x;
struct table {
    int tbl_lo;             // lowest index
    int tbl_hi;             // highest index
    int tbl_bias;           // bias for index
    double *tbl_data;       // cached data
};

struct table ftable1;
struct table ftable2;
double
fslow(int i)
{
    return 1; // whatever
}

double
f2(int i)
{
    return 2; // whatever
}

// ftablegen -- generate variable table
void
ftablegen(double (*f)(int), int lo, int hi, struct table *tbl)
{
    int len;

    tbl->tbl_bias = -lo;
    len = hi - lo;
    len += 1;

    // NOTE: you can free tbl_data when no longer needed
    tbl->tbl_data = malloc(sizeof(double) * len);

    for (int i = lo; i <= hi; ++i)
        tbl->tbl_data[i + tbl->tbl_bias] = f(i);
}

// fcached -- retrieve cached table data
double
fcached(struct table *tbl, int i)
{
    return tbl->tbl_data[i + tbl->tbl_bias];
}

// fripper -- access x and table arrays
void
fripper(xval_t *x, struct table *tbl)
{
    double *tptr;
    int bias;
    double val;

    // ensure these go into registers to prevent needless extra memory fetches
    tptr = tbl->tbl_data;
    bias = tbl->tbl_bias;

    for (int i = 0; i < XLEN; ++i) {
        val = tptr[x[i] + bias];
        // do stuff with val
        VARIABLE_USED(val);
    }
}

int
main(void)
{
    x = malloc(sizeof(xval_t) * XLEN);

    // NOTE: we could use 'char' for xval_t ...
    ftablegen(fslow, -37, 62, &ftable1);
    fripper(x, &ftable1);

    // ... but, this forces us to use a 'short' for xval_t
    ftablegen(f2, -99, 307, &ftable2);

    return 0;
}
Notes:
fcached could/should be an inline function for speed. Notice that once the table has been calculated, fcached(x[i]) is quite fast. The index-offset issue you mentioned [solved by the "bias"] costs trivially little calculation time.
While x may be a large array, the cached array of f() values is fairly small (e.g. -10 to 10). Even if it were (e.g.) -100 to 100, that is still only about 200 elements. This small cached array will [probably] stay in the hardware memory cache, so access will remain quite fast.
Thus, sorting x to optimize H/W cache performance of the lookup table will have little to no [measurable] effect.
The access pattern to x is independent. You'll get the best performance if you access x in a linear manner (e.g. for (i = 0; i < 999999999; ++i) x[i]). If you access it in a semi-random fashion, it will put a strain on the H/W cache logic and its ability to keep the needed/wanted x values "cache hot".
Even with linear access, because x is so large, by the time you get to the end the first elements will have been evicted from the H/W cache (e.g. most CPU caches are on the order of a few megabytes).
However, if x only has values in a limited range, changing the type from int x[...] to short x[...] or even char x[...] cuts its size by a factor of 2x [or 4x], and that can have a measurable improvement on performance.
Update: I've added an fripper function to show the fastest way [that I know of] to access the table and x arrays in a loop. I've also added a typedef named xval_t to allow the x array to consume less space (i.e. it will have better H/W cache performance).
UPDATE #2:
Per your comments ...
fcached was coded [mostly] to illustrate simple/single access. But, it was not used in the final example.
The exact requirements for inline have varied over the years (e.g. there was extern inline). The best usage now: static inline. However, if you're using C++ it may, yet again, be different. There are entire pages devoted to this, because of how compilation works across different .c files and what happens when optimization is on or off. Also, consider using a gcc extension. So, to force inline all the time:
__attribute__((__always_inline__)) static inline
fripper is the fastest because it avoids refetching the globals table_of_values and table_bias on each loop iteration. In fripper, the compiler's optimizer will ensure they remain in registers. See my answer Is accessing statically or dynamically allocated memory faster? as to why.
However, I coded an fripper variant that uses fcached and the disassembled code was the same [and optimal]. So, we can disregard that... Or can we? Sometimes disassembling the code is a good cross-check and the only way to know for sure; just an extra step when creating fully optimized C code. There are many options one can give to the compiler regarding code generation, so sometimes it's just trial and error.
Because benchmarking is important, I threw in my routines for timestamping (FYI, [AFAIK] the underlying clock_gettime call is the basis for python's time.clock()).
So, here's the updated version:
#include <stdlib.h>
#include <time.h>
typedef long long s64;
#define SUPER_INLINE \
    __attribute__((__always_inline__)) static inline

#define VARIABLE_USED(_sym) \
    do { \
        if (1) \
            break; \
        if (!! _sym) \
            break; \
    } while (0)
#define TVSEC 1000000000LL // nanoseconds in a second
#define TVSECF 1e9 // nanoseconds in a second
// tvget -- get high resolution time of day
// RETURNS: absolute nanoseconds
s64
tvget(void)
{
    struct timespec ts;
    s64 nsec;

    clock_gettime(CLOCK_REALTIME, &ts);
    nsec = ts.tv_sec;
    nsec *= TVSEC;
    nsec += ts.tv_nsec;

    return nsec;
}

// tvgetf -- get high resolution time of day
// RETURNS: fractional seconds
double
tvgetf(void)
{
    struct timespec ts;
    double sec;

    clock_gettime(CLOCK_REALTIME, &ts);
    sec = ts.tv_nsec;
    sec /= TVSECF;
    sec += ts.tv_sec;

    return sec;
}
double *table_of_values;
int table_bias;
double *dummyptr;
// use the smallest of these that can contain the values the x array may have
#if 0
typedef int xval_t;
#endif
#if 0
typedef short xval_t;
#endif
#if 1
typedef char xval_t;
#endif
#define XLEN (1 << 9)
xval_t *x;
// fslow -- your original function
double
fslow(int i)
{
    return 1; // whatever
}

// ftablegen -- generate variable table
void
ftablegen(double (*f)(int), int lo, int hi)
{
    int len;

    table_bias = -lo;
    len = hi - lo;
    len += 1;

    // NOTE: you can do free(table_of_values) when no longer needed
    table_of_values = malloc(sizeof(double) * len);

    for (int i = lo; i <= hi; ++i)
        table_of_values[i + table_bias] = f(i);
}

// fcached -- retrieve cached table data
SUPER_INLINE double
fcached(int i)
{
    return table_of_values[i + table_bias];
}

// fripper_fcached -- access x and table arrays
void
fripper_fcached(xval_t *x)
{
    double val;
    double *dptr;

    dptr = dummyptr;

    for (int i = 0; i < XLEN; ++i) {
        val = fcached(x[i]);
        // do stuff with val
        dptr[i] = val;
    }
}

// fripper -- access x and table arrays
void
fripper(xval_t *x)
{
    double *tptr;
    int bias;
    double val;
    double *dptr;

    // ensure these go into registers to prevent needless extra memory fetches
    tptr = table_of_values;
    bias = table_bias;
    dptr = dummyptr;

    for (int i = 0; i < XLEN; ++i) {
        val = tptr[x[i] + bias];
        // do stuff with val
        dptr[i] = val;
    }
}

int
main(void)
{
    ftablegen(fslow, -10, 10);
    x = malloc(sizeof(xval_t) * XLEN);
    dummyptr = malloc(sizeof(double) * XLEN);

    fripper(x);
    fripper_fcached(x);

    return 0;
}
You can have negative indices in your arrays. (I am not sure if this is in the specifications.) If you have the following code:
int arr[] = {1, 2 ,3, 4, 5};
int* lookupTable = arr + 3;
printf("%i", lookupTable[-2]);
it will print out 2.
This works because array indexing in C is defined in terms of pointer arithmetic. If the pointer does not point to the beginning of the array, you can access the items before the place it points to.
Keep in mind though that if you have to malloc() the memory for arr, you cannot call free(lookupTable) to free it; free() must be given the original pointer.
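A minimal sketch of the malloc() case, with squared values standing in for the real f(i), where the original pointer is kept around for free():

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* table for indices -10..10: allocate 21 slots, then bias the pointer */
    double *storage = malloc(21 * sizeof(double));
    if (storage == NULL)
        return 1;
    double *lookup = storage + 10;   /* lookup[-10] .. lookup[10] are valid */

    for (int i = -10; i <= 10; i++)
        lookup[i] = (double) i * i;  /* stand-in for f(i) */

    printf("%f\n", lookup[-3]);      /* prints 9.000000 */

    free(storage);                   /* free the original pointer, not lookup */
    return 0;
}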
I really think Craig Estey is on the right track for building your table in an automatic way. I just want to add a note on looking up the table.
If you know that you will run the code on a Haswell machine (with AVX2), you should make sure your code uses VGATHERDPD, which you can reach through the _mm256_i32gather_pd intrinsic. If you do that, your table lookups will fly! (You can even detect AVX2 on the fly with cpuid(), but that's another story.)
EDIT:
Let me elaborate with some code:
#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>
/* I'm not sure if you need the alignment */
double table[8] __attribute__((aligned(16)))= { 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8 };
int main()
{
    int32_t i[4] = { 0, 2, 4, 6 };
    __m128i index = _mm_load_si128( (__m128i*) i );
    __m256d result = _mm256_i32gather_pd( table, index, 8 );

    double* f = (double*)&result;
    printf("%f %f %f %f\n", f[0], f[1], f[2], f[3]);
    return 0;
}
Compile and run:
$ gcc --std=gnu99 -mavx2 gathertest.c -o gathertest && ./gathertest
0.100000 0.300000 0.500000 0.700000
This is fast!
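If you cannot assume AVX2 at build time, gcc's __builtin_cpu_supports builtin (available since gcc 4.8) gives you the runtime check alluded to above; a minimal dispatch sketch, with the two code paths left as stubs:

#include <stdio.h>

/* Runtime dispatch between an AVX2 gather path and a scalar fallback.
   gcc-specific; the actual lookup loops are left as stubs here. */
int main(void)
{
    if (__builtin_cpu_supports("avx2"))
        printf("use the _mm256_i32gather_pd path\n");
    else
        printf("use a plain scalar lookup loop\n");
    return 0;
}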
I am implementing a topological sort in Haskell with the requirement to be as efficient as possible. I have profiled my current solution and found that the following method is taking 60% of total time (and no additional space):
import Control.Monad.ST
import Control.Monad
import Data.Array.ST
import Data.Array.Unboxed
import Data.Word
import Data.Array.Base
zeroElementsAfterDecrement' :: (MArray a e m, Num e, Eq e) => a Int e -> [Int] -> m [Int]
zeroElementsAfterDecrement' arr is = foldr k (return []) is
  where k i a = do xs <- a
                   decremented <- liftM (subtract 1) (unsafeRead arr i)
                   unsafeWrite arr i decremented
                   if decremented == 0 then return (i:xs) else return xs

largenum :: Int
largenum = 10000000

test = runST $ do arr <- newArray (1, largenum) 100 :: ST s (STUArray s Int Word32)
                  zeroElementsAfterDecrement' arr [1..largenum]

main = (putStrLn . show) test
The function takes an array (I use unboxed mutable arrays) and a list of indexes, decrements the elements at these indexes, and returns the indexes of elements that became zero during this operation. Right now this is more than 10 times slower than optimized C++ code, but still pretty good compared to Python (or maybe I don't know the Python way to optimize this). I understand there is overhead from executing monadic code, but maybe there are still ways to optimize that I am not aware of?
Edit:
GHC: -O -fllvm: 0.54s
GHC (with unsafeWrite/unsafeRead and Word32): 0.34s
g++: 0.24s
g++ -O2: 0.05s
python3: 2.66s
Also, when I change foldr to foldl' it starts allocating some memory and becomes 4 times slower as a result; why is that?
Here is a C++ version I compared it to:
#include <iostream>
#include <vector>
using namespace std;
#define LARGENUM 10000000
int main()
{
    vector <int> arr;

    for (int i = 0; i < LARGENUM; i++) {
        arr.push_back(100);
    }

    for (int i = 0; i < arr.size(); i++) {
        arr[i]--;
        if (arr[i] == 0)
            cout << i << endl;
    }
    return 0;
}
And a Python version:
arr = [100] * 10000000
for x in range(0, 10000000 - 1):
    arr[x] = arr[x] - 1
    if arr[x] == 0:
        print x
How can you call a function written in C from R, using R data?
E.g. to use a function like:
#include <stdlib.h>

double* addOneToVector(int n, const double* vector) {
    double* ans = malloc(sizeof(double) * n);
    for (int i = 0; i < n; ++i)
        ans[i] = vector[i] + 1;
    return ans;
}
in the context:
x = 1:3
x = addOneToVector(x)
x # 2, 3, 4
I've searched Stack Overflow first, but noticed there is no answer for this here.
The general idea is (commands are for Linux, but the same idea applies to other OSes):
Create a function that takes only pointers to basic types and does everything by side effects (returning void), e.g. in a file called foo.c:
void addOneToVector(int* n, double* vector) {
    for (int i = 0; i < *n; ++i)
        vector[i] += 1.0;
}
Compile the C source file as a dynamic library; you can use an R shortcut to do this:
$ R CMD SHLIB foo.c
This will create a file called foo.so on Linux and Mac, or foo.dll on Windows.
Load the dynamic library from R
on Mac:
dyn.load("foo.so")
or on Windows:
dyn.load("foo.dll")
Call the C function using R's .C function, i.e.:
x = 1:3
ret_val = .C("addOneToVector", n=length(x), vector=as.double(x))
It returns a list from which you can get the values of the inputs after calling the function, e.g.
ret_val$vector # 2, 3, 4
You can now wrap it to make it easier to use from R.
There is a nice page describing the whole process in more detail here (it also covers Fortran):
http://users.stat.umn.edu/~geyer/rc/
I just did the same thing in a very simple way using the Rcpp package. It allows you to write C++ functions directly in R.
library("Rcpp")
cppFunction("
NumericVector addOneToVector(NumericVector vector) {
    int n = vector.size();
    for (int i = 0; i < n; ++i)
        vector[i] = vector[i] + 1.0;
    return vector;
}")
Find more details here: http://adv-r.had.co.nz/Rcpp.html. With these instructions, C++ functions can be put together very quickly.
First off, I wanted to thank both @m0nhawk and @Jan for their immensely useful contributions to this problem.
I tried both methods on my MacBook: first the one m0nhawk showed, which requires creating a function in C (without the main method), compiling it using R CMD SHLIB <prog.c>, and then invoking the function from R using the .C command.
Here's a small piece of C code I wrote (I'm not a pro in C, just learning in bits and pieces):
Step 1: Write the C Program
#include <stdio.h>
int func_test() {
    for (int i = 0; i < 5; i++) {
        printf("The value of i is: %d\n", i);
    }
    return 0;
}
Step 2: Compile the program using
R CMD SHLIB func_test.c
This will produce a func_test.so file
Step 3: Now write the R code that invokes this C function from within RStudio
dyn.load("/users/my_home_dir/xxx/ccode/ac.so")
.C("func_test")
Step 4: Output:
.C("func_test") The value of i is: 0 The value of i is: 1 The value of i is: 2 The value of i is: 3 The value of i is: 4 list()
Then I tried the direct method suggested by Jan, using the Rcpp package:
library("Rcpp")
cppFunction("
NumericVector addOneToVector(NumericVector vector) {
    int n = vector.size();
    for (int i = 0; i < n; ++i)
        vector[i] = vector[i] + 1.0;
    return vector;
}")
# Test code to test the function
addOneToVector(c(1,2,3))
Both methods worked superbly. I can now start writing functions in C or C++ and use them in R
Thank you once again!
EDIT
In the initial posting's code snippet (see below) I was not properly sending the struct to the device; this has been fixed, but the results are still the same. In my full code this mistake was not present. (There were two mistakes in that command in my initial posting: the structure was supposedly being copied HostToDevice, but the direction was actually reversed, and the size of the copy was also wrong. Apologies; both errors were fixed, but the recompiled code still displays the zeros phenomenon described below, as does my full code.)
EDIT 2
In the haste of my de-proprietarization rewrite of the code I made a couple of errors, which dalekchef kindly pointed out to me (in my rewritten code the copy of the struct to the device was performed BEFORE the allocation on the device, and the device cudaMalloc calls were not multiplied by the sizeof(...) of the array element type). I added these fixes, recompiled and retested, but they did not fix the problem. I also double-checked my original code -- it did not have those mistakes. Apologies again for the confusion.
I'm trying to dump statistics from a large simulations program. A similar, pared-down code is displayed below. Both codes exhibit the same problem: they output zeroes when they should be outputting averaged values.
#include "stdio.h"
struct __align__(8) DynamicVals
{
    double a;
    double b;
    int n1;
    int n2;
    int perDump;
};
__device__ int *dev_arrN1, *dev_arrN2;
__device__ double *dev_arrA, *dev_arrB;
__device__ DynamicVals *dev_myVals;
__device__ int stepsA, stepsB;
__device__ double sumA, sumB;
__device__ int stepsN1, stepsN2;
__device__ int sumN1, sumN2;
__global__ void TEST(int step, double dev_arrA[], double dev_arrB[],
                     int dev_arrN1[], int dev_arrN2[], DynamicVals *dev_myVals)
{
    if (step % dev_myVals->perDump)
    {
        dev_arrN1[step/dev_myVals->perDump] = 0;
        dev_arrN2[step/dev_myVals->perDump] = 0;
        dev_arrA[step/dev_myVals->perDump] = 0.0;
        dev_arrB[step/dev_myVals->perDump] = 0.0;
        stepsA = 0;
        stepsB = 0;
        stepsN1 = 0;
        stepsN2 = 0;
        sumA = 0.0;
        sumB = 0.0;
        sumN1 = 0;
        sumN2 = 0;
    }

    sumA += dev_myVals->a;
    sumB += dev_myVals->b;
    sumN1 += dev_myVals->n1;
    sumN2 += dev_myVals->n2;
    stepsA++;
    stepsB++;
    stepsN1++;
    stepsN2++;

    if ( sumA > 100000000 )
    {
        dev_arrA[step/dev_myVals->perDump] += sumA / stepsA;
        sumA = 0.0;
        stepsA = 0;
    }
    if ( sumB > 100000000 )
    {
        dev_arrB[step/dev_myVals->perDump] += sumB / stepsB;
        sumB = 0.0;
        stepsB = 0;
    }
    if ( sumN1 > 1000000 )
    {
        dev_arrN1[step/dev_myVals->perDump] += sumN1 / stepsN1;
        sumN1 = 0;
        stepsN1 = 0;
    }
    if ( sumN2 > 1000000 )
    {
        dev_arrN2[step/dev_myVals->perDump] += sumN2 / stepsN2;
        sumN2 = 0;
        stepsN2 = 0;
    }

    if ((step+1) % dev_myVals->perDump)
    {
        dev_arrA[step/dev_myVals->perDump] += sumA / stepsA;
        dev_arrB[step/dev_myVals->perDump] += sumB / stepsB;
        dev_arrN1[step/dev_myVals->perDump] += sumN1 / stepsN1;
        dev_arrN2[step/dev_myVals->perDump] += sumN2 / stepsN2;
    }
}
int main()
{
    const int TOTAL_STEPS = 10000000;
    DynamicVals vals;
    int *arrN1, *arrN2;
    double *arrA, *arrB;
    int statCnt;

    vals.perDump = TOTAL_STEPS/10;
    statCnt = TOTAL_STEPS/vals.perDump+1;
    vals.a = 30000.0;
    vals.b = 60000.0;
    vals.n1 = 10000;
    vals.n2 = 20000;

    cudaMalloc( (void**)&dev_arrA, statCnt*sizeof(double) );
    cudaMalloc( (void**)&dev_arrB, statCnt*sizeof(double) );
    cudaMalloc( (void**)&dev_arrN1, statCnt*sizeof(int) );
    cudaMalloc( (void**)&dev_arrN2, statCnt*sizeof(int) );
    cudaMalloc( (void**)&dev_myVals, sizeof(DynamicVals) );
    cudaMemcpy(dev_myVals, &vals, sizeof(DynamicVals),
               cudaMemcpyHostToDevice);

    arrA = (double *)malloc(statCnt * sizeof(double));
    arrB = (double *)malloc(statCnt * sizeof(double));
    arrN1 = (int *)malloc(statCnt * sizeof(int));
    arrN2 = (int *)malloc(statCnt * sizeof(int));

    for (int i = 0; i < TOTAL_STEPS; i++)
        TEST<<<1,1>>>(i, dev_arrA, dev_arrB, dev_arrN1, dev_arrN2, dev_myVals);

    cudaMemcpy(arrA, dev_arrA, statCnt * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(arrB, dev_arrB, statCnt * sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(arrN1, dev_arrN1, statCnt * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(arrN2, dev_arrN2, statCnt * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < statCnt; i++)
    {
        printf("Step: %d ; A=%g B=%g N1=%d N2=%d\n",
               i*vals.perDump,
               arrA[i], arrB[i], arrN1[i], arrN2[i]);
    }
}
Output:
Step: 0 ; A=0 B=0 N1=0 N2=0
Step: 1000000 ; A=0 B=0 N1=0 N2=0
Step: 2000000 ; A=0 B=0 N1=0 N2=0
Step: 3000000 ; A=0 B=0 N1=0 N2=0
Step: 4000000 ; A=0 B=0 N1=0 N2=0
Step: 5000000 ; A=0 B=0 N1=0 N2=0
Step: 6000000 ; A=0 B=0 N1=0 N2=0
Step: 7000000 ; A=0 B=0 N1=0 N2=0
Step: 8000000 ; A=0 B=0 N1=0 N2=0
Step: 9000000 ; A=0 B=0 N1=0 N2=0
Step: 10000000 ; A=0 B=0 N1=0 N2=0
Now, if I were to use a small period for my dumps or if my numbers were smaller, I could get away with just a direct
add
divide by the period at the end of the period
...algorithm, but I use temporary sums as otherwise my int would overflow (the double wouldn't overflow, but I was concerned about it losing precision).
If I use the above direct algorithm for smaller values I get correct non-zero values, but the second I use the intermediates (e.g. stepsA, sumA, etc.) the values go to zero.
I know I'm doing something silly here... what am I missing?
Notes:
A.) Yes, I know this code in its above form is not parallel and by itself does not warrant parallelization. It is part of a small statistics-collecting portion of a much longer code. In that code it is encased in thread-index-specific conditional logic to prevent clashing (making it parallel) and serves as data gathering for a simulations program (which warrants parallelization). Hopefully you can understand where the above code originates and avoid snide comments about its lack of thread-safety. (This disclaimer is added out of past experience receiving unproductive comments from people who didn't understand I was posting an excerpt, not a full code.)
B.) Yes, I know the names of the variables are ambiguous. That is the point. The code I'm working on is proprietary, though it will eventually be open-sourced. I only mention this because I have posted similarly anonymized codes in the past and received rude commentary about my naming convention.
C.) Yes, I have read the CUDA manual several times, though I do make errors and admit there are some features I don't understand. I'm not using shared memory here, but I am using it (OF COURSE) in my full code.
D.) Yes, the above code does represent the exact same features as the data-dumping portion of my non-working code, with the logic not related to this particular problem removed, along with the thread-safety conditional. The variable names have been changed, but algorithmically it should be unaltered, and this is verified by the exact same non-working output (zeroes).
E.) I do realize the "dynamic" struct in the above snippet has non-dynamic values. I named the structure that because in the full code this struct contains simulations data and is dynamic. The static nature of the pared-down code should not make the statistics-collecting code fail; it will simply mean that the average for each dump should be constant (and non-zero).
A couple of things:
It seems like you are calling cudaMemcpy for dev_myVals before you call cudaMalloc for it. This is not how it should be.
ALSO: You do not multiply by sizeof(int) when you do your cudaMalloc calls.
You should really check all of your CUDA calls (cudaMalloc/cudaMemcpy) for an error code. They should all return cudaSuccess or an error. I believe the CUDA examples all show how to do this.
Also, for future reference, NEVER use the modulo operator in CUDA; it is incredibly slow. Just Google "modulo CUDA" for some alternatives.
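As a sketch of one such alternative (plain host C here, not CUDA; in the question's kernel the counter would have to persist in device memory across launches): keep a small wrapping counter instead of computing step % perDump every step:

#include <stdio.h>

/* Sketch: replace per-step (step % perDump) tests with a wrapping counter,
   since integer division/modulo is expensive on the GPU. Names mirror the
   question's code but the loop body is illustrative only. */
int main(void)
{
    const int TOTAL_STEPS = 100;
    const int perDump = 10;
    int counter = 0;
    int periods = 0;

    for (int step = 0; step < TOTAL_STEPS; step++) {
        if (counter == 0)
            periods++;          /* start-of-period reset would go here */
        /* ... per-step accumulation ... */
        if (++counter == perDump)
            counter = 0;        /* wrap without a modulo */
    }
    printf("%d periods\n", periods);   /* prints: 10 periods */
    return 0;
}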
Let me know how it goes, this will probably take a couple of iterations to fix.
The biggest problem I see here is one of scope. The way this code is written leads me to conclude that you might not understand how variable scoping in C++ works in general, and how device and host code scope works in CUDA in particular. A couple of observations:
When you do this type of thing in code:
__device__ double *dev_arrA, *dev_arrB;
__global__ void TEST(int step, double dev_arrA[], double dev_arrB[], ....)
you have a variable scope problem. dev_arrA is declared at both compilation unit scope and function scope. The two declarations do not refer to the same variable: inside the kernel, the function-scope declaration (the kernel argument) takes precedence over the compilation-unit-scope declaration. When you modify that variable, you are modifying the kernel-scope argument, not the __device__ variable. This can lead to all sorts of subtle and unexpected behaviour. It is much better to avoid ever having the same variable declared at multiple scopes.
When you declare a variable using the __device__ specifier, it is intended to be exclusively a device context symbol, and should only be used directly in device code. So something like this:
__device__ double *dev_arrA;

int main()
{
    ....
    cudaMalloc( (void**)&dev_arrA, statCnt*sizeof(double) );
    ....
}
is illegal. You cannot call an API function like cudaMalloc directly on a __device__ variable. Even though it will compile (because of the hackery involved in the CUDA compilation trajectories for host and device code), it is incorrect to do so. In the above example dev_arrA is a device symbol. You can interact with it via the API symbol-manipulation calls, but that is all it is technically legal to do. In your code, variables intended to hold device pointers and be passed as kernel arguments (like dev_arrA) should be declared at main() scope and passed by value to the kernel.
It is a combination of the above two things which is probably causing your problems.
But the difficulty is that you have chosen to post roughly 150 lines of code (a lot of which is redundant) as a repro case. I doubt anyone cares enough about your problem to go through that much code with a fine-tooth comb and pinpoint precisely where the problem is. Further, your habit of doing these nasty "top edits" in your questions quickly turns what might have been a reasonably written starting point into an unintelligible pseudo-changelog which is incredibly hard to follow and unlikely to be of help to anyone. Also, the mildly passive-aggressive notes section serves no real purpose; it adds nothing of value to the question.
So I will leave you with a greatly simplified version of the code you posted, which I think has all the basic things you are trying to do working. I leave it as an "exercise for the reader" to turn it back into whatever it is you are trying to do.
#include "stdio.h"
typedef float Real;
struct __align__(8) DynamicVals
{
    Real a;
    int n1;
    int perDump;
};
__device__ int stepsA;
__device__ Real sumA;
__device__ int stepsN1;
__device__ int sumN1;
__global__ void TEST(int step, Real dev_arrA[], int dev_arrN1[],
                     DynamicVals *dev_myVals)
{
    if (step % dev_myVals->perDump)
    {
        dev_arrN1[step/dev_myVals->perDump] = 0;
        dev_arrA[step/dev_myVals->perDump] = 0.0;
        stepsA = 0;
        stepsN1 = 0;
        sumA = 0.0;
        sumN1 = 0;
    }

    sumA += dev_myVals->a;
    sumN1 += dev_myVals->n1;
    stepsA++;
    stepsN1++;

    dev_arrA[step/dev_myVals->perDump] += sumA / stepsA;
    dev_arrN1[step/dev_myVals->perDump] += sumN1 / stepsN1;
}
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),
                file, line);
        if (abort) exit(code);
    }
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
int main()
{
    const int TOTAL_STEPS = 1000;
    DynamicVals vals;
    int *arrN1;
    Real *arrA;
    int statCnt;

    vals.perDump = TOTAL_STEPS/10;
    statCnt = TOTAL_STEPS/vals.perDump;
    vals.a = 30000.0;
    vals.n1 = 10000;

    Real *dev_arrA;
    int *dev_arrN1;
    DynamicVals *dev_myVals;

    gpuErrchk( cudaMalloc( (void**)&dev_arrA, statCnt*sizeof(Real)) );
    gpuErrchk( cudaMalloc( (void**)&dev_arrN1, statCnt*sizeof(int)) );
    gpuErrchk( cudaMalloc( (void**)&dev_myVals, sizeof(DynamicVals)) );
    gpuErrchk( cudaMemcpy(dev_myVals, &vals, sizeof(DynamicVals),
                          cudaMemcpyHostToDevice) );

    arrA = (Real *)malloc(statCnt * sizeof(Real));
    arrN1 = (int *)malloc(statCnt * sizeof(int));

    for (int i = 0; i < TOTAL_STEPS; i++) {
        TEST<<<1,1>>>(i, dev_arrA, dev_arrN1, dev_myVals);
        gpuErrchk( cudaPeekAtLastError() );
    }

    gpuErrchk( cudaMemcpy(arrA, dev_arrA, statCnt * sizeof(Real),
                          cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(arrN1, dev_arrN1, statCnt * sizeof(int),
                          cudaMemcpyDeviceToHost) );

    for (int i = 0; i < statCnt; i++)
    {
        printf("Step: %d ; A=%g N1=%d\n",
               i*vals.perDump, arrA[i], arrN1[i]);
    }
}