I want to know how performance is affected by using cudaMalloc versus cudaMalloc3D when allocating, copying, and accessing memory for a 2D array. To test this, I wrote two versions of the same code, one using cudaMalloc and the other cudaMalloc3D; both are included below. An explanation of how performance is impacted by either API would be much appreciated.
cudaMalloc code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.14159265
#define NX 8192 /* includes boundary points on both ends */
#define NY 4096 /* includes boundary points on both ends */
#define N_THREADS_X 16
#define N_THREADS_Y 16
#define N_BLOCKS_X NX/N_THREADS_X
#define N_BLOCKS_Y NY/N_THREADS_Y
#define LX 4.0 /* length of the domain in x-direction */
#define LY 2.0 /* length of the domain in y-direction */
#define dx (REAL) ( LX/( (REAL) (NX) ) )
#define cSqrd 5.0
#define dt (REAL) ( 0.4 * dx / sqrt(cSqrd) )
#define FACTOR ( cSqrd * (dt*dt)/(dx*dx) )
#define IC (i + j*NX) /* (i,j) */
#define IM1 (i + j*NX - 1) /* (i-1,j) */
#define IP1 (i + j*NX + 1) /* (i+1,j) */
#define JM1 (i + (j-1)*NX) /* (i,j-1) */
#define JP1 (i + (j+1)*NX) /* (i,j+1) */
#define cudaCheckError() {\
cudaError_t e = cudaGetLastError() ; \
if( e != cudaSuccess ) {\
printf("\nCuda Failure %s:%d: %s\n",__FILE__,__LINE__,cudaGetErrorString(e));\
exit(EXIT_FAILURE);\
}\
}
typedef double REAL;
typedef int INT;
__global__ void solveWaveGPU ( REAL *uold, REAL *u, REAL *unew )
{
INT i,j;
i = blockIdx.x*blockDim.x + threadIdx.x;
j = blockIdx.y*blockDim.y + threadIdx.y;
if (i>0 && i < (NX-1) && j>0 && j < (NY-1) ) {
unew[IC] = 2.0*u[IC] - uold[IC] + FACTOR*( u[IP1] + u[IM1] + u[JP1] + u[JM1] - 4.0*u[IC] );
}
}
void initWave ( REAL *unew, REAL *u, REAL *uold, REAL *x, REAL *y )
{
INT i,j;
for (j=1; j<NY-1; j++) {
for (i=1; i<NX-1; i++) {
u[IC] = 0.1 * (4.0*x[IC]-x[IC]*x[IC]) * ( 2.0*y[IC] - y[IC]*y[IC] );
}
}
for (j=1; j<NY-1; j++) {
for (i=1; i<NX-1; i++) {
uold[IC] = u[IC] + 0.5*FACTOR*( u[IP1] + u[IM1] + u[JP1] + u[JM1] - 4.0*u[IC] );
}
}
}
void meshGrid ( REAL *x, REAL *y )
{
INT i,j;
REAL a;
for (j=0; j<NY; j++) {
a = dx * ( (REAL) j );
for (i=0; i<NX; i++) {
x[IC] = dx * ( (REAL) i );
y[IC] = a;
}
}
}
INT main(INT argc, char *argv[])
{
INT nTimeSteps = 100;
REAL *unew, *u, *uold, *uFinal, *x, *y; //pointers for the host side
REAL *d_unew, *d_u, *d_uold, *tmp; //pointers for the device
// variable declaration for timing
cudaEvent_t timeStart, timeStop;
cudaEventCreate(&timeStart);
cudaEventCreate(&timeStop);
float elapsedTime_gpu;
unew = (REAL *)calloc(NX*NY,sizeof(REAL));
u = (REAL *)calloc(NX*NY,sizeof(REAL));
uold = (REAL *)calloc(NX*NY,sizeof(REAL));
uFinal = (REAL *)calloc(NX*NY,sizeof(REAL));
x = (REAL *)calloc(NX*NY,sizeof(REAL));
y = (REAL *)calloc(NX*NY,sizeof(REAL));
// create device copies of the variables
cudaMalloc( (void**) &d_unew, NX*NY*sizeof(REAL) ); cudaCheckError();
cudaMalloc( (void**) &d_u, NX*NY*sizeof(REAL) ); cudaCheckError();
cudaMalloc( (void**) &d_uold, NX*NY*sizeof(REAL) ); cudaCheckError();
meshGrid( x, y );
initWave( unew, u, uold, x, y );
// start timing the GPU
cudaMemcpy( d_u, u, NX*NY*sizeof(REAL), cudaMemcpyHostToDevice ); cudaCheckError();
cudaMemcpy( d_uold, uold, NX*NY*sizeof(REAL), cudaMemcpyHostToDevice ); cudaCheckError();
cudaMemcpy( d_unew, unew, NX*NY*sizeof(REAL), cudaMemcpyHostToDevice ); cudaCheckError();
// set up the GPU grid/block model
dim3 dimGrid ( N_BLOCKS_X , N_BLOCKS_Y );
dim3 dimBlock ( N_THREADS_X, N_THREADS_Y );
// launch the GPU kernel
cudaEventRecord(timeStart, 0);
for (INT n=1; n<nTimeSteps+1; n++) {
solveWaveGPU <<<dimGrid,dimBlock>>>(d_uold, d_u, d_unew);
cudaDeviceSynchronize();
cudaCheckError();
tmp = d_uold;
d_uold = d_u;
d_u = d_unew;
d_unew = tmp;
}
cudaEventRecord(timeStop, 0);
cudaEventSynchronize(timeStop);
cudaEventElapsedTime(&elapsedTime_gpu, timeStart, timeStop);
cudaMemcpy( uFinal, d_u, NX*NY*sizeof(REAL), cudaMemcpyDeviceToHost ); cudaCheckError();
printf("elapsedTime on the GPU= %f s.\n", elapsedTime_gpu/1000.0);
free(unew); free(u); free(uold);
cudaFree(d_unew); cudaFree(d_u); cudaFree(d_uold);
free(uFinal); free(x); free(y);
cudaEventDestroy(timeStart);
cudaEventDestroy(timeStop);
return (0);
}
cudaMalloc3D code:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.14159265
#define NX 8192 /* includes boundary points on both ends */
#define NY 4096 /* includes boundary points on both ends */
#define NZ 1 /* needed for cudaMalloc3D */
#define N_THREADS_X 16
#define N_THREADS_Y 16
#define N_BLOCKS_X NX/N_THREADS_X
#define N_BLOCKS_Y NY/N_THREADS_Y
#define LX 4.0 /* length of the domain in x-direction */
#define LY 2.0 /* length of the domain in y-direction */
#define dx (REAL) ( LX/( (REAL) (NX) ) )
#define cSqrd 5.0
#define dt (REAL) ( 0.4 * dx / sqrt(cSqrd) )
#define FACTOR ( cSqrd * (dt*dt)/(dx*dx) )
#define IC (i + j*NX) /* (i,j) */
#define IM1 (i + j*NX - 1) /* (i-1,j) */
#define IP1 (i + j*NX + 1) /* (i+1,j) */
#define JM1 (i + (j-1)*NX) /* (i,j-1) */
#define JP1 (i + (j+1)*NX) /* (i,j+1) */
#define cudaCheckError() {\
cudaError_t e = cudaGetLastError() ; \
if( e != cudaSuccess ) {\
printf("\nCuda Failure %s:%d: %s\n",__FILE__,__LINE__,cudaGetErrorString(e));\
exit(EXIT_FAILURE);\
}\
}
typedef double REAL;
typedef int INT;
__global__ void solveWaveGPU ( cudaPitchedPtr uold, cudaPitchedPtr u, cudaPitchedPtr unew )
{
INT i,j;
i = blockIdx.x*blockDim.x + threadIdx.x;
j = blockIdx.y*blockDim.y + threadIdx.y;
if (i>0 && i < (NX-1) && j>0 && j < (NY-1) ) {
char *d_u = (char *) u.ptr;
char *d_uold = (char *) uold.ptr;
char *d_unew = (char *) unew.ptr;
REAL *u_row = (REAL *)(d_u + j * u.pitch);
REAL u_JP1 = ( (REAL *)(d_u + (j+1) * u.pitch) )[i]; /* (i,j+1) */
REAL u_JM1 = ( (REAL *)(d_u + (j-1) * u.pitch) )[i]; /* (i,j-1) */
REAL u_IP1 = u_row[i+1]; /* (i+1,j) */
REAL u_IM1 = u_row[i-1]; /* (i-1,j) */
REAL u_IC = u_row[i];
REAL uold_IC = ( (REAL *)(d_uold + j * uold.pitch) )[i];
REAL *unew_row = (REAL *)(d_unew + j * unew.pitch);
unew_row[i] = 2.0 * u_IC - uold_IC + FACTOR * ( u_IP1 + u_IM1 + u_JP1 + u_JM1 - 4.0 * u_IC );
}
}
void initWave ( REAL *unew, REAL *u, REAL *uold, REAL *x, REAL *y )
{
INT i,j;
for (j=1; j<NY-1; j++) {
for (i=1; i<NX-1; i++) {
u[IC] = 0.1 * (4.0*x[IC]-x[IC]*x[IC]) * ( 2.0*y[IC] - y[IC]*y[IC] );
}
}
for (j=1; j<NY-1; j++) {
for (i=1; i<NX-1; i++) {
uold[IC] = u[IC] + 0.5*FACTOR*( u[IP1] + u[IM1] + u[JP1] + u[JM1] - 4.0*u[IC] );
}
}
}
void meshGrid ( REAL *x, REAL *y )
{
INT i,j;
REAL a;
for (j=0; j<NY; j++) {
a = dx * ( (REAL) j );
for (i=0; i<NX; i++) {
x[IC] = dx * ( (REAL) i );
y[IC] = a;
}
}
}
INT main(INT argc, char *argv[])
{
INT nTimeSteps = 100;
REAL *unew, *u, *uold, *uFinal, *x, *y; //pointers for the host side
// variable declaration for timing
cudaEvent_t timeStart, timeStop;
cudaEventCreate(&timeStart);
cudaEventCreate(&timeStop);
float elapsedTime_gpu;
unew = (REAL *)calloc(NX*NY,sizeof(REAL));
u = (REAL *)calloc(NX*NY,sizeof(REAL));
uold = (REAL *)calloc(NX*NY,sizeof(REAL));
uFinal = (REAL *)calloc(NX*NY,sizeof(REAL));
x = (REAL *)calloc(NX*NY,sizeof(REAL));
y = (REAL *)calloc(NX*NY,sizeof(REAL));
cudaExtent myExtent = make_cudaExtent(NX * sizeof(REAL), NY, NZ);
cudaPitchedPtr d_u, d_uold, d_unew, d_tmp;
// create device copies of the variables
cudaMalloc3D( &d_u , myExtent ); cudaCheckError();
cudaMalloc3D( &d_uold, myExtent ); cudaCheckError();
cudaMalloc3D( &d_unew, myExtent ); cudaCheckError();
meshGrid( x, y );
initWave( unew, u, uold, x, y );
cudaMemcpy3DParms cpy3D = { 0 };
cpy3D.extent = myExtent;
cpy3D.kind = cudaMemcpyHostToDevice;
// copy 3D from u to d_u
cpy3D.srcPtr = make_cudaPitchedPtr(u, NX*sizeof(REAL), NX, NY);
cpy3D.dstPtr = d_u;
cudaMemcpy3D( &cpy3D ); cudaCheckError();
// copy 3D from uold to d_uold
cpy3D.srcPtr = make_cudaPitchedPtr(uold, NX*sizeof(REAL), NX, NY);
cpy3D.dstPtr = d_uold;
cudaMemcpy3D( &cpy3D ); cudaCheckError();
// set up the GPU grid/block model
dim3 dimGrid ( N_BLOCKS_X , N_BLOCKS_Y );
dim3 dimBlock ( N_THREADS_X, N_THREADS_Y );
// launch the GPU kernel
// start timing the GPU
cudaEventRecord(timeStart, 0);
for (INT n=1; n<nTimeSteps+1; n++) {
solveWaveGPU <<<dimGrid,dimBlock>>>(d_uold, d_u, d_unew);
cudaDeviceSynchronize();
cudaCheckError();
d_tmp = d_uold;
d_uold = d_u;
d_u = d_unew;
d_unew = d_tmp;
}
cudaEventRecord(timeStop, 0);
cudaEventSynchronize(timeStop);
cudaEventElapsedTime(&elapsedTime_gpu, timeStart, timeStop);
// copy 3D from d_u to uFinal
cpy3D.kind = cudaMemcpyDeviceToHost;
cpy3D.srcPtr = d_u;
cpy3D.dstPtr = make_cudaPitchedPtr(uFinal, NX*sizeof(REAL), NX, NY);
cudaMemcpy3D( &cpy3D ); cudaCheckError();
printf("elapsedTime on the GPU= %f s.\n", elapsedTime_gpu/1000.0);
free(u); cudaFree(d_unew.ptr);
free(uold); cudaFree(d_u.ptr);
free(unew); cudaFree(d_uold.ptr);
free(uFinal); free(x); free(y);
cudaEventDestroy(timeStart);
cudaEventDestroy(timeStop);
return (0);
}
Timing:
cudaMalloc3D: 1.192510 s
cudaMalloc: 0.960322 s
Machine specification:
GNU/Linux x86_64
NVIDIA GeForce GTX Titan CC: 3.5
CUDA ver 7.0
The performance difference you observe is mostly due to the increased instruction overhead in the pitched memory indexing scheme. Because your array size is a large power of two in the major direction, it is very likely that the pitched array allocated with cudaMalloc3D is the same size as the naïve allocation using cudaMalloc. You may find that the performance difference between the two versions changes if you vary the problem size.
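You can verify this by printing the pitch that cudaMalloc3D actually chooses; here is a minimal sketch (my addition, reusing NX, NY, and the REAL typedef from the listings above). A row of 8192 doubles is 65536 bytes, already a multiple of the typical pitch alignment, so the pitch should equal the row width:
#include <stdio.h>
typedef double REAL;
#define NX 8192
#define NY 4096
int main(void)
{
    cudaPitchedPtr d_u;
    cudaExtent myExtent = make_cudaExtent(NX * sizeof(REAL), NY, 1);
    cudaMalloc3D(&d_u, myExtent);
    /* equal values mean the pitched layout matches the flat cudaMalloc layout */
    printf("row width = %zu bytes, pitch = %zu bytes\n",
           NX * sizeof(REAL), d_u.pitch);
    cudaFree(d_u.ptr);
    return 0;
}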
(Note also that there were reports of compiler regressions in CUDA 7 at the time. If you refactor your code to pass the Fourier number FACTOR as a kernel parameter, you will probably see a far bigger performance change than any difference due to pitched memory.)
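For reference, a minimal sketch of that refactor (my addition, reusing the macros from the listings above): evaluate FACTOR once on the host and pass it by value, so the dt/sqrt() arithmetic baked into the macro is no longer expanded inside the kernel:
__global__ void solveWaveGPU ( REAL *uold, REAL *u, REAL *unew, REAL factor )
{
    INT i = blockIdx.x*blockDim.x + threadIdx.x;
    INT j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i>0 && i < (NX-1) && j>0 && j < (NY-1) ) {
        unew[IC] = 2.0*u[IC] - uold[IC]
                 + factor*( u[IP1] + u[IM1] + u[JP1] + u[JM1] - 4.0*u[IC] );
    }
}
/* host side: the macro is evaluated once here, not inside the kernel */
const REAL factor = FACTOR;
solveWaveGPU<<<dimGrid,dimBlock>>>(d_uold, d_u, d_unew, factor);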
Related
I am a novice in the field of CUDA programming and I am trying to reproduce the functionality of cublasSgemmBatched, which means that I want to perform matrix-matrix multiplication on a batch of matrices. I tried to implement my idea with the following code.
#include <stdio.h>
__global__ void BatchMulCUDA(float* array1, float* array2, int narray1, int dim, float* result)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx < narray1 * dim)
{
float temp = 0;
int index = tx / dim;
#pragma
for (int i = 0; i < dim; i++)
{
temp += array1[tx * dim + i] * array2[index * dim + i];
}
result[tx] = temp;
}
}
void BatchMulGPU(float* array1, float* array2, int narray1, int dim, float* result)
{
dim3 threads(1024, 1);
dim3 grid(narray1 / 1024 + 1, 1);
int threadsPerBlock = threads.x * threads.y;
int blocksPerGrid = grid.x * grid.y;
printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
BatchMulCUDA<<<grid, threads>>>(array1, array2, narray1, dim, result);
}
However, strangely, I found that I get the right output up to index 19730. After element 19730, the output of the GPU is always 0. I do not know what the problem is. The CPU version of my code and the test function are as follows. Is there any hardware limitation that I am not aware of?
#include "kernel.h"
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <sys/time.h>
#include <math.h>
double cpuSecond()
{
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) tp.tv_sec + (double)tp.tv_usec*1e-6);
}
void BatchMulCPU(float* array1, float* array2, int narray1, int dim, float* result)
{
for (int i = 0; i < narray1 * dim; i++)
{
float temp = 0;
int index = i / dim;
for (int j = 0; j < dim; j++)
{
temp += array1[i * dim + j] * array2[index * dim + j];
}
result[i] = temp;
}
}
int main(int argc, char** argv)
{
int narray1 = 6980;
int dim = 4;
float* array1 = new float[narray1 * dim * dim];
float* array2 = new float[narray1 * dim];
float* resultGPU = new float[narray1 * dim];
float* resultCPU = new float[narray1 * dim];
float* d_array1;
float* d_array2;
float* d_result;
for (int i = 0; i < narray1 * dim * dim; i++)
{
array1[i] = static_cast<float> (rand() / (static_cast<float> (RAND_MAX / 10)));
}
for (int i = 0; i < narray1 * dim; i++)
{
array2[i] = static_cast<float> (rand() / (static_cast<float> (RAND_MAX / 10)));
}
cudaError_t err;
double iStart = cpuSecond();
err = cudaMalloc((void**)&d_array1, narray1 * dim * dim * sizeof(float));
err = cudaMalloc((void**)&d_array2, narray1 * dim * sizeof(float));
err = cudaMalloc((void**)&d_result, narray1 * dim * sizeof(float));
err = cudaMemcpy(d_array1, array1, narray1 * dim * dim * sizeof(float), cudaMemcpyHostToDevice);
err = cudaMemcpy(d_array2, array2, narray1 * dim * sizeof(float), cudaMemcpyHostToDevice);
BatchMulGPU(d_array1, d_array2, narray1, dim, d_result);
err = cudaMemcpy(resultGPU, d_result, narray1 * dim * sizeof(float), cudaMemcpyDeviceToHost);
double iElaps = cpuSecond() - iStart;
printf("Total GPU computation time is %lf \n" , iElaps);
iStart = cpuSecond();
BatchMulCPU(array1, array2, narray1, dim, resultCPU);
iElaps = cpuSecond() - iStart;
printf("Total CPU computation time is %lf \n" , iElaps);
float error = 0;
float temp = 0;
for (long i = 0; i < narray1 * dim; i++)
{
// temp = abs(resultCPU[i] - resultGPU[i]);
// if (temp > 0.5)
// {
// std::cout << i << std::endl;
// }
error += abs(resultCPU[i] - resultGPU[i]);
}
printf("Error is %f \n", error);
// for (int i = 19730; i < 19750; i++)
// {
// std::cout << "GPU " << resultGPU[i] << std::endl;
// std::cout << "CPU " << resultCPU[i] << std::endl;
// }
cudaFree(d_array1);
cudaFree(d_array2);
cudaFree(d_result);
return 0;
}
Apart from the possibility of a WDDM TDR timeout, the code has an error.
It's evident that the kernel design expects the total grid size (total number of threads launched) to be equal to or greater than the number of arrays times the side dimension:
int tx = blockIdx.x * blockDim.x + threadIdx.x;
if (tx < narray1 * dim)
i.e., narray1*dim threads are needed.
However, the grid sizing only accounts for narray1 threads:
dim3 threads(1024, 1);
dim3 grid(narray1 / 1024 + 1, 1);
If we change the last line above to:
dim3 grid((narray1*dim) / 1024 + 1, 1);
this code design error will be addressed.
The reason the code works correctly for a small number of matrices (anything up to 256) is the rounding-up effect in the grid sizing: the grid always launches at least 1024 threads, and 1024 = 256*4 (narray1 * dim).
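Putting it together, a corrected launcher might look like this (a sketch on my part; the ceiling division is a slightly tighter variant of the +1 form above):
void BatchMulGPU(float* array1, float* array2, int narray1, int dim, float* result)
{
    int total = narray1 * dim;                           /* one thread per output element */
    dim3 threads(1024, 1);
    dim3 grid((total + threads.x - 1) / threads.x, 1);   /* round up */
    BatchMulCUDA<<<grid, threads>>>(array1, array2, narray1, dim, result);
}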
As an aside, this code is not functionally similar to cublasSgemmBatched from what I can see. I don't recognize this code as being any matrix multiplication (matrix dot product) that I am familiar with.
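What the kernel above actually computes looks closer to a batched matrix-vector product (each dim x dim matrix times its own dim-length vector). If true cublasSgemmBatched behavior is the goal, here is a hedged usage sketch; d_Aarray, d_Barray, and d_Carray are hypothetical device arrays of device pointers that the caller has to prepare:
#include <cublas_v2.h>
/* sketch: batch of dim x dim matrices times dim x 1 vectors (n = 1) */
void batched_gemm(const float **d_Aarray, const float **d_Barray,
                  float **d_Carray, int dim, int batchCount)
{
    cublasHandle_t handle;
    cublasCreate(&handle);
    const float one = 1.0f, zero = 0.0f;
    cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                       dim, 1, dim,    /* m, n, k */
                       &one,
                       d_Aarray, dim,  /* lda */
                       d_Barray, dim,  /* ldb */
                       &zero,
                       d_Carray, dim,  /* ldc */
                       batchCount);
    cublasDestroy(handle);
}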
I am studying a CUDA C example (ripple.cu in chapter 5 of the CUDA by Example book); the file seems to compile without problems. Here's what I type in the terminal:
nvcc ripple.cu -lGL -lGLU -lX11 -lXi -lXmu -lglut -lGLEW
When I run the executable I should get an animated greyscale ripple like the one shown in the book; however, what I get instead is wrong. Here I post the file ripple.cu and the related header files:
// ripple.cu
#include "cuda.h"
#include "../common/book.h"
#include "../common/cpu_anim.h"
#define DIM 1024
#define PI 3.1415926535897932f
__global__ void kernel( unsigned char *ptr, int ticks ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
// now calculate the value at that position
float fx = x - DIM/2;
float fy = y - DIM/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char grey = (unsigned char)(128.0f + 127.0f *
cos(d/10.0f - ticks/7.0f) /
(d/10.0f + 1.0f));
ptr[offset*4 + 0] = grey;
ptr[offset*4 + 1] = grey;
ptr[offset*4 + 2] = grey;
ptr[offset*4 + 3] = 255;
}
struct DataBlock {
unsigned char *dev_bitmap;
CPUAnimBitmap *bitmap;
};
void generate_frame( DataBlock *d, int ticks ) {
dim3 blocks(DIM/16,DIM/16);
dim3 threads(16,16);
kernel<<<blocks,threads>>>( d->dev_bitmap, ticks );
HANDLE_ERROR( cudaMemcpy( d->bitmap->get_ptr(),
d->dev_bitmap,
d->bitmap->image_size(),
cudaMemcpyDeviceToHost ) );
}
// clean up memory allocated on the GPU
void cleanup( DataBlock *d ) {
HANDLE_ERROR( cudaFree( d->dev_bitmap ) );
}
int main( void ) {
DataBlock data;
CPUAnimBitmap bitmap( DIM, DIM, &data );
data.bitmap = &bitmap;
HANDLE_ERROR( cudaMalloc( (void**)&data.dev_bitmap,
bitmap.image_size() ) );
bitmap.anim_and_exit( (void (*)(void*,int))generate_frame,
(void (*)(void*))cleanup );
}
Now I post the headers, which are contained in a folder named common:
// book.h
#ifndef __BOOK_H__
#define __BOOK_H__
#include <stdio.h>
static void HandleError( cudaError_t err,
const char *file,
int line ) {
if (err != cudaSuccess) {
printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
file, line );
exit( EXIT_FAILURE );
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
template< typename T >
void swap( T& a, T& b ) {
T t = a;
a = b;
b = t;
}
void* big_random_block( int size ) {
unsigned char *data = (unsigned char*)malloc( size );
HANDLE_NULL( data );
for (int i=0; i<size; i++)
data[i] = rand();
return data;
}
int* big_random_block_int( int size ) {
int *data = (int*)malloc( size * sizeof(int) );
HANDLE_NULL( data );
for (int i=0; i<size; i++)
data[i] = rand();
return data;
}
// a place for common kernels - starts here
__device__ unsigned char value( float n1, float n2, int hue ) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char)(255 * (n1 + (n2-n1)*hue/60));
if (hue < 180)
return (unsigned char)(255 * n2);
if (hue < 240)
return (unsigned char)(255 * (n1 + (n2-n1)*(240-hue)/60));
return (unsigned char)(255 * n1);
}
__global__ void float_to_color( unsigned char *optr,
const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset*4 + 0] = value( m1, m2, h+120 );
optr[offset*4 + 1] = value( m1, m2, h );
optr[offset*4 + 2] = value( m1, m2, h -120 );
optr[offset*4 + 3] = 255;
}
__global__ void float_to_color( uchar4 *optr,
const float *outSrc ) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float l = outSrc[offset];
float s = 1;
int h = (180 + (int)(360.0f * outSrc[offset])) % 360;
float m1, m2;
if (l <= 0.5f)
m2 = l * (1 + s);
else
m2 = l + s - l * s;
m1 = 2 * l - m2;
optr[offset].x = value( m1, m2, h+120 );
optr[offset].y = value( m1, m2, h );
optr[offset].z = value( m1, m2, h -120 );
optr[offset].w = 255;
}
#if _WIN32
//Windows threads.
#include <windows.h>
typedef HANDLE CUTThread;
typedef unsigned (WINAPI *CUT_THREADROUTINE)(void *);
#define CUT_THREADPROC unsigned WINAPI
#define CUT_THREADEND return 0
#else
//POSIX threads.
#include <pthread.h>
typedef pthread_t CUTThread;
typedef void *(*CUT_THREADROUTINE)(void *);
#define CUT_THREADPROC void
#define CUT_THREADEND
#endif
//Create thread.
CUTThread start_thread( CUT_THREADROUTINE, void *data );
//Wait for thread to finish.
void end_thread( CUTThread thread );
//Destroy thread.
void destroy_thread( CUTThread thread );
//Wait for multiple threads.
void wait_for_threads( const CUTThread *threads, int num );
#if _WIN32
//Create thread
CUTThread start_thread(CUT_THREADROUTINE func, void *data){
return CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)func, data, 0, NULL);
}
//Wait for thread to finish
void end_thread(CUTThread thread){
WaitForSingleObject(thread, INFINITE);
CloseHandle(thread);
}
//Destroy thread
void destroy_thread( CUTThread thread ){
TerminateThread(thread, 0);
CloseHandle(thread);
}
//Wait for multiple threads
void wait_for_threads(const CUTThread * threads, int num){
WaitForMultipleObjects(num, threads, true, INFINITE);
for(int i = 0; i < num; i++)
CloseHandle(threads[i]);
}
#else
//Create thread
CUTThread start_thread(CUT_THREADROUTINE func, void * data){
pthread_t thread;
pthread_create(&thread, NULL, func, data);
return thread;
}
//Wait for thread to finish
void end_thread(CUTThread thread){
pthread_join(thread, NULL);
}
//Destroy thread
void destroy_thread( CUTThread thread ){
pthread_cancel(thread);
}
//Wait for multiple threads
void wait_for_threads(const CUTThread * threads, int num){
for(int i = 0; i < num; i++)
end_thread( threads[i] );
}
#endif
#endif // __BOOK_H__
Here's the second header:
// cpu_anim.h
#ifndef __CPU_ANIM_H__
#define __CPU_ANIM_H__
#include "gl_helper.h"
#include <iostream>
struct CPUAnimBitmap {
unsigned char *pixels;
int width, height;
void *dataBlock;
void (*fAnim)(void*,int);
void (*animExit)(void*);
void (*clickDrag)(void*,int,int,int,int);
int dragStartX, dragStartY;
CPUAnimBitmap( int w, int h, void *d = NULL ) {
width = w;
height = h;
pixels = new unsigned char[width * height * 4];
dataBlock = d;
clickDrag = NULL;
}
~CPUAnimBitmap() {
delete [] pixels;
}
unsigned char* get_ptr( void ) const { return pixels; }
long image_size( void ) const { return width * height * 4; }
void click_drag( void (*f)(void*,int,int,int,int)) {
clickDrag = f;
}
void anim_and_exit( void (*f)(void*,int), void(*e)(void*) ) {
CPUAnimBitmap** bitmap = get_bitmap_ptr();
*bitmap = this;
fAnim = f;
animExit = e;
// a bug in the Windows GLUT implementation prevents us from
// passing zero arguments to glutInit()
int c=1;
char* dummy = (char *)(void *)"";
glutInit( &c, &dummy );
glutInitDisplayMode( GLUT_DOUBLE | GLUT_RGBA );
glutInitWindowSize( width, height );
glutCreateWindow( "bitmap" );
glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
if (clickDrag != NULL)
glutMouseFunc( mouse_func );
glutIdleFunc( idle_func );
glutMainLoop();
}
// static method used for glut callbacks
static CPUAnimBitmap** get_bitmap_ptr( void ) {
static CPUAnimBitmap* gBitmap;
return &gBitmap;
}
// static method used for glut callbacks
static void mouse_func( int button, int state,
int mx, int my ) {
if (button == GLUT_LEFT_BUTTON) {
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
if (state == GLUT_DOWN) {
bitmap->dragStartX = mx;
bitmap->dragStartY = my;
} else if (state == GLUT_UP) {
bitmap->clickDrag( bitmap->dataBlock,
bitmap->dragStartX,
bitmap->dragStartY,
mx, my );
}
}
}
// static method used for glut callbacks
static void idle_func( void ) {
static int ticks = 1;
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
bitmap->fAnim( bitmap->dataBlock, ticks++ );
glutPostRedisplay();
}
// static method used for glut callbacks
static void Key(unsigned char key, int x, int y) {
switch (key) {
case 27:
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
bitmap->animExit( bitmap->dataBlock );
//delete bitmap;
exit(0);
}
}
// static method used for glut callbacks
static void Draw( void ) {
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
glClearColor( 0.0, 0.0, 0.0, 1.0 );
glClear( GL_COLOR_BUFFER_BIT );
glDrawPixels( bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE, bitmap->pixels );
glutSwapBuffers();
}
};
#endif // __CPU_ANIM_H__
I don't really know where the problem might be... I have already asked on the NVIDIA forum without success. Here's the link where you can download the source code in case you want it: https://developer.nvidia.com/content/cuda-example-introduction-general-purpose-gpu-programming-0
I know it is a very specific problem and it takes a lot of effort to read, but any suggestion is welcome.
I just figured out how to make it work: I changed the dimension of the window from 1024 to 512:
ripple.cu: #define DIM 1024 ----> #define DIM 512
I don't know why, but it works now! I just got lucky.
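Since the fix was found by luck, one way to get more information (my suggestion, reusing the book's own HANDLE_ERROR macro) is to check for launch and execution errors right after the kernel call in generate_frame:
void generate_frame( DataBlock *d, int ticks ) {
    dim3 blocks(DIM/16,DIM/16);
    dim3 threads(16,16);
    kernel<<<blocks,threads>>>( d->dev_bitmap, ticks );
    HANDLE_ERROR( cudaGetLastError() );       // reports kernel launch failures
    HANDLE_ERROR( cudaDeviceSynchronize() );  // reports kernel execution failures
    HANDLE_ERROR( cudaMemcpy( d->bitmap->get_ptr(),
                              d->dev_bitmap,
                              d->bitmap->image_size(),
                              cudaMemcpyDeviceToHost ) );
}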
I am a novice C programmer and was a bit confused about this segmentation fault. I have worked with pointers before, and this doesn't make sense. This code runs on an NVIDIA GPU, but I am not using any of the CUDA API functions yet (I commented them out to isolate the error).
I get the error when dereferencing the pointer *mu on the GPU (see the code below) in the function calibrate; the error is a segmentation fault.
My host code is:
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem...\n"); fflush(stdout);
startTime(&timer);
double* A_h, *T_h, *Delta_h, *E_h, *p_h, *p2_h, *D_h, *Times_h, *ones_h;
double* A_d, *T_d, *Delta_d, *E_d, *p_d, *p2_d, *D_d, *Times_d, *ones_d, *temp_1, *temp_2;
double* mu_h, *alpha_h, *omega_h;
double* mu_d, *alpha_d, *omega_d;
int N;
unsigned int mat_size, vec_size;
// Import data
FILE *fp;
char str[60];
unsigned int count=0;
double d;
/* opening file for reading */
fp = fopen("AAPL_data.txt","r");
if(fp == NULL) {
perror("Error opening file");
return(-1);
}
while(fgets (str, 60, fp)!=NULL)
++count;
// Stick with a limited subset of the data for now
N = 2000;
fclose(fp);
printf("Count is %u \n",count);
mat_size = N*N;
vec_size = N;
dim3 dim_grid, dim_block;
// Fill matrices with 0's
A_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { A_h[i] = 0; }
T_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { T_h[i] = 0; }
Delta_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { Delta_h[i] = 0; }
E_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { E_h[i] = 0; }
p_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { p_h[i] = 0; }
// Fill vectors with 0's, except the 1's vector
p2_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { p2_h[i] = 0; }
Times_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { Times_h[i] = 0; }
D_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { D_h[i] = 0; }
ones_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { ones_h[i] = 0; }
// Start constants as zero
mu_h = (double*) malloc( sizeof(double));
alpha_h = (double*) malloc( sizeof(double));
omega_h = (double*) malloc( sizeof(double));
*mu_h = 0;
*alpha_h = 0;
*omega_h = 0;
// Import data
count=0;
/* opening file for reading */
fp = fopen("AAPL_data.txt","r");
if(fp == NULL) {
perror("Error opening file");
return(-1);
}
while(fgets (str, 60, fp)!=NULL)
{
sscanf(str, "%lf", &d);
if(count < vec_size)
Times_h[count] = d;
++count;
}
fclose(fp);
/*printf("TIMES VECTOR: \n");
for (unsigned int i=0; i < vec_size; ++i)
{
printf("TIMES_H[ %u ] is ",i);
printf("%f \n", Times_h[i]);
}*/
printf("Count is %u \n",count);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
cudaMalloc((void**) &A_d, mat_size*sizeof(double)); // Create device variable for matrix A
cudaMalloc((void**) &T_d, mat_size*sizeof(double)); // Create device variable for matrix T
cudaMalloc((void**) &Delta_d, mat_size*sizeof(double)); // Create device variable for matrix Delta
cudaMalloc((void**) &E_d, mat_size*sizeof(double)); // Create device variable for matrix E
cudaMalloc((void**) &p_d, mat_size*sizeof(double)); // Create device variable for matrix p
cudaMalloc((void**) &p2_d, vec_size*sizeof(double)); // Create device variable for vector p2
cudaMalloc((void**) &D_d, vec_size*sizeof(double)); // Create device variable for vector D
cudaMalloc((void**) &Times_d, vec_size*sizeof(double)); // Create device variable for vector Times
cudaMalloc((void**) &ones_d, vec_size*sizeof(double)); // Create device variable for vector ones
cudaMalloc((void**) &mu_d, sizeof(double)); // Create device variable for constant mu
cudaMalloc((void**) &alpha_d, sizeof(double)); // Create device variable for constant alpha
cudaMalloc((void**) &omega_d, sizeof(double)); // Create device variable for constant omega
cudaMalloc((void**) &temp_1, vec_size*sizeof(double)); // Create device variable for constant omega
cudaMalloc((void**) &temp_2, mat_size*sizeof(double)); // Create device variable for constant omega
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
cudaMemcpy(A_d,A_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(T_d,T_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(Delta_d,Delta_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(E_d,E_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(p_d,p_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(p2_d,p2_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(D_d,D_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(ones_d,ones_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(Times_d,Times_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(mu_d,mu_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(alpha_d,alpha_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(omega_d,omega_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(temp_1,D_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(temp_2,A_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
int MAX_ITER = 100;
double TOL = .001;
calibrate(vec_size,mu_d, alpha_d, omega_d, A_d, T_d, Delta_d, E_d, p_d, p2_d, D_d, ones_d, Times_d,
MAX_ITER, TOL, temp_1, temp_2);
//tiledSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
// A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow); // A1_d, B1_d);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables from host ----------------------------------------
printf("Copying data from device to host...\n"); fflush(stdout);
startTime(&timer);
cudaMemcpy(mu_h,mu_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
cudaMemcpy(alpha_h,alpha_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
cudaMemcpy(omega_h,omega_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
printf("mu is %f: \n",*mu_h);
printf("alpha is %f: \n",*alpha_h);
printf("omega is %f: \n",*omega_h);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Free memory ------------------------------------------------------------
free(A_h);
free(T_h);
free(Delta_h);
free(E_h);
free(p_h);
free(p2_h);
free(D_h);
free(ones_h);
free(Times_h);
free(mu_h);
free(alpha_h);
free(omega_h);
cudaFree(A_d);
cudaFree(T_d);
cudaFree(Delta_d);
cudaFree(E_d);
cudaFree(p_d);
cudaFree(p2_d);
cudaFree(D_d);
cudaFree(ones_d);
cudaFree(Times_d);
cudaFree(mu_d);
cudaFree(alpha_d);
cudaFree(omega_d);
return 0;
}
The Kernel code on the GPU is:
/*****************************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
#define BLOCK_SIZE 512
__global__ void mysgemm(int m, int n, int k, const double *A, const double *B, double* C) {
__shared__ float ds_A[TILE_SIZE][TILE_SIZE];
__shared__ float ds_B[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = (by*TILE_SIZE+ty);//%m;
int col = (bx*TILE_SIZE+tx);//%n;
float pvalue = 0;
for(int i=0;i<(k-1)/TILE_SIZE+1;++i)
{
if((i*TILE_SIZE +tx < k) && (row < m))
ds_A[ty][tx] = A[row*k+i*TILE_SIZE+tx];
else ds_A[ty][tx] = 0;
if((i*TILE_SIZE+ty < k) && (col < n))
ds_B[ty][tx] = B[(i*TILE_SIZE+ty)*n+col]; // Load data into shared memory
else ds_B[ty][tx] = 0;
__syncthreads();
if(row < m && col < n)
{
for(int j=0;j<TILE_SIZE;++j)
{
//if(j < k)
pvalue += ds_A[ty][j]*ds_B[j][tx];
}
}
__syncthreads();
}
if(row < m && col < n)
C[row*n+col] = pvalue;
}
// Kernel to multiply each element in A by the corresponding element in B and store
// the result to the corresponding element in C. All vectors should be of length m
__global__ void elem_mul(int m, const double *A, const double *B, double* C)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m)
C[i] = A[i]*B[i];
}
// Kernel for parallel sum
__global__ void reduction(double *out, double *in, unsigned size)
{
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockIdx.x*blockDim.x;
if(start + t >= size)
partialSum[t] = 0;
else partialSum[t] = in[start+t];
if(start + blockDim.x+t>= size)
partialSum[blockDim.x+t] = 0;
else partialSum[blockDim.x+t] = in[start + blockDim.x+t];
for(unsigned int stride = 1; stride <=blockDim.x; stride*=2)
{
__syncthreads();
if(t % stride ==0)
partialSum[2*t]+=partialSum[2*t+stride];
}
__syncthreads();
out[blockIdx.x] = partialSum[0];
}
// Uses several kernels to compute the inner product of A and B
void inner_product(double *out, int m, const double *A, const double* B, double* temp)
{
dim3 dimGrid((m-1)/BLOCK_SIZE+1,(m-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
elem_mul<<<dimGrid,dimBlock>>>(m,A,B,temp);
reduction<<<dimGrid,dimBlock>>>(out,temp,m);
}
// Kernel to multiply each element in the matrix out in the following manner:
// out(i,j) = in(i) - in(j)
__global__ void fill(int m, const double *in, double *out)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx+bx*blockDim.x;
int j = ty+by*blockDim.y;
if((i < m) && (j < m))
out[i*m+j] = in[i]-in[j];
}
// Kernel to fill the matrix out with the formula out(i,j) = exp(-omega*T(i.j))
__global__ void fill_E(int m, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m)
out[i] = exp(-coeff * in[i]);
}
// Kernel for scalar multiplication for an mxk matrix and a coefficient coeff
__global__ void scal_mul(int m, int k, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m*k)
out[i] = coeff * in[i];
}
// Kernel for scalar addition for an mxk matrix and a coefficient coeff
__global__ void scal_add(int m, int k, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m*k)
out[i] = coeff + in[i];
}
// Kernel to update vector p2
__global__ void update_p2(int m, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m)
out[i] = coeff/in[i];
}
// Kernel to update matrix p
__global__ void update_p(int m, double* p2, double *denom, double *num, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
// loop through columns j
for(int j=0; j<m; ++j)
{
if(i == j)
out[i*m + j] = p2[i];
else if(i < m)
out[i*m + j] = num[i*m+j]/denom[i];
}
}
/*****************************************************************************************/
// int size: length of the Time-series vectors. Also the number of rows and columns in input matrices
// double mu: One of three parameters calibrated
// double alpha: One of three parameters calibrated
// double omega: One of three parameters calibrated
// double* A: A matrix filled out and used to calibrate
// double* T: A distance matrix T(i,j) = Times[i]-Times[j]
// double* Delta: A dissimilarity matrix Delta(i,j) = 1 if i > j, 0 otherwise
// double* E: A matrix filled out and used to calibrate--E(i,j) = exp(-omega*T(i,j))
// double* p: A probability matrix of cross excitations
// double* p2: A vector of self-excitation probabilities
// double* ones: A (size x 1) vector of 1's used in inner products and identity transformations
// double* Times: A (size x 1) vector of time series data to be calibrated
// int MAX_ITER: The maximum number of iterations allowed in the calibration
// double* TOL: The error tolerance or accuracy allowed in the calibration
// double* temp_1: A (size x 1) temporary vector used in intermediate calculations
// double* temp_2: A temporary matrix used in intermediate calculations
/*****************************************************************************************/
void calibrate(int size, double *mu, double *alpha, double *omega, double *A, double *T, double *Delta, double *E, double *p, double *p2, double *D, double* ones, double *Times, int MAX_ITER, double TOL, double* temp_1, double* temp_2)
{
//1) (a) Perform inner product to start initial values of mu, alpha, and omega
*mu = .11; // ERROR IS HERE!!
/*
inner_product(mu, size, Times, ones, temp_1);
double a = *(mu);
a = a/size;
*mu = .11;
/*
/size;
*alpha = *mu;
*omega = *mu;
double mu_t = 0;
double alpha_t = 0;
double omega_t = 0;
double err = 0;
int ctr = 0;
//1) (b) Fill out matrix T of time differences
dim3 dimGrid((size-1)/BLOCK_SIZE+1,(size-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
fill<<<dimGrid,dimBlock>>>(size, Times, T);
while(ctr < MAX_ITER && err < TOL)
{
// 2) Fill out matrix E
dim3 dimGrid((size-1)/BLOCK_SIZE+1,(size-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
fill_E<<<dimGrid,dimBlock>>>(size, omega, T, E);
// 3) Update matrix A
dim3 dimGrid((size-1)/BLOCK_SIZE+1,(size-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
scal_mult<<<dimGrid,dimBlock>>>(size,size, alpha, delta, A);
scal_mult<<<dimGrid,dimBlock>>>(size,size, omega, A, A);
dim3 dimGrid((n-1)/TILE_SIZE+1,(m-1)/TILE_SIZE+1,1);
dim3 dimBlock(TILE_SIZE,TILE_SIZE,1);
mysgemm<<<dimGrid,dimBlock>>>(size,size,size,A,E,A)
// 4) Update matrix D
mysgemm<<<dimGrid,dimBlock>>>(size,size,1,A,ones,D);
scal_add<<<dimGrid,dimBlock>>>(size,size, mu, D, D);
// 5) Update matrix p and vector p2
update_p2<<<dimGrid,dimBlock>>>(size,mu, D, p2);
update_p<<<dimGrid,dimBlock>>>(size,p2, D, A, p);
// 6) Update parameters mu, alpha, omega
inner_product(mu_t, size, p2, ones, temp_1);
mu_t /=Times[size-1];
reduction<<<dimGrid,dimBlock>>>(alpha_t,p,size*size);
alpha_t/= size;
// Treat T and p as very long vectors and calculate the inner product
inner_product(omega_t, size*size, T, p, temp_2);
omega_t = alpha_t/omega_t;
// 7) Update error
ctr++;
err = (mu - mu_t)*(mu - mu_t) + (alpha-alpha_t)*(alpha-alpha_t) + (omega-omega_t)*(omega-omega_t);
mu = mu_t;
alpha = alpha_t;
omega = omega_t;
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error: %s\n",cudaGetErrorString(error));
exit(-1);
}
}
*/
}
However, I think 99% of this code isn't relevant to the issue (I use nothing from "support.h" at the moment). Basically, I get an error dereferencing the pointer on the GPU, even though it is presumably not null. Thanks!
If you do proper CUDA error checking, you'll discover another problem with your code; this line:
cudaMemcpy(Times_d,Times_h,mat_size*sizeof(double), cudaMemcpyHostToDevice);
should be something like this:
cudaMemcpy(Times_d,Times_h,vec_size*sizeof(double), cudaMemcpyHostToDevice);
However that's not the crux of the issue. It took me a while to figure out that you are not making any kernel calls. If you call a kernel, all the parameters you pass to that kernel must be accessible by the device. So if you pass a pointer, the pointer must point to device memory. You are doing this with mu_d which is a device pointer:
calibrate(vec_size,mu_d,...
But your calibrate is not a kernel!!
It's an ordinary host function running on the host (CPU). So when you try and dereference the device pointer mu_d in host code:
*mu = .11; // ERROR IS HERE!!
You get a seg fault. I'm not sure why you're trying to debug this way, but simply converting kernel calls to host routines, while leaving all the parameters the same, is not a valid way to debug.
Fundamental CUDA rules (ignoring CUDA 6 Unified Memory):
you cannot dereference a host pointer in device code
you cannot dereference a device pointer in host code
Your code is a violation of the 2nd rule above.
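To illustrate both rules with the code above, here is a minimal sketch (my addition) of two valid ways to give the device-resident scalar mu_d its initial value, instead of dereferencing it on the host:
__global__ void set_scalar(double *p, double val)
{
    *p = val;   /* legal: device code dereferencing a device pointer */
}

void init_mu(double *mu_d)
{
    double init = 0.11;
    /* option 1: copy the value from host memory */
    cudaMemcpy(mu_d, &init, sizeof(double), cudaMemcpyHostToDevice);
    /* option 2: launch a one-thread kernel to write it in place */
    set_scalar<<<1,1>>>(mu_d, 0.11);
    cudaDeviceSynchronize();
}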
Hi, I am writing some code in CUDA C that uses many kernels. The kernel.cu and main.cu files are shown below.
kernel.cu: (almost everything is commented out in the host code at the bottom)
#include <stdio.h>
#define TILE_SIZE 16
#define BLOCK_SIZE 512
/****************************************************************/
// Kernel for matrix multiplication:
// A: m x n matrix
// B: n x k matrix
// C = A x B: m x k matrix
__global__ void mysgemm(int m, int n, int k, const double *A, const double *B, double* C) {
__shared__ float ds_A[TILE_SIZE][TILE_SIZE];
__shared__ float ds_B[TILE_SIZE][TILE_SIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = (by*TILE_SIZE+ty);//%m;
int col = (bx*TILE_SIZE+tx);//%n;
float pvalue = 0;
for(int i=0;i<(k-1)/TILE_SIZE+1;++i)
{
if((i*TILE_SIZE +tx < k) && (row < m))
ds_A[ty][tx] = A[row*k+i*TILE_SIZE+tx];
else ds_A[ty][tx] = 0;
if((i*TILE_SIZE+ty < k) && (col < n))
ds_B[ty][tx] = B[(i*TILE_SIZE+ty)*n+col]; // Load data into shared memory
else ds_B[ty][tx] = 0;
__syncthreads();
if(row < m && col < n)
{
for(int j=0;j<TILE_SIZE;++j)
{
//if(j < k)
pvalue += ds_A[ty][j]*ds_B[j][tx];
}
}
__syncthreads();
}
if(row < m && col < n)
C[row*n+col] = pvalue;
}
/****************************************************************/
/****************************************************************/
// Kernel to multiply each element in A by the corresponding element in B and store
// the result to the corresponding element in C. All vectors should be of length m
__global__ void elem_mul(int m, const double *A, const double *B, double* C)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m)
C[i] = A[i]*B[i];
}
/****************************************************************/
// Kernel for parallel sum
__global__ void reduction(double *out, double *in, unsigned size)
{
__shared__ float partialSum[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockIdx.x*blockDim.x;
if(start + t >= size)
partialSum[t] = 0;
else partialSum[t] = in[start+t];
if(start + blockDim.x+t>= size)
partialSum[blockDim.x+t] = 0;
else partialSum[blockDim.x+t] = in[start + blockDim.x+t];
for(unsigned int stride = 1; stride <=blockDim.x; stride*=2)
{
__syncthreads();
if(t % stride ==0)
partialSum[2*t]+=partialSum[2*t+stride];
}
__syncthreads();
out[blockIdx.x] = partialSum[0];
}
// Uses several kernels to compute the inner product of A and B
void inner_product(double *out, int m, const double *A, const double* B, double* temp)
{
dim3 dimGrid((m-1)/BLOCK_SIZE+1,(m-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
elem_mul<<<dimGrid,dimBlock>>>(m,A,B,temp);
reduction<<<dimGrid,dimBlock>>>(out,temp,m);
}
// Kernel to multiply each element in the matrix out in the following manner:
// out(i,j) = in(i) - in(j)
__global__ void fill(int m, const double *in, double *out)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = tx+bx*blockDim.x;
int j = ty+by*blockDim.y;
if((i < m) && (j < m))
out[i*m+j] = in[i]-in[j];
}
// Kernel to fill the matrix out with the formula out(i,j) = exp(-omega*T(i.j))
__global__ void fill_E(int m, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m)
out[i] = exp(-coeff * in[i]);
}
// Kernel for scalar multiplication for an mxk matrix and a coefficient coeff
__global__ void scal_mul(int m, int k, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m*k)
out[i] = coeff * in[i];
}
// Kernel for scalar addition for an mxk matrix and a coefficient coeff
__global__ void scal_add(int m, int k, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m*k)
out[i] = coeff + in[i];
}
/****************************************************************/
// Kernel to update vector p2
__global__ void update_p2(int m, double coeff, double *in, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
if(i < m)
out[i] = coeff/in[i];
}
/****************************************************************/
/****************************************************************/
// Kernel to update matrix p
__global__ void update_p(int m, double* p2, double *denom, double *num, double *out)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
int i = tx+bx*blockDim.x;
// loop through columns j
for(int j=0; j<m; ++j)
{
if(i == j)
out[i*m + j] = p2[i];
else if(i < m)
out[i*m + j] = num[i*m+j]/denom[i];
}
}
/****************************************************************/
/****************************************************************/
// Kernel to update the error, counter, and parameter variables
__global__ void update(int* counter, double* error, double *mu, double *mu_temp, double* alpha, double* alpha_temp, double* omega, double* omega_temp)
{
counter = counter + 1;
*error = (mu - mu_temp)*(mu - mu_temp) + (alpha-alpha_temp)*(alpha-alpha_temp) + (omega-omega_temp)*(omega-omega_temp);
mu = mu_temp;
alpha = alpha_temp;
omega = omega_temp;
}
/****************************************************************/
/****************************************************************/
// Kernel to assign old * coeff + inc to new
__global__ void assign(double* new, double* old, double coeff, double inc)
{
*new = 5.0;
}
/****************************************************************/
/******************************************************************************************************/
// Does so via an iterative procedure. Variables:
// int size: length of the Time-series vectors. Also the number of rows and columns in input matrices
// double mu: One of three parameters calibrated
// double alpha: One of three parameters calibrated
// double omega: One of three parameters calibrated
// double* A: A matrix filled out and used to calibrate
// double* T: A distance matrix T(i,j) = Times[i]-Times[j]
// double* Delta: A dissimilarity matrix Delta(i,j) = 1 if i > j, 0 otherwise
// double* E: A matrix filled out and used to calibrate--E(i,j) = exp(-omega*T(i,j))
// double* p: A probability matrix of cross excitations
// double* p2: A vector of self-excitation probabilities
// double* ones: A (size x 1) vector of 1's used in inner products and identity transformations
// double* Times: A (size x 1) vector of time series data to be calibrated
// int MAX_ITER: The maximum number of iterations allowed in the calibration
// double* TOL: The error tolerance or accuracy allowed in the calibration
// double* temp_1: A (size x 1) temporary vector used in intermediate calculations
// double* temp_2: A temporary matrix used in intermediate calculations
// double* temp_3: A temporary scalar used in intermediate calculations
/******************************************************************************************************/
void calibrate(int size, double *mu, double *mu_t, double *alpha, double *alpha_t, double *omega, double *omega_t, double *A, double *T, double *Delta, double *E, double *p, double *p2, double *D, double* ones, double *Times, int *ctr, double *err, double* temp_1, double* temp_2, double* temp_3)
{
//1) (a) Perform inner product to start initial values of mu, alpha, and omega
inner_product(temp_3, size, Times, ones, temp_1); // Inner product of Time series
dim3 dimGrid(1,1,1);
dim3 dimBlock(1,1,1);
assign<<<dimGrid,dimBlock>>>(mu_t,temp_3,1.1,0); // Assign mu_t to be temp_3*(1/size) (the average)
assign<<<dimGrid,dimBlock>>>(alpha_t,temp_3,1.1,0); // Assign mu_t to be temp_3*(1/size) (the average)
assign<<<dimGrid,dimBlock>>>(omega_t,temp_3,1.1,0); // Assign mu_t to be temp_3*(1/size) (the average)
/*
//1) (b) Fill out matrix T of time differences
dim3 dimGrid((size-1)/BLOCK_SIZE+1,(size-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
fill<<<dimGrid,dimBlock>>>(size, Times, T);
// 2) Fill out matrix E
dim3 dimGrid((size-1)/BLOCK_SIZE+1,(size-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
fill_E<<<dimGrid,dimBlock>>>(size, omega, T, E);
// 3) Update matrix A
dim3 dimGrid((size-1)/BLOCK_SIZE+1,(size-1)/BLOCK_SIZE+1,1);
dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE,1);
scal_mult<<<dimGrid,dimBlock>>>(size,size, alpha, delta, A);
scal_mult<<<dimGrid,dimBlock>>>(size,size, omega, A, A);
dim3 dimGrid((n-1)/TILE_SIZE+1,(m-1)/TILE_SIZE+1,1);
dim3 dimBlock(TILE_SIZE,TILE_SIZE,1);
mysgemm<<<dimGrid,dimBlock>>>(size,size,size,A,E,A)
// 4) Update matrix D
mysgemm<<<dimGrid,dimBlock>>>(size,size,1,A,ones,D);
scal_add<<<dimGrid,dimBlock>>>(size,size, mu, D, D);
// 5) Update matrix p and vector p2
update_p2<<<dimGrid,dimBlock>>>(size,mu, D, p2);
update_p<<<dimGrid,dimBlock>>>(size,p2, D, A, p);
// 6) Update parameters mu, alpha, omega
inner_product(mu_t, size, p2, ones, temp_1);
mu_t /=Times[size-1];
reduction<<<dimGrid,dimBlock>>>(alpha_t,p,size*size);
alpha_t/= size;
// Treat T and p as very long vectors and calculate the inner product
inner_product(omega_t, size*size, T, p, temp_2);
omega_t = alpha_t/omega_t;
// 7) Update error
dim3 dimGrid(1,1,1);
dim3 dimBlock(1,1,1);
update<<<dimGrid,dimBlock>>>(ctr,err,mu,mu_t,alpha,alpha_t,omega,omega_t)
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("CUDA error: %s\n",cudaGetErrorString(error));
exit(-1);
}
*/
}
main.cu (I don't use support.h yet):
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem...\n"); fflush(stdout);
startTime(&timer);
double* A_h, *T_h, *Delta_h, *E_h, *p_h, *p2_h, *D_h, *Times_h, *ones_h;
double* A_d, *T_d, *Delta_d, *E_d, *p_d, *p2_d, *D_d, *Times_d, *ones_d, *temp_1, *temp_2, *temp_3;
double* mu_h, *alpha_h, *omega_h; // hawkes parameters on host
double* mu_d, *alpha_d, *omega_d; // hawkes parameters on device
double* mu_t_d, *alpha_t_d, *omega_t_d; // hawkes temporary parameters on device
double* err_h, *err_d; // Iterative variables for host and device
int* ctr_h, *ctr_d;
int N;
unsigned int mat_size, vec_size;
// Import data
FILE *fp;
char str[60];
unsigned int count=0;
double d;
/* opening file for reading */
fp = fopen("AAPL_data.txt","r");
if(fp == NULL) {
perror("Error opening file");
return(-1);
}
while(fgets (str, 60, fp)!=NULL)
++count;
// Stick with a limited subset of the data for now to avoid using too much host memory
N = 2000;
fclose(fp);
printf("Count is %u \n",count);
mat_size = N*N;
vec_size = N;
dim3 dim_grid, dim_block;
// Fill matrices with 0's
A_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { A_h[i] = 0; }
T_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { T_h[i] = 0; }
Delta_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { Delta_h[i] = 0; }
E_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { E_h[i] = 0; }
p_h = (double*) malloc( sizeof(double)*mat_size );
for (unsigned int i=0; i < mat_size; ++i) { p_h[i] = 0; }
// Fill vectors with 0's, except the 1's vector
p2_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { p2_h[i] = 0; }
Times_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { Times_h[i] = 0; }
D_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { D_h[i] = 0; }
ones_h = (double*) malloc( sizeof(double)*vec_size );
for (unsigned int i=0; i < vec_size; ++i) { ones_h[i] = 0; }
// Start constants as zero
mu_h = (double*) malloc( sizeof(double));
alpha_h = (double*) malloc( sizeof(double));
omega_h = (double*) malloc( sizeof(double));
err_h = (double*) malloc( sizeof(double));
ctr_h = (int*) malloc( sizeof(int));
*mu_h = 0;
*alpha_h = 0;
*omega_h = 0;
*err_h = 0;
*ctr_h = 0;
// Import data
count=0;
/* opening file for reading */
fp = fopen("AAPL_data.txt","r");
if(fp == NULL) {
perror("Error opening file");
return(-1);
}
while(fgets (str, 60, fp)!=NULL)
{
sscanf(str, "%lf", &d);
if(count < vec_size)
Times_h[count] = d;
++count;
}
fclose(fp);
/*printf("TIMES VECTOR: \n");
for (unsigned int i=0; i < vec_size; ++i)
{
printf("TIMES_H[ %u ] is ",i);
printf("%f \n", Times_h[i]);
}*/
printf("Count is %u \n",count);
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
cudaMalloc((void**) &A_d, mat_size*sizeof(double)); // Create device variable for matrix A
cudaMalloc((void**) &T_d, mat_size*sizeof(double)); // Create device variable for matrix T
cudaMalloc((void**) &Delta_d, mat_size*sizeof(double)); // Create device variable for matrix Delta
cudaMalloc((void**) &E_d, mat_size*sizeof(double)); // Create device variable for matrix E
cudaMalloc((void**) &p_d, mat_size*sizeof(double)); // Create device variable for matrix p
cudaMalloc((void**) &p2_d, vec_size*sizeof(double)); // Create device variable for vector p2
cudaMalloc((void**) &D_d, vec_size*sizeof(double)); // Create device variable for vector D
cudaMalloc((void**) &Times_d, vec_size*sizeof(double)); // Create device variable for vector Times
cudaMalloc((void**) &ones_d, vec_size*sizeof(double)); // Create device variable for vector ones
// Parameters and intermediate parameters
cudaMalloc((void**) &mu_d, sizeof(double)); // Create device variable for constant mu
cudaMalloc((void**) &alpha_d, sizeof(double)); // Create device variable for constant alpha
cudaMalloc((void**) &omega_d, sizeof(double)); // Create device variable for constant omega
cudaMalloc((void**) &mu_t_d, sizeof(double)); // Create device variable for constant mu
cudaMalloc((void**) &alpha_t_d, sizeof(double)); // Create device variable for constant alpha
cudaMalloc((void**) &omega_t_d, sizeof(double)); // Create device variable for constant omega
// Temporary variables
cudaMalloc((void**) &temp_1, vec_size*sizeof(double)); // Create device variable for constant omega
cudaMalloc((void**) &temp_2, mat_size*sizeof(double)); // Create device variable for constant omega
cudaMalloc((void**) &temp_3, sizeof(double)); // Create device variable for constant omega
// Iteration variables
cudaMalloc((void**) &err_d, sizeof(double)); // Create device variable for iterative counters
cudaMalloc((void**) &ctr_d, sizeof(int));
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
cudaMemcpy(A_d,A_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(T_d,T_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(Delta_d,Delta_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(E_d,E_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(p_d,p_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(p2_d,p2_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(D_d,D_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(ones_d,ones_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(Times_d,Times_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var (Times is a vector, so vec_size, not mat_size)
// Parameters and intermediate parameters
cudaMemcpy(mu_d,mu_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(alpha_d,alpha_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(omega_d,omega_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(mu_t_d,mu_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(alpha_t_d,alpha_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(omega_t_d,omega_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
// Temporary variables
cudaMemcpy(temp_1,D_h,vec_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(temp_2,A_h,mat_size*sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(temp_3,mu_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
// Iteration variables
cudaMemcpy(err_d,err_h,sizeof(double), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaMemcpy(ctr_d,ctr_h,sizeof(int), cudaMemcpyHostToDevice); // Copy from host var to device var
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
int MAX_ITER = 100;
double TOL = .001;
//while(*ctr_h < MAX_ITER && *err_h < TOL)
//{
calibrate(vec_size,mu_d, mu_t_d, alpha_d, alpha_t_d, omega_d, omega_t_d, A_d, T_d, Delta_d, E_d, p_d,
p2_d, D_d, ones_d, Times_d, ctr_d, err_d, temp_1, temp_2, temp_3);
// cudaMemcpy(err_h,err_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
// cudaMemcpy(ctr_h,ctr_d,sizeof(int), cudaMemcpyDeviceToHost); // Copy from device var to host var
//}
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables back to host --------------------------------------
printf("Copying data from device to host...\n"); fflush(stdout);
startTime(&timer);
cudaMemcpy(mu_h,mu_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
cudaMemcpy(alpha_h,alpha_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
cudaMemcpy(omega_h,omega_d,sizeof(double), cudaMemcpyDeviceToHost); // Copy from device var to host var
printf("mu is %f: \n",*mu_h);
printf("alpha is %f: \n",*alpha_h);
printf("omega is %f: \n",*omega_h);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Free memory ------------------------------------------------------------
free(A_h);
free(T_h);
free(Delta_h);
free(E_h);
free(p_h);
free(p2_h);
free(D_h);
free(ones_h);
free(Times_h);
free(mu_h);
free(alpha_h);
free(omega_h);
cudaFree(A_d);
cudaFree(T_d);
cudaFree(Delta_d);
cudaFree(E_d);
cudaFree(p_d);
cudaFree(p2_d);
cudaFree(D_d);
cudaFree(ones_d);
cudaFree(Times_d);
cudaFree(mu_d);
cudaFree(alpha_d);
cudaFree(omega_d);
cudaFree(mu_t_d);
cudaFree(alpha_t_d);
cudaFree(omega_t_d);
cudaFree(temp_1);
cudaFree(temp_2);
cudaFree(temp_3);
cudaFree(err_d);
cudaFree(ctr_d);
return 0;
}
I want to avoid copying CUDA memory variables back to the host just to do simple arithmetic on them, so I made the kernel
__global__ void assign(double* new, double* old, double coeff, double inc)
in kernel.cu to calculate *new = (*old)*coeff + inc on the GPU. However, right now I am just having it store 5 to new. I cannot even compile yet:
kernel.cu(211): error: expected a ")"
and I have no clue why.
new is a reserved keyword in C++, and therefore in CUDA C++ as well, which is why the compiler fails at the parameter list. Don't use it as a variable or parameter name.
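To illustrate, here is a minimal sketch of the fixed kernel; dst and src are arbitrary replacement names, and the single-thread launch shown in the comment assumes d_a and d_b are valid device pointers:
// Sketch: 'dst' and 'src' replace the reserved names 'new' and 'old'
__global__ void assign(double* dst, double* src, double coeff, double inc)
{
// one thread is enough for a scalar update, so no index math is needed
*dst = (*src)*coeff + inc;
}
// hypothetical usage: assign<<<1,1>>>(d_a, d_b, 2.0, 1.0);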
I have a strange problem dealing with a 2D array on a CUDA device.
#define VR 100 // rows
#define ST 13 // columns
__global__ void test(float *arr, curandState *globalState, size_t pitch, unsigned long seed) {
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
curand_init ( seed, id, 0, &globalState[id] );
cuPrintf("Thread id: %d \n", id);
float* row = (float*)(((char*)arr) + id * pitch);
for (int j = 0; j < ST; ++j) {
row[j] = generate(globalState, id);
}
}
int main() {
float *d_arr;
float *h_arr = new float[VR*ST];
size_t pitch;
cudaMallocPitch(&d_arr, &pitch, ST* sizeof(float), VR);
dim3 dimBlock(VR);
dim3 dimGrid(1,1);
curandState* devStates;
cudaMalloc ( &devStates, VR*ST*sizeof( curandState ) );
test <<< dimGrid, dimBlock >>> (d_arr, devStates, pitch, unsigned(time(NULL)));
cudaMemcpy(h_arr, d_arr,VR*ST*sizeof(float),cudaMemcpyDeviceToHost);
for (int i=0; i<VR; i++) {
for (int j=0; j<ST; j++) {
cout << "N["<<i<<"]["<<j<<"]=" << h_arr[(i*ST)+j]<<endl;
}
}
I don't get evenly distributed numbers; instead, they appear in sequences of 13 with a bunch of zeros in between. See: http://pastie.org/6106381
The problem is that the original data array is allocated with cudaMallocPitch, whereas the copy back is done with an ordinary cudaMemcpy. This gives unexpected results because cudaMallocPitch creates "padded" rows to satisfy alignment requirements, whereas cudaMemcpy assumes everything is stored contiguously. For example, a row of ST = 13 floats occupies only 52 bytes, but the pitch may be rounded up to something like 512 bytes, so a flat copy of VR*ST floats reads mostly padding rather than data. The fix is to copy with cudaMemcpy2D, which is pitch-aware. Below is code that I believe has the necessary corrections to be functional:
#include <stdio.h>
#include <iostream>
#include <ctime>
#include <curand_kernel.h>
#define VR 100 // rows
#define ST 13 // columns
__device__ float generate(curandState* globalState, int id)
{
//int id = (blockIdx.x * blockDim.x) + threadIdx.x;
curandState localState = globalState[id];
float rand;
do {
rand = curand_uniform( &localState );
} while(rand == 0); // curand_uniform returns values in (0,1], so this is only a defensive check
globalState[id] = localState;
return rand;
}
__global__ void test(float *arr, curandState *globalState, size_t pitch, unsigned long seed) {
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
curand_init ( seed, id, 0, &globalState[id] );
//cuPrintf("Thread id: %d \n", id);
float* row = (float*)(((char*)arr) + id * pitch);
for (int j = 0; j < ST; ++j) {
row[j] = generate(globalState, id);
}
}
using namespace std;
int main() {
float *d_arr;
float *h_arr = new float[VR*ST];
size_t pitch;
cudaMallocPitch(&d_arr, &pitch, ST* sizeof(float), VR);
dim3 dimBlock(VR);
dim3 dimGrid(1,1);
curandState* devStates;
cudaMalloc ( &devStates, VR*ST*sizeof( curandState ) );
test <<< dimGrid, dimBlock >>> (d_arr, devStates, pitch, unsigned(time(NULL)));
cudaMemcpy2D(h_arr, ST*sizeof(float), d_arr, pitch, ST*sizeof(float), VR ,cudaMemcpyDeviceToHost);
for (int i=0; i<VR; i++) {
for (int j=0; j<ST; j++) {
cout << "N["<<i<<"]["<<j<<"]=" << h_arr[(i*ST)+j]<<endl;
}
}
cudaFree(d_arr);
cudaFree(devStates);
delete [] h_arr;
return 0;
}
Compiling the above code using:
nvcc -arch=sm_20 -lcurand -o t70 t70.cu
and then running I get what appears to be "normal" output:
N[0][0]=0.876772
N[0][1]=0.550017
N[0][2]=0.49023
N[0][3]=0.530145
N[0][4]=0.501616
N[0][5]=0.326232
N[0][6]=0.438308
N[0][7]=0.857651
N[0][8]=0.462743
N[0][9]=0.38252
N[0][10]=0.258212
N[0][11]=0.194021
N[0][12]=0.895522
N[1][0]=0.559201
N[1][1]=0.257747
N[1][2]=0.430971
N[1][3]=0.707209
N[1][4]=0.599081
N[1][5]=0.0457626
N[1][6]=0.702412
N[1][7]=0.88791
N[1][8]=0.508877
N[1][9]=0.702734
N[1][10]=0.379898
N[1][11]=0.138841
N[1][12]=0.540869
(results truncated)
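As an aside, an equivalent alternative (just a sketch, not required here) is to copy the entire pitched allocation verbatim and do the pitch arithmetic on the host instead; h_padded below is a hypothetical host buffer sized pitch*VR bytes:
// Sketch: flat copy of the padded device buffer, then per-row indexing via pitch
char *h_padded = new char[pitch * VR];
cudaMemcpy(h_padded, d_arr, pitch * VR, cudaMemcpyDeviceToHost);
for (int i=0; i<VR; i++) {
float* row = (float*)(h_padded + i * pitch); // step over the padding at the end of each row
for (int j=0; j<ST; j++)
cout << "N["<<i<<"]["<<j<<"]=" << row[j] << endl;
}
delete [] h_padded;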
I think it's wrong: you should assign VR threads or blocks, because you already loop through ST inside the kernel. Maybe that will fix it.
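For reference, the corrected code above already launches VR threads in a single block (dimBlock(VR), one thread per row). A more scalable variant, sketched below with an arbitrary block size of 128 and reusing generate, VR, and ST from the answer above, spreads the rows over multiple blocks and guards against excess threads:
// Sketch: multi-block launch with a bounds check (the block size 128 is arbitrary)
__global__ void test(float *arr, curandState *globalState, size_t pitch, unsigned long seed) {
int id = (blockIdx.x * blockDim.x) + threadIdx.x;
if (id >= VR) return; // threads past the last row do nothing
curand_init ( seed, id, 0, &globalState[id] );
float* row = (float*)(((char*)arr) + id * pitch);
for (int j = 0; j < ST; ++j) {
row[j] = generate(globalState, id);
}
}
// launch: ceiling division so every row gets exactly one thread
int threads = 128;
int blocks = (VR + threads - 1) / threads;
test <<< blocks, threads >>> (d_arr, devStates, pitch, unsigned(time(NULL)));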