OK, so far I can create an array of floats on the host, copy it to the GPU, and then bring it back to the host as another array (to test whether the copy was successful by comparing it to the original).
I then create a CUDA array from the array on the GPU. Then I bind that array to a CUDA texture.
I now want to read that texture back and compare it with the original array (again, to test that it copied correctly). I saw some sample code that uses the readTexels() function shown below, but it doesn't seem to work for me (basically everything works except the section of the bindToTexture(float* deviceArray) function starting at the readTexels(SIZE, testArrayDevice) line).
Any suggestions of a different way to do this? Or are there some obvious problems I missed in my code?
Thanks for the help guys!
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define SIZE 20
//Create a channel description to use.
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
//Create a texture to use.
texture<float, 2, cudaReadModeElementType> cudaTexture;
//cudaTexture.filterMode = cudaFilterModeLinear;
//cudaTexture.normalized = false;
__global__ void readTexels(int amount, float *Array)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < amount)
{
float x = tex1D(cudaTexture, float(index));
Array[index] = x;
}
}
float* copyToGPU(float* hostArray, int size)
{
//Create pointers, one for the array to be on the device, and one for bringing it back to the host for testing.
float* deviceArray;
float* testArray;
//Allocate some memory for the two arrays so they don't get overwritten.
testArray = (float *)malloc(sizeof(float)*size);
//Allocate some memory for the array to be put onto the GPU device.
cudaMalloc((void **)&deviceArray, sizeof(float)*size);
//Actually copy the array from hostArray to deviceArray.
cudaMemcpy(deviceArray, hostArray, sizeof(float)*size, cudaMemcpyHostToDevice);
//Copy the deviceArray back to testArray in host memory for testing.
cudaMemcpy(testArray, deviceArray, sizeof(float)*size, cudaMemcpyDeviceToHost);
//Make sure contents of testArray match the original contents in hostArray.
for (int i = 0; i < size; i++)
{
if (hostArray[i] != testArray[i])
{
printf("Location [%d] does not match in hostArray and testArray.\n", i);
}
}
//Don't forget free these arrays after you're done!
free(testArray);
return deviceArray; //TODO: FREE THE DEVICE ARRAY VIA cudaFree(deviceArray);
}
cudaArray* bindToTexture(float* deviceArray)
{
//Create a CUDA array to translate deviceArray into.
cudaArray* cuArray;
//Allocate memory for the CUDA array.
cudaMallocArray(&cuArray, &cudaTexture.channelDesc, SIZE, 1);
//Copy the deviceArray into the CUDA array.
cudaMemcpyToArray(cuArray, 0, 0, deviceArray, sizeof(float)*SIZE, cudaMemcpyHostToDevice);
//Release the deviceArray
cudaFree(deviceArray);
//Bind the CUDA array to the texture.
cudaBindTextureToArray(cudaTexture, cuArray);
//Make a test array on the device and on the host to verify that the texture has been saved correctly.
float* testArrayDevice;
float* testArrayHost;
//Allocate memory for the two test arrays.
cudaMalloc((void **)&testArrayDevice, sizeof(float)*SIZE);
testArrayHost = (float *)malloc(sizeof(float)*SIZE);
//Read the texels of the texture to the test array in the device.
readTexels(SIZE, testArrayDevice);
//Copy the device test array to the host test array.
cudaMemcpy(testArrayHost, testArrayDevice, sizeof(float)*SIZE, cudaMemcpyDeviceToHost);
//Print contents of the array out.
for (int i = 0; i < SIZE; i++)
{
printf("%f\n", testArrayHost[i]);
}
//Free the memory for the test arrays.
free(testArrayHost);
cudaFree(testArrayDevice);
return cuArray; //TODO: UNBIND THE CUDA TEXTURE VIA cudaUnbindTexture(cudaTexture);
//TODO: FREE THE CUDA ARRAY VIA cudaFree(cuArray);
}
int main(void)
{
float* hostArray;
hostArray = (float *)malloc(sizeof(float)*SIZE);
for (int i = 0; i < SIZE; i++)
{
hostArray[i] = 10.f + i;
}
float* deviceAddy = copyToGPU(hostArray, SIZE);
free(hostArray);
return 0;
}
Briefly:
------------- in your main.cu ---------------------------------------------------------------------------------------
1. Define the texture as a global variable:
texture<unsigned int, 2, cudaReadModeElementType> refTexture; // global variable!
// meaning: address the texture with (x,y) (2D) and get an unsigned int
In the main function:
2. Use a CUDA array combined with the texture:
cudaArray* myArray; // declaration
// ask for memory
cudaMallocArray ( &myArray,
                  &refTexture.channelDesc, /* with this you don't need to fill a channel descriptor yourself */
                  width,
                  height);
3. Copy data from CPU to GPU (into the array):
cudaMemcpyToArray ( myArray,        // destination: the array
                    0, 0,           // offsets
                    sourceData,     // source pointer (uint*)
                    width*height*sizeof(uint), // total number of bytes to copy
                    cudaMemcpyHostToDevice);
4. Bind the texture to the array:
cudaBindTextureToArray(refTexture, myArray);
5. Change some parameters of the texture:
refTexture.normalized = false; // don't automatically convert fetched data to [0,1)
refTexture.addressMode[0] = cudaAddressModeClamp; // if my indexing is out of bounds, automatically use a valid index (0 for a negative index, the last element for a too-large index)
refTexture.addressMode[1] = cudaAddressModeClamp;
---------- in the kernel --------------------------------------------------------
// find the indices (f,c) to be processed by this thread
uint f = (blockIdx.x * blockDim.x) + threadIdx.x;
uint c = (blockIdx.y * blockDim.y) + threadIdx.y;
// this is curious and necessary: indices for reading from a texture
// are floats! Even if you are certain you want to access (4,5), you have to
// aim at the texel "center", which is (4.5, 5.5)
uint read = tex2D(refTexture, c+0.5f, f+0.5f); // refTexture is the global variable
Now you process read and write the results to another region of device global memory, not to the texture itself!
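Putting steps 1-5 and the kernel fragment together, a minimal read-back kernel following this recipe might look like the sketch below (width, height, and the out pointer are assumptions supplied by the caller; one thread per texel):
__global__ void copyFromTexture(unsigned int* out, int width, int height)
{
    // x indexes columns, y indexes rows here
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (x < width && y < height) {
        // fetch from the texel center and store into plain global memory
        out[y * width + x] = tex2D(refTexture, x + 0.5f, y + 0.5f);
    }
}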
readTexels() is a kernel (__global__) function, i.e. it runs on the GPU. Therefore you need to use the correct syntax to launch a kernel.
Take a look through the CUDA Programming Guide and some of the SDK samples, both available via the NVIDIA CUDA site to see how to launch a kernel.
Hint: It'll end up something like readTexels<<<grid,block>>>(...)
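For the code in the question, a minimal sketch of such a launch could look like this (the block size of 256 is an arbitrary choice):
int blockSize = 256;
int gridSize = (SIZE + blockSize - 1) / blockSize; // enough blocks to cover SIZE elements
readTexels<<<gridSize, blockSize>>>(SIZE, testArrayDevice);
cudaDeviceSynchronize(); // wait for the kernel to finish before copying results back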
I've just started to learn CUDA and I wanted to fill a 2D array (represented as a 1D array) with random numbers. I followed other posts in order to generate the random numbers, but I don't know if there is a problem with the generation of the numbers, with recovering the memory from the device, or something else. The problem is that, even though I have tried to fill every cell of the array with the id of the thread attending it (in order to check the results after copying to host memory), I receive an array filled with 0 in every position after recovering the data with cudaMemcpy().
I'm programming in Visual Studio 2013, with CUDA 7.5, on an i5 2500K processor and a GTX 960 graphics card.
Here are the main function and the method where I try to fill the array. I'll include the cuRAND initialization too. If you need to see anything else, just tell me.
__global__ void setup_cuRand(curandState * state, unsigned long seed)
{
int id = threadIdx.x;
curand_init(seed, id, 0, &state[id]);
}
__global__ void poblar(int * adn, curandState * state){
curandState localState = state[threadIdx.x];
int random = curand(&localState);
adn[threadIdx.x] = random;
// It makes no difference if I use the following instruction instead; the result is still a lot of 0's
//adn[threadIdx.x] = threadIdx.x;
}
int main()
{
const int adnLength = NUMCROMOSOMAS * SIZECROMOSOMAS; // 256 * 128 (32.768)
const size_t adnSize = adnLength * sizeof(int);
int adnCPU[adnLength];
int * adnDevice;
cudaError_t error = cudaSetDevice(0);
if (error != cudaSuccess)
exit(-EXIT_FAILURE);
curandState * randState;
error = cudaMalloc(&randState, adnLength * sizeof(curandState));
if (error != cudaSuccess){
cudaFree(randState);
exit(-EXIT_FAILURE);
}
//Here is initialized cuRand
setup_cuRand<<<1, adnLength>>>(randState, unsigned(time(NULL)));
error = cudaMalloc((void **)&adnDevice, adnSize);
if (error == cudaErrorMemoryAllocation){// cudaSuccess){
cudaFree(adnDevice);
cudaFree(randState);
printf("\n error");
exit(-EXIT_FAILURE);
}
poblar <<<1, adnLength >>> (adnDevice, randState);
error = cudaMemcpy(adnCPU, adnDevice, adnSize, cudaMemcpyDeviceToHost);
//After here, for any i, adnCPU[i] is 0 and i cannot figure what is wrong
if (error == cudaSuccess){
for (int i = 0; i < NUMCROMOSOMAS; i++){
for (int j = 0; j < SIZECROMOSOMAS; j++){
printf("%i,", adnCPU[(i*SIZECROMOSOMAS) + j]);
}
printf("\n");
}
}
return 0;
}
EDIT after the answer solved it: there was one peculiarity beyond the given answer, which is that I needed a lower number of threads (half that quantity worked for me) in order to seed the random numbers correctly with cuRAND. For some reason, I could create the threads fine, but I couldn't seed the pseudo-random generator.
The maximum number of threads per block is 1024 on your hardware; hence, you may not schedule a launch with adnLength threads per block if adnLength is larger than 1024.
The error you are having is most probably a launch configuration error. It is returned by cudaPeekAtLastError(), as it occurs before any GPU work, right after the triple-angle-bracket call. Indeed, cudaMemcpy may not return it, even though it does return errors from previous asynchronous calls.
The error that most likely occurs here is cudaErrorLaunchOutOfResources.
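A minimal sketch of how the launches could be restructured to respect that limit (the block size of 256 is an arbitrary choice; note that both kernels would then also need a global index computed from blockIdx and blockDim instead of threadIdx.x alone):
int threadsPerBlock = 256;
int blocks = (adnLength + threadsPerBlock - 1) / threadsPerBlock; // round up
setup_cuRand<<<blocks, threadsPerBlock>>>(randState, unsigned(time(NULL)));
cudaError_t launchError = cudaPeekAtLastError(); // catches configuration errors immediately
if (launchError != cudaSuccess)
    printf("launch failed: %s\n", cudaGetErrorString(launchError));
// inside each kernel, replace threadIdx.x with a global index:
// int id = blockIdx.x * blockDim.x + threadIdx.x;
// if (id < adnLength) { ... }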
I am filling an AVFrame with a BGR image for encoding, and I am getting a memory leak. I think I tracked the problem to its source, but it appears to be a library issue instead. Since FFmpeg is such a mature library, I assume I am misusing it, and I would like to be instructed on how to do it correctly.
I am allocating a Frame using:
AVFrame *bgrFrame = av_frame_alloc();
And later I allocate the image in the Frame using:
av_image_alloc(bgrFrame->data, bgrFrame->linesize, bgrFrame->width, bgrFrame->height, AV_PIX_FMT_BGR24, 32);
Then I fill the image allocated using:
av_image_fill_pointers(bgrFrame->data, AV_PIX_FMT_BGR24, bgrFrame->height, originalBGRImage.data, bgrFrame->linesize);
Where originalBGRImage is an OpenCV Mat.
And this has a memory leak: apparently av_image_alloc() allocates memory, and av_image_fill_pointers() also allocates memory, on the same pointers (I can see bgrFrame->data[0] changing between the calls).
If I call
av_freep(&bgrFrame->data[0]);
After av_image_alloc(), it's fine, but if I call it after av_image_fill_pointers(), the program crashes, even though bgrFrame->data[0] is not NULL, which I find very curious.
Looking at FFmpeg's av_image_alloc() source code, I see that it calls av_image_fill_pointers() twice internally, once when allocating a buffer buf, and later in av_image_fill_pointers()'s source code, data[0] is replaced by the image pointer, which is (I think) the source of the memory leak, since data[0] was holding buf from the previous av_image_alloc() call.
So this brings me to the final question: what's the correct way of filling a frame with an image?
You should allocate your frame once.
AVFrame* alloc_picture(enum PixelFormat pix_fmt, int width, int height)
{
AVFrame *f = avcodec_alloc_frame();
if (!f)
return NULL;
int size = avpicture_get_size(pix_fmt, width, height);
uint8_t *buffer = (uint8_t *) av_malloc(size);
if (!buffer) {
av_free(f);
return NULL;
}
avpicture_fill((AVPicture *)f, buffer, pix_fmt, width, height);
return f;
}
Yes, the cast to (AVPicture*) is allowed: https://stackoverflow.com/a/20498359/2079934
In subsequent frames, you can write into this frame. Since your OpenCV raw data is BGR and you need RGB or YUV, you can use sws_scale(). In my application, I mirror vertically:
struct SwsContext* convertCtx = sws_getContext(width, height, PIX_FMT_RGB24, c->width, c->height, c->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
avpicture_fill(&pic_raw, (uint8_t*)pixelBuffer, PIX_FMT_RGB24, width, height);
// flip
pic_raw.data[0] += (height - 1) * pic_raw.linesize[0];
pic_raw.linesize[0] *= -1;
sws_scale(convertCtx, pic_raw.data, pic_raw.linesize, 0, height, f->data, f->linesize);
out_size = avcodec_encode_video(c, outputBuffer, outputBufferSize, f);
(You can adapt PIX_FMT_RGB24 to your needs and read from cv::Mat without copying data.)
av_image_fill_arrays() does the job: it fills the frame's data[] and linesize[] from an existing buffer without allocating any memory.
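A minimal sketch of that approach, assuming width, height, and the OpenCV Mat originalBGRImage from the question (av_image_fill_arrays() is declared in libavutil/imgutils.h; no buffer is allocated, the frame just points into the Mat's pixels):
AVFrame *f = av_frame_alloc();
f->format = AV_PIX_FMT_BGR24;
f->width  = width;
f->height = height;
// point f->data/f->linesize at the existing BGR pixels (alignment 1, no copy)
av_image_fill_arrays(f->data, f->linesize, originalBGRImage.data,
                     AV_PIX_FMT_BGR24, width, height, 1);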
A late answer, but after spending many hours on this, I want to share what I found.
In the documentation:
/**
* AVBuffer references backing the data for this frame. All the pointers in
* data and extended_data must point inside one of the buffers in buf or
* extended_buf. This array must be filled contiguously -- if buf[i] is
* non-NULL then buf[j] must also be non-NULL for all j < i.
*
* There may be at most one AVBuffer per data plane, so for video this array
* always contains all the references. For planar audio with more than
* AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
* this array. Then the extra AVBufferRef pointers are stored in the
* extended_buf array.
*/
AVBufferRef *buf[AV_NUM_DATA_POINTERS];
So buf is a "smart pointer" for data (and extended_buf for extended_data).
For example, here I use a single-plane image with only one linesize:
int size = av_image_get_buffer_size(AVPixelFormat::AV_PIX_FMT_BGRA, width, height, 1);
AVBufferRef* dataref = av_buffer_alloc(size); // this is what av_frame_unref() will release later
memcpy(dataref->data, your_buffer, size );
AVFrame* frame = av_frame_alloc();
av_image_fill_arrays(frame->data, frame->linesize, dataref->data, AVPixelFormat::AV_PIX_FMT_BGRA, width, height, 1);
frame->buf[0] = dataref;
av_frame_unref() will unref frame->buf and free the buffer once the reference count drops to zero.
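For completeness, a short sketch of releasing such a frame: av_frame_free() unrefs the frame's buffers first, so dataref's buffer is freed once no other reference holds it.
av_frame_free(&frame); // unrefs frame->buf[0] and frees the AVFrame itself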
For the record, this is homework, so help as little or as much with that in mind. We are using constant memory to store a "mask matrix" that will be used to perform a convolution on a larger matrix. In the host code I copy the mask to constant memory using cudaMemcpyToSymbol().
My question is: once this is copied over and I launch my device kernel code, how does the device know where to access the constant memory mask matrix? Is there a pointer that I need to pass in on kernel launch? Most of the code that the professor gave us is not supposed to be changed (there is no pointer to the mask passed in), but there is always the possibility that he made a mistake (although it is most likely my misunderstanding of something).
Is the constant memory declaration supposed to be included in the separate kernel.cu file?
I am minimizing the code to just show the things having to do with constant memory, so please don't point out if something is not initialized, etc. There is code for that, but it is not of concern at this time.
main.cu:
#include <stdio.h>
#include "kernel.cu"
__constant__ float M_d[FILTER_SIZE * FILTER_SIZE];
int main(int argc, char* argv[])
{
Matrix M_h, N_h, P_h; // M: filter, N: input image, P: output image
/* Allocate host memory */
M_h = allocateMatrix(FILTER_SIZE, FILTER_SIZE);
N_h = allocateMatrix(imageHeight, imageWidth);
P_h = allocateMatrix(imageHeight, imageWidth);
/* Initialize filter and images */
initMatrix(M_h);
initMatrix(N_h);
cudaError_t cudda_ret = cudaMemcpyToSymbol(M_d, M_h.elements, M_h.height * M_h.width * sizeof(float), 0, cudaMemcpyHostToDevice);
//char* cudda_ret_pointer = cudaGetErrorString(cudda_ret);
if( cudda_ret != cudaSuccess){
printf("\n\ncudaMemcpyToSymbol failed\n\n");
printf("%s, \n\n", cudaGetErrorString(cudda_ret));
}
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
//INSERT CODE HERE
//block size is 16x16
// \\\\\\\\\\\\\**DONE**
dim3 dim_grid = dim3(ceil(N_h.width / (float) BLOCK_SIZE), ceil(N_h.height / (float) BLOCK_SIZE));
dim3 dim_block = dim3(BLOCK_SIZE, BLOCK_SIZE);
//KERNEL Launch
convolution<<<dim_grid, dim_block>>>(N_d, P_d);
return 0;
}
kernel.cu: THIS IS WHERE I DO NOT KNOW HOW TO ACCESS THE CONSTANT MEMORY.
//__constant__ float M_c[FILTER_SIZE][FILTER_SIZE];
__global__ void convolution(Matrix N, Matrix P)
{
/********************************************************************
Determine input and output indexes of each thread
Load a tile of the input image to shared memory
Apply the filter on the input image tile
Write the compute values to the output image at the correct indexes
********************************************************************/
//INSERT KERNEL CODE HERE
//__shared__ float N_shared[BLOCK_SIZE][BLOCK_SIZE];
//int row = (blockIdx.y * blockDim.y) + threadIdx.y;
//int col = (blockIdx.x * blockDim.x) + threadIdx.x;
}
In "classic" CUDA compilation you must define all code and symbols (textures, constant memory, device functions) and any host API calls which access them (including kernel launches, binding to textures, copying to symbols) within the same translation unit. This means, effectively, in the same file (or via multiple include statements within the same file). This is because "classic" CUDA compilation doesn't include a device code linker.
Since CUDA 5 was released, there is the possibility of using separate compilation mode and linking different device code objects into a single fatbinary payload on architectures which support it. In that case, you need to declare any __constant__ variables using the extern keyword and define the symbol exactly once.
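For example, a sketch of the separate-compilation arrangement (building with nvcc -rdc=true; the header name is made up here):
// constants.cuh -- declaration, included everywhere the symbol is used
extern __constant__ float M_c[FILTER_SIZE * FILTER_SIZE];
// kernel.cu -- the single definition
__constant__ float M_c[FILTER_SIZE * FILTER_SIZE];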
If you can't use separate compilation, then the usual workaround is to define the __constant__ symbol in the same .cu file as your kernel, and include a small host wrapper function which just calls cudaMemcpyToSymbol to set the __constant__ symbol in question. You would probably do the same with kernel calls and texture operations.
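A minimal sketch of that workaround applied to this assignment (the wrapper name copyMaskToDevice is hypothetical; everything lives in kernel.cu, and main.cu calls only the wrapper):
// kernel.cu
__constant__ float M_c[FILTER_SIZE * FILTER_SIZE];
// host-side wrapper: main.cu never names the __constant__ symbol directly
cudaError_t copyMaskToDevice(const float* M_h, size_t bytes)
{
    return cudaMemcpyToSymbol(M_c, M_h, bytes);
}
__global__ void convolution(Matrix N, Matrix P)
{
    // the kernel reads M_c[...] directly; no pointer argument is needed
}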
Below is a "minimum-sized" example showing the use of __constant__ symbols. You do not need to pass any pointer to the __global__ function.
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <conio.h> // for getch()
__constant__ float test_const;
__global__ void test_kernel(float* d_test_array) {
d_test_array[threadIdx.x] = test_const;
}
int main(int argc, char **argv) {
float test = 3.f;
int N = 16;
float* test_array = (float*)malloc(N*sizeof(float));
float* d_test_array;
cudaMalloc((void**)&d_test_array,N*sizeof(float));
cudaMemcpyToSymbol(test_const, &test, sizeof(float));
test_kernel<<<1,N>>>(d_test_array);
cudaMemcpy(test_array,d_test_array,N*sizeof(float),cudaMemcpyDeviceToHost);
for (int i=0; i<N; i++) printf("%i %f\n",i,test_array[i]);
getch();
return 0;
}
I am sending a (particle) struct using the MPI_Type_create_struct() as done e.g. here, or explained in detail here.
I'm collecting all particles which are going to a specific proc, memcpy()'ing them into the send buffer, and MPI_Isend()'ing them.
So far, so good. MPI_Iprobe()'ing for the message gives me the right count of particles sent.
So I MPI_Recv() the buffer and extract the data (now even by copying the structs one by one). No matter how many particles I send, only the first particle's data is correct.
There are three possible mistakes:
1. MPI_Type_create_struct() doesn't create a proper map of my struct, due to my usage of offsetof() as in the first link. Maybe my struct contains invisible padding, as explained in the second link.
2. I'm making some simple mistake while copying particles into the send buffer and from the receive buffer back (I do print the send buffer, and it works, but maybe I'm overlooking something).
3. Something totally different.
(Sorry for the really ugly presentation of the code; I could not manage to present it in a decent way. You'll find the code here, with the line already marked, on GitHub, too!)
Here is the construction of the MPI datatype,
typedef struct {
int ID;
double x[DIM];
} pchase_particle_t;
const int items = 2;
int block_lengths[2] = {1, DIM};
MPI_Datatype mpi_types[2] = {MPI_INT, MPI_DOUBLE};
MPI_Aint offsets[2];
offsets[0] = offsetof(pchase_particle_t, ID);
offsets[1] = offsetof(pchase_particle_t, x);
MPI_Type_create_struct(items, block_lengths, offsets, mpi_types, &W->MPI_Particle);
MPI_Type_commit(&W->MPI_Particle);
the sending
/* handle all mpi send/recv status data */
MPI_Request *send_request = P4EST_ALLOC(MPI_Request, W->p4est->mpisize);
MPI_Status *recv_status = P4EST_ALLOC(MPI_Status, W->p4est->mpisize);
/* setup send/recv buffers */
pchase_particle_t **recv_buf = P4EST_ALLOC(pchase_particle_t *, num_senders);
pchase_particle_t **send_buf = P4EST_ALLOC(pchase_particle_t *, num_receivers);
int recv_count = 0, recv_length, flag, j;
/* send all particles to their belonging procs */
for (i = 0; i < num_receivers; i++) {
/* resolve particle list for proc i */
sc_list_t *tmpList = *((sc_list_t **) sc_array_index(W->particles_to, receivers[i]));
pchase_particle_t * tmpParticle;
int send_count = 0;
/* get space for the particles to be sent */
send_buf[i] = P4EST_ALLOC(pchase_particle_t, tmpList->elem_count);
/* copy all particles into the send buffer and remove them from this proc */
while(tmpList->first != NULL){
tmpParticle = sc_list_pop(tmpList);
memcpy(send_buf[i] + send_count * sizeof(pchase_particle_t), tmpParticle, sizeof(pchase_particle_t));
/* free particle */
P4EST_FREE(tmpParticle);
/* update particle counter */
send_count++;
}
/* print send buffer */
for (j = 0; j < send_count; j++) {
pchase_particle_t *tmpParticle = send_buf[i] + j * sizeof(pchase_particle_t);
printf("[pchase %i sending] particle[%i](%lf,%lf)\n", W->p4est->mpirank, tmpParticle->ID, tmpParticle->x[0], tmpParticle->x[1]);
}
printf("[pchase %i sending] particle count: %i\n", W->p4est->mpirank, send_count);
/* send particles to right owner */
mpiret = MPI_Isend(send_buf[i], send_count, W->MPI_Particle, receivers[i], 13, W->p4est->mpicomm, &send_request[i]);
SC_CHECK_MPI(mpiret);
}
and the receiving.
recv_count = 0;
/* check for messages until all arrived */
while (recv_count < num_senders) {
/* probe if any of the sender has already sent his message */
for (i = 0; i < num_senders; i++) {
MPI_Iprobe(senders[i], MPI_ANY_TAG, W->p4est->mpicomm,
&flag, &recv_status[i]);
if (flag) {
/* resolve number of particles receiving */
MPI_Get_count(&recv_status[i], W->MPI_Particle, &recv_length);
printf("[pchase %i receiving message] %i particles arrived from sender %i with tag %i\n",
W->p4est->mpirank, recv_length, recv_status[i].MPI_SOURCE, recv_status[i].MPI_TAG);
/* get space for the particles to be sent */
recv_buf[recv_count] = P4EST_ALLOC(pchase_particle_t, recv_length);
/* receive a list with recv_length particles */
mpiret = MPI_Recv(recv_buf[recv_count], recv_length, W->MPI_Particle, recv_status[i].MPI_SOURCE,
recv_status[i].MPI_TAG, W->p4est->mpicomm, &recv_status[i]);
SC_CHECK_MPI(mpiret);
/*
* insert all received particles into the
* push list
*/
pchase_particle_t *tmpParticle;
for (j = 0; j < recv_length; j++) {
/*
* retrieve all particle details from
* recv_buf
*/
tmpParticle = recv_buf[recv_count] + j * sizeof(pchase_particle_t);
pchase_particle_t *addParticle = P4EST_ALLOC(pchase_particle_t,1);
addParticle->ID=tmpParticle->ID;
addParticle->x[0] = tmpParticle->x[0];
addParticle->x[1] = tmpParticle->x[1];
printf("[pchase %i receiving] particle[%i](%lf,%lf)\n",
W->p4est->mpirank, addParticle->ID, addParticle->x[0], addParticle->x[1]);
/* push received particle to push list and update world counter */
sc_list_append(W->particle_push_list, addParticle);
W->n_particles++;
}
/* we received another particle list */
recv_count++;
}
}
}
edit: "Only the first particle's data is correct" means that all its properties (ID and coordinates) are identical to those of the sent particle. The others, however, are initialized with zeros, i.e. ID=0, x[0]=0.0, x[1]=0.0. Maybe that's a hint for the solution.
There is an error in your pointer arithmetic. send_buf[i] is already of type pchase_particle_t * and therefore send_buf[i] + j * sizeof(pchase_particle_t) does not point to the j-th element of the i-th buffer but rather to the j * sizeof(pchase_particle_t)-th element. Thus your particles are not stored contiguously in memory but rather separated by sizeof(pchase_particle_t) - 1 empty array elements. These get sent instead of the correct particles because the MPI_Send call accesses buffer memory contiguously. The same applies to the code of the receiver.
You do not see the error in the sender code because your debug print uses the same incorrect pointer arithmetic and hence accesses memory using the same stride. I guess your send counts are small and you get memory allocated on the data segment heap, otherwise you should have received SIGSEGV for out-of-bound array access very early in the data packing process (e.g. in the memcpy part).
Resolution: do not multiply the array index by sizeof(pchase_particle_t).
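Concretely, a sketch of the fixed lines (pointer arithmetic on a typed pointer already advances in units of the pointee's size):
/* sender: element send_count of the i-th buffer */
memcpy(send_buf[i] + send_count, tmpParticle, sizeof(pchase_particle_t));
/* debug print: element j */
pchase_particle_t *tmpParticle = send_buf[i] + j;
/* receiver: element j of the current receive buffer */
tmpParticle = recv_buf[recv_count] + j;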
All, I wrote a very simple OpenCL kernel which transforms an RGB image to grayscale using simple averaging.
Some background:
The image is stored in mapped memory as a 24-bit, non-padded memory block
The output array is stored in pinned memory (mapped with clEnqueueMapBuffer) and is 8 bpp
There are two buffers allocated on the device (clCreateBuffer); one is specifically for reading (which we clEnqueueWriteBuffer into before the kernel starts) and the other is specifically for writing (which we clEnqueueReadBuffer from after the kernel finishes)
I am running this on a 1280x960 image. A serial version of the algorithm averages 60 ms; the OpenCL kernel averages 200 ms! I'm doing something wrong, but I have no idea how to proceed or what to optimize. (Timing my reads/writes without a kernel call, the algorithm runs in 15 ms.)
I am attaching the kernel setup (sizes and arguments) as well as the kernel.
EDIT: So I wrote an even dumber kernel that does no global memory accesses inside it, and it was still 150 ms... This is still ridiculously slow. I thought maybe I was messing up global memory reads; do they have to be 4-byte aligned or something? Nope...
EDIT 2: Removing all the parameters from my kernel gave me a significant speed-up... I'm confused: I thought that since I clEnqueueWriteBuffer, the kernel should be doing no memory transfers from host->device or device->host...
EDIT 3: Figured it out, but I still don't understand why. If anyone could explain it, I would be glad to award the correct answer to them. The problem was passing the custom structs by value. It looks like I'll need to allocate a global memory location for them and pass their cl_mems.
Kernel Call:
//Copy input to device
result = clEnqueueWriteBuffer(handles->queue, d_input_data, CL_TRUE, 0, h_input.widthStep*h_input.height, (void *)input->imageData, 0, 0, 0);
if(check_result(result, "opencl_rgb_to_gray", "Failed to write to input buffer on device!")) return 0;
//Set kernel arguments
result = clSetKernelArg(handles->current_kernel, 0, sizeof(OpenCLImage), (void *)&h_input);
if(check_result(result, "opencl_rgb_to_gray", "Failed to set input struct.")) return 0;
result = clSetKernelArg(handles->current_kernel, 1, sizeof(cl_mem), (void *)&d_input_data);
if(check_result(result, "opencl_rgb_to_gray", "Failed to set input data.")) return 0;
result = clSetKernelArg(handles->current_kernel, 2, sizeof(OpenCLImage), (void *)&h_output);
if(check_result(result, "opencl_rgb_to_gray", "Failed to set output struct.")) return 0;
result = clSetKernelArg(handles->current_kernel, 3, sizeof(cl_mem), (void *)&d_output_data);
if(check_result(result, "opencl_rgb_to_gray", "Failed to set output data.")) return 0;
//Determine run parameters
global_work_size[0] = input->width;//(unsigned int)((input->width / (float)local_work_size[0]) + 0.5);
global_work_size[1] = input->height;//(unsigned int)((input->height/ (float)local_work_size[1]) + 0.5);
printf("Global Work Group Size: %d %d\n", global_work_size[0], global_work_size[1]);
//Call kernel
result = clEnqueueNDRangeKernel(handles->queue, handles->current_kernel, 2, 0, global_work_size, local_work_size, 0, 0, 0);
if(check_result(result, "opencl_rgb_to_gray", "Failed to run kernel!")) return 0;
result = clFinish(handles->queue);
if(check_result(result, "opencl_rgb_to_gray", "Failed to finish!")) return 0;
//Copy output
result = clEnqueueReadBuffer(handles->queue, d_output_data, CL_TRUE, 0, h_output.widthStep*h_output.height, (void *)output->imageData, 0, 0, 0);
if(check_result(result, "opencl_rgb_to_gray", "Failed to write to output buffer on device!")) return 0;
Kernel:
typedef struct OpenCLImage_t
{
int width;
int widthStep;
int height;
int channels;
} OpenCLImage;
__kernel void opencl_rgb_kernel(OpenCLImage input, __global unsigned char* input_data, OpenCLImage output, __global unsigned char * output_data)
{
int pixel_x = get_global_id(0);
int pixel_y = get_global_id(1);
unsigned char * cur_in_pixel, *cur_out_pixel;
float avg = 0;
cur_in_pixel = (unsigned char *)(input_data + pixel_y*input.widthStep + pixel_x * input.channels);
cur_out_pixel = (unsigned char *)(output_data + pixel_y*output.widthStep + pixel_x * output.channels);
avg += cur_in_pixel[0];
avg += cur_in_pixel[1];
avg+= cur_in_pixel[2];
avg /=3.0f;
if(avg > 255.0)
avg = 255.0;
else if(avg < 0)
avg = 0;
*cur_out_pixel = avg;
}
The overhead of copying the struct's value to every thread that is created is the likely reason for the extra time, whereas for global memory, passing a reference is enough. Only the SDK implementer will be able to answer exactly. :)
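A sketch of the fix described in the question's EDIT 3: put each struct in a small buffer and pass the cl_mem instead of the struct by value (names follow the question's code; "context" stands for whatever cl_context the application holds, and error handling is elided):
/* host side: a read-only buffer holding the image descriptor */
cl_mem d_input_desc = clCreateBuffer(context,
                                     CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                                     sizeof(OpenCLImage), &h_input, &result);
result = clSetKernelArg(handles->current_kernel, 0, sizeof(cl_mem), (void *)&d_input_desc);
/* kernel side: take the struct through a pointer in constant memory */
__kernel void opencl_rgb_kernel(__constant OpenCLImage* input,
                                __global unsigned char* input_data,
                                __constant OpenCLImage* output,
                                __global unsigned char* output_data)
{
    /* use input->widthStep, input->channels, etc. as before */
}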
You may want to try a local_work_size like [64, 1, 1] in order to coalesce your memory accesses (note that 64 is a divisor of 1280).
As previously said, you have to use a profiler in order to get more information. Are you using an NVIDIA card? Then download CUDA 4 (not 5), as it contains an OpenCL profiler.
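For instance, a hedged tweak to the launch in the question's host code (64 divides the 1280-pixel width, so work-groups line up with rows; the global size must be a multiple of the local size):
size_t local_work_size[2] = {64, 1}; /* 1280 / 64 = 20 work-groups per row */
global_work_size[0] = input->width;
global_work_size[1] = input->height;
result = clEnqueueNDRangeKernel(handles->queue, handles->current_kernel, 2, 0,
                                global_work_size, local_work_size, 0, 0, 0);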
Your performance must be far from the optimum. Change the local work size and the global work size, and try to process two or four pixels per thread. Can you change the way the pixels are stored prior to your processing? If so, split the interleaved data into three arrays (one per channel) in order to coalesce memory accesses more effectively.
You can hide your memory transfers behind GPU work: this will be easier to do with a profiler at hand.