In another post I asked about time measurement in C. Now I want to know how to compare the result of the C "function" against the OpenCL "function".
This is the host code (C plus OpenCL):
#define PROGRAM_FILE "sum.cl"
#define KERNEL_FUNC "float_sum"
#define ARRAY_SIZE 1000000
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <CL/cl.h>
int main()
{
/* OpenCL Data structures */
cl_platform_id platform;
cl_device_id device;
cl_context context;
cl_program program;
cl_kernel kernel;
cl_command_queue queue;
cl_mem vec_buffer, result_buffer;
cl_event prof_event;
/* ********************* */
/* C Data Structures / Data types */
FILE *program_handle; //Kernel file handle
char *program_buffer; //Kernel buffer
float *vec, *non_parallel;
float result[ARRAY_SIZE];
size_t program_size; //Kernel file size
cl_ulong time_start, time_end, total_time = 0; //must start at zero before accumulating
int i;
/* ****************************** */
/* Errors */
cl_int err;
/* ****** */
non_parallel = (float*)malloc(ARRAY_SIZE * sizeof(float));
vec = (float*)malloc(ARRAY_SIZE * sizeof(float));
//Initialize the vector of floats
for(i = 0; i < ARRAY_SIZE; i++)
vec[i] = i + 1;
/************************* C Function **************************************/
clock_t start, end;
start = clock();
for( i = 0; i < ARRAY_SIZE; i++)
{
non_parallel[i] = vec[i] * vec[i];
}
end = clock();
printf( "Number of seconds: %f\n", (clock()-start)/(double)CLOCKS_PER_SEC );
free(non_parallel);
/***************************************************************************/
clGetPlatformIDs(1, &platform, NULL);//Just want NVIDIA platform
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, NULL);
context = clCreateContext(NULL, 1, &device, NULL, NULL, &err);
// Context error?
if(err)
{
perror("Cannot create context");
return 1;
}
//Read the kernel file
program_handle = fopen(PROGRAM_FILE,"r");
fseek(program_handle, 0, SEEK_END);
program_size = ftell(program_handle);
rewind(program_handle);
program_buffer = (char*)malloc(program_size + 1);
program_buffer[program_size] = '\0';
fread(program_buffer, sizeof(char), program_size, program_handle);
fclose(program_handle);
//Create the program
program = clCreateProgramWithSource(context, 1, (const char**)&program_buffer,
&program_size, &err);
if(err)
{
perror("Cannot create program");
return 1;
}
free(program_buffer);
clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
kernel = clCreateKernel(program, KERNEL_FUNC, &err);
if(err)
{
perror("Cannot create kernel");
return 1;
}
queue = clCreateCommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE, &err);
if(err)
{
perror("Cannot create command queue");
return 1;
}
vec_buffer = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
sizeof(float) * ARRAY_SIZE, vec, &err);
result_buffer = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(float)*ARRAY_SIZE, NULL, &err);
if(err)
{
perror("Cannot create the vector buffer");
return 1;
}
clSetKernelArg(kernel, 0, sizeof(cl_mem), &vec_buffer);
clSetKernelArg(kernel, 1, sizeof(cl_mem), &result_buffer);
size_t global_size = ARRAY_SIZE;
size_t local_size = 0;
clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &global_size, NULL, 0, NULL, &prof_event);
clEnqueueReadBuffer(queue, result_buffer, CL_TRUE, 0, sizeof(float)*ARRAY_SIZE, result, 0, NULL, NULL);
clFinish(queue);
clGetEventProfilingInfo(prof_event, CL_PROFILING_COMMAND_START,
sizeof(time_start), &time_start, NULL);
clGetEventProfilingInfo(prof_event, CL_PROFILING_COMMAND_END,
sizeof(time_end), &time_end, NULL);
total_time += time_end - time_start;
printf("\nAverage time in nanoseconds = %lu\n", total_time/ARRAY_SIZE);
clReleaseMemObject(vec_buffer);
clReleaseMemObject(result_buffer);
clReleaseKernel(kernel);
clReleaseCommandQueue(queue);
clReleaseProgram(program);
clReleaseContext(context);
free(vec);
return 0;
}
And the kernel is:
__kernel void float_sum(__global float* vec,__global float* result){
int gid = get_global_id(0);
result[gid] = vec[gid] * vec[gid];
}
Now, the results are:
Number of seconds: 0.010000 <- This is the for the C code
Average time in nanoseconds = 140737284 <- OpenCL function
0.1407 seconds is the OpenCL kernel execution time, and it's more than the C function took. Is that correct? Because I thought OpenCL should be faster than the non-parallel C algorithm...
Executing parallel code on the GPU is not necessarily faster than executing it on the CPU. Take into account that you also have to transfer the data to and from GPU memory in addition to doing the computations.
In your example you are transferring 2 * N items and doing an O(N) operation in parallel, which is a very inefficient use of the GPU. Therefore, it's quite likely that the CPU is indeed faster for this particular computation.
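If you want to see the transfer cost in your numbers, one option (a minimal sketch, reusing the profiling-enabled queue and buffers from your code) is to attach an event to the read-back as well and compare it to the kernel time:
cl_event read_event;
clEnqueueReadBuffer(queue, result_buffer, CL_TRUE, 0, sizeof(float) * ARRAY_SIZE,
                    result, 0, NULL, &read_event);
cl_ulong rd_start, rd_end;
clGetEventProfilingInfo(read_event, CL_PROFILING_COMMAND_START, sizeof(rd_start), &rd_start, NULL);
clGetEventProfilingInfo(read_event, CL_PROFILING_COMMAND_END, sizeof(rd_end), &rd_end, NULL);
printf("Read-back time: %lu ns\n", (unsigned long)(rd_end - rd_start));
clReleaseEvent(read_event);
For small O(N) workloads like this one, the transfer time alone often exceeds the CPU's total compute time.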
Just for others coming here for help: a short introduction to profiling kernel runtime with OpenCL.
Enable profiling mode:
cmdQueue = clCreateCommandQueue(context, *devices, CL_QUEUE_PROFILING_ENABLE, &err);
Profiling kernel:
cl_event prof_event;
clEnqueueNDRangeKernel(cmdQueue, kernel, 1, NULL, globalWorkSize, NULL, 0, NULL, &prof_event);
Read profiling data in:
cl_ulong ev_start_time=(cl_ulong)0;
cl_ulong ev_end_time=(cl_ulong)0;
clFinish(cmdQueue);
err = clWaitForEvents(1, &prof_event);
err |= clGetEventProfilingInfo(prof_event, CL_PROFILING_COMMAND_START, sizeof(cl_ulong), &ev_start_time, NULL);
err |= clGetEventProfilingInfo(prof_event, CL_PROFILING_COMMAND_END, sizeof(cl_ulong), &ev_end_time, NULL);
Calculate kernel execution time:
float run_time_gpu = (float)(ev_end_time - ev_start_time)/1000; // in usec
Your approach with
total_time/ARRAY_SIZE
is not what you want. It will give you the run time per work item.
Operations / time in nanoseconds will give you GFLOPS (giga floating point operations per second).
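For example (a rough sketch, assuming one multiplication per element as in this kernel, i.e. ARRAY_SIZE floating point operations in total):
cl_ulong kernel_ns = time_end - time_start; //kernel runtime in nanoseconds
double gflops = (double)ARRAY_SIZE / (double)kernel_ns; //operations per nanosecond == GFLOPS
printf("Kernel time: %lu ns, ~%.3f GFLOPS\n", (unsigned long)kernel_ns, gflops);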
This is one big problem with your application:
size_t global_size = ARRAY_SIZE;
size_t local_size = 0;
You are creating single-item work groups, which will let most of the gpu sit idle. In many cases, using single-item work groups will only utilize 1/15th of your gpu.
Instead try this:
size_t global_size = ARRAY_SIZE; //total number of work items; must be divisible by local_size
size_t local_size = 250;         //a power of 2 usually works best; 250 divides your 1M array size evenly
Now you're creating large groups that will better saturate the ALU of your graphics hardware. The kernel will run fine the way you have it now, but there are ways to get more out of the kernel portion too.
Kernel optimization: pass ARRAY_SIZE into the kernel as an additional parameter, and use fewer groups of a more optimal group size. That also removes the requirement that global_size exactly match ARRAY_SIZE. The work item's global id is never used in this kernel; it is not needed, because the total size is passed in.
__kernel void float_sum(__global float* vec, __global float* result, const int count){
    int lId = get_local_id(0);
    int lSize = get_local_size(0);
    int grId = get_group_id(0);
    int opsPerGroup = count / get_num_groups(0); //elements each group is responsible for
    int startIndex = grId * opsPerGroup;
    int endIndex = startIndex + opsPerGroup;
    if(grId == get_num_groups(0) - 1){
        endIndex = count; //the last group picks up any remainder
    }
    for(int i = startIndex + lId; i < endIndex; i += lSize){
        result[i] = vec[i] * vec[i];
    }
}
Now you might be thinking that there are an awful lot of variables for such a simple kernel. Remember that each execution of the kernel will do multiple operations on the data, rather than just one. Using the values below, on my radeon 5870 (20 compute units), each work item ends up computing 781 or 782 values in its for loop. Each group would compute 50000 pieces of data. The overhead of the variables I use is far less than the overhead of creating 4000 work groups -- or 1 million.
size_t local_size = 64; //also try other multiples of 16 or 64 for a gpu; or multiples of your core count for a cpu kernel
size_t global_size = numComputeUnits * local_size; //one work group per compute unit; each work item loops over many elements
See here about how to get the value for numComputeUnits
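In short, it comes from a standard device query (a minimal sketch):
cl_uint numComputeUnits;
clGetDeviceInfo(device, CL_DEVICE_MAX_COMPUTE_UNITS,
                sizeof(numComputeUnits), &numComputeUnits, NULL);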
Related
I'm using Ubuntu 20.04 to run my OpenCL program with C, but whatever function I execute, the results are always zero or otherwise
illogical; it's as if the kernel is not working.
If anyone has OpenCL installed, please try this program; I want to know whether the problem is in the program or in my OpenCL installation.
This is the program I use.
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#include <stdio.h>
#include <stdlib.h>
#include <CL/cl.h>
#define MAX_SOURCE_SIZE (0x100000)
int main(void) {
// Create the two input vectors
int i;
const int LIST_SIZE = 10;
int* A = (int*)malloc(sizeof(int)*LIST_SIZE);
int* B = (int*)malloc(sizeof(int)*LIST_SIZE);
int* C = (int*)malloc(sizeof(int)*LIST_SIZE);
for(i = 0; i < LIST_SIZE; i++) {
A[i] = i;
B[i] = LIST_SIZE - i;
C[i] = 0;
}
// Load the kernel source code into the array source_str
/*FILE *fp;
char *source_str;
fp = fopen("vector_add_kernel.cl", "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
size_t source_size;
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );*/
//size_t source_size;
const char* source_str =
"__kernel void vector_add(__global int *A, __global int *B, __global int *C) {\n"
" int i = get_global_id(0);\n"
" if(i>=10) return;\n"
" C[i] = A[i]+B[i];\n"
"}"
;
// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_ALL, 1,
&device_id, &ret_num_devices);
// Create an OpenCL context
cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
// Create a command queue
cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, 0, &ret);
// Create memory buffers on the device for each vector
cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem c_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
// Copy the lists A and B to their respective memory buffers
ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), A, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, b_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), B, 0, NULL, NULL);
// Create a program from the kernel source
cl_program program = clCreateProgramWithSource(context, 1,
(const char **)&source_str, NULL, &ret);
// Build the program
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
// Create the OpenCL kernel
cl_kernel kernel = clCreateKernel(program, "vector_add", &ret);
// Set the arguments of the kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&a_mem_obj);
ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&b_mem_obj);
ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&c_mem_obj);
// Execute the OpenCL kernel on the list
size_t local_item_size = 64; // Divide work items into groups of 64
size_t global_item_size = ((LIST_SIZE+local_item_size-1)/local_item_size)*local_item_size; // Process the entire lists
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
&global_item_size, &local_item_size, 0, NULL, NULL);
clFinish(command_queue);
// Read the memory buffer C on the device to the local variable C
ret = clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), C, 0, NULL, NULL);
// Display the result to the screen
for(i = 0; i < LIST_SIZE; i++)
printf("%d + %d = %d\n", A[i], B[i], C[i]);
// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(a_mem_obj);
ret = clReleaseMemObject(b_mem_obj);
ret = clReleaseMemObject(c_mem_obj);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(A);
free(B);
free(C);
return 0;
}
The code looks oddly familiar to me...
On my PC it works as intended. I suppose the issue is that on your system no OpenCL device is detected. With this code snippet, you can select a platform and device:
// Get platform and device information
const int select_platform = 0;
const int select_device = 0;
cl_platform_id* platform_ids = NULL;
cl_uint nr_platforms = 0;
cl_device_id* device_ids = NULL;
cl_uint nr_devices = 0;
cl_device_id device_id = NULL;
cl_int ret = clGetPlatformIDs(0, NULL, &nr_platforms);
if(ret!=CL_SUCCESS || nr_platforms==0) std::cerr << "no OpenCL platforms found" << std::endl;
platform_ids = (cl_platform_id*)malloc(nr_platforms * sizeof(*platform_ids));
ret = clGetPlatformIDs(nr_platforms, platform_ids, &nr_platforms);
ret = clGetDeviceIDs(platform_ids[select_platform], CL_DEVICE_TYPE_ALL, 0, device_ids, &nr_devices);
if(ret!=CL_SUCCESS || nr_devices==0) std::cerr << "no OpenCL devices found on that platform" << std::endl;
device_ids = (cl_device_id*)malloc(nr_devices * sizeof(*device_ids));
ret = clGetDeviceIDs(platform_ids[select_platform], CL_DEVICE_TYPE_ALL, nr_devices, device_ids, &nr_devices);
device_id = device_ids[select_device];
char name[1024];
clGetDeviceInfo(device_id, CL_DEVICE_NAME, 1024, name, NULL);
std::cout << "selected device: " << name << std::endl;
Check different values of select_platform and select_device. If there is no device found on any platform, make sure to have compatible hardware and the latest graphics drivers or CPU runtime installed.
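To see everything that is available, you can extend the snippet above with an enumeration loop (a sketch; clGetPlatformInfo and clGetDeviceIDs/clGetDeviceInfo are standard OpenCL calls):
for(cl_uint p = 0; p < nr_platforms; p++) {
    char pname[1024];
    clGetPlatformInfo(platform_ids[p], CL_PLATFORM_NAME, sizeof(pname), pname, NULL);
    std::cout << "platform " << p << ": " << pname << std::endl;
    cl_uint nd = 0;
    clGetDeviceIDs(platform_ids[p], CL_DEVICE_TYPE_ALL, 0, NULL, &nd);
    cl_device_id* devs = (cl_device_id*)malloc(nd * sizeof(*devs));
    clGetDeviceIDs(platform_ids[p], CL_DEVICE_TYPE_ALL, nd, devs, NULL);
    for(cl_uint d = 0; d < nd; d++) {
        char dname[1024];
        clGetDeviceInfo(devs[d], CL_DEVICE_NAME, sizeof(dname), dname, NULL);
        std::cout << "  device " << d << ": " << dname << std::endl;
    }
    free(devs);
}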
To make OpenCL development much less painful, consider this OpenCL-Wrapper. This automatically selects the fastest available OpenCL device for you in 1 line of code:
Device device(select_device_with_most_flops());
Alternatively, you can automatically select the device with the most memory
Device device(select_device_with_most_memory());
or a device with specified ID:
Device device(select_device_with_id(1));
I hope you are well. I have a problem with an OpenCL program; I am running the following vector addition program
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#include <stdio.h>
#include <stdlib.h>
#include <CL/cl.h>
#define MAX_SOURCE_SIZE (0x100000)
int main(void) {
// Create the two input vectors
int i;
const int LIST_SIZE = 10;
int *A = (int*)malloc(sizeof(int)*LIST_SIZE);
int *B = (int*)malloc(sizeof(int)*LIST_SIZE);
for(i = 0; i < LIST_SIZE; i++) {
A[i] = i;
B[i] = LIST_SIZE - i;
}
// Load the kernel source code into the array source_str
FILE *fp;
char *source_str;
size_t source_size;
fp = fopen("vector_add_kernel.cl", "r");
if (!fp) {
fprintf(stderr, "Failed to load kernel.\n");
exit(1);
}
source_str = (char*)malloc(MAX_SOURCE_SIZE);
source_size = fread( source_str, 1, MAX_SOURCE_SIZE, fp);
fclose( fp );
// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs( platform_id, CL_DEVICE_TYPE_CPU, 1,
&device_id, &ret_num_devices);
// Create an OpenCL context
cl_context context = clCreateContext( NULL, 1, &device_id, NULL, NULL, &ret);
// Create a command queue
cl_command_queue command_queue = clCreateCommandQueueWithProperties(context, device_id, 0, &ret);
// Create memory buffers on the device for each vector
cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem c_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
// Copy the lists A and B to their respective memory buffers
ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), A, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, b_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), B, 0, NULL, NULL);
// Create a program from the kernel source
cl_program program = clCreateProgramWithSource(context, 1,
(const char **)&source_str, (const size_t *)&source_size, &ret);
// Build the program
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
// Create the OpenCL kernel
cl_kernel kernel = clCreateKernel(program, "vector_add", &ret);
// Set the arguments of the kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void *)&a_mem_obj);
ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void *)&b_mem_obj);
ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void *)&c_mem_obj);
// Execute the OpenCL kernel on the list
size_t global_item_size = LIST_SIZE; // Process the entire lists
size_t local_item_size = 64; // Divide work items into groups of 64
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL,
&global_item_size, &local_item_size, 0, NULL, NULL);
// Read the memory buffer C on the device to the local variable C
int *C = (int*)malloc(sizeof(int)*LIST_SIZE);
ret = clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), C, 0, NULL, NULL);
// Display the result to the screen
for(i = 0; i < LIST_SIZE; i++)
printf("%d + %d = %d\n", A[i], B[i], C[i]);
// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(a_mem_obj);
ret = clReleaseMemObject(b_mem_obj);
ret = clReleaseMemObject(c_mem_obj);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(A);
free(B);
free(C);
return 0;
}
With the following kernel:
__kernel void vector_add(__global int *A, __global int *B, __global int *C) {
// Get the index of the current element
int i = get_global_id(0);
// Do the operation
C[i] = A[i] + B[i];
printf("calcule effectué");
}
While running the C program, I get the following result and I can't figure out why:
0 + 10 = 714121520
1 + 9 = 21995
2 + 8 = 0
3 + 7 = 0
4 + 6 = 1852255608
5 + 5 = 1768697717
6 + 4 = 1932425826
7 + 3 = 3223151
8 + 2 = 1919885413
9 + 1 = 1953459744
I don't know what the problem is. Any help is appreciated!
I also get wrong results, and moreover the results are random for each execution. This means that the values in the C array are never overwritten, and the uninitialized, random values in that allocated memory location are printed.
The first step in debugging is to print ret after each call and see where things go wrong:
After clGetDeviceIDs I got error -1 (device not found). CL_DEVICE_TYPE_ALL cleared that; then the selected device was my Nvidia GPU.
After clCreateProgramWithSource I got error -6 (out of host memory). To fix this, use cl_program program = clCreateProgramWithSource(context, 1, (const char**)&source_str, NULL, &ret);.
After clEnqueueNDRangeKernel I got error -54 (invalid workgroup size). To fix this, make global_item_size a multiple of local_item_size: size_t global_item_size = ((LIST_SIZE+local_item_size-1)/local_item_size)*local_item_size;. Since your intended global size (or rather the size of the allocated buffers) is not a multiple of the workgroup size, you should also put a guard clause if(i>=10) return; in the kernel; otherwise the kernel may write outside the allocated buffers, which can cause crashes.
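A more flexible variant (hypothetical; the hardcoded 10 above is fine for this example) is to pass the array length to the kernel instead of baking it into the source:
__kernel void vector_add(__global int *A, __global int *B, __global int *C, const int n) {
    int i = get_global_id(0);
    if(i >= n) return; //guard against the padded global range
    C[i] = A[i] + B[i];
}
with, on the host, before enqueueing:
cl_int n = LIST_SIZE;
ret = clSetKernelArg(kernel, 3, sizeof(cl_int), &n);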
Then, everything works as intended. Here is the entire fixed code:
#define CL_USE_DEPRECATED_OPENCL_1_2_APIS
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <CL/cl.h>
#define MAX_SOURCE_SIZE (0x100000)
int main(void) {
// Create the two input vectors
int i;
const int LIST_SIZE = 10;
int* A = (int*)malloc(sizeof(int)*LIST_SIZE);
int* B = (int*)malloc(sizeof(int)*LIST_SIZE);
int* C = (int*)malloc(sizeof(int)*LIST_SIZE);
for(i = 0; i < LIST_SIZE; i++) {
A[i] = i;
B[i] = LIST_SIZE - i;
C[i] = 0;
}
// Load the kernel source code into the array source_str
size_t source_size;
const char* source_str =
"__kernel void vector_add(__global int *A, __global int *B, __global int *C) {\n"
" int i = get_global_id(0);\n"
" if(i>=10) return;\n"
" C[i] = A[i]+B[i];\n"
"}"
;
// Get platform and device information
cl_platform_id platform_id = NULL;
cl_device_id device_id = NULL;
cl_uint ret_num_devices;
cl_uint ret_num_platforms;
cl_int ret = clGetPlatformIDs(1, &platform_id, &ret_num_platforms);
ret = clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_ALL, 1,
&device_id, &ret_num_devices);
// Create an OpenCL context
cl_context context = clCreateContext(NULL, 1, &device_id, NULL, NULL, &ret);
std::cout << "context " << ret << std::endl;
// Create a command queue
cl_command_queue command_queue = clCreateCommandQueue(context, device_id, 0, &ret);
std::cout << "queue " << ret << std::endl;
// Create memory buffers on the device for each vector
cl_mem a_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem b_mem_obj = clCreateBuffer(context, CL_MEM_READ_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
cl_mem c_mem_obj = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
LIST_SIZE * sizeof(int), NULL, &ret);
std::cout << "buffer " << ret << std::endl;
// Copy the lists A and B to their respective memory buffers
ret = clEnqueueWriteBuffer(command_queue, a_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), A, 0, NULL, NULL);
ret = clEnqueueWriteBuffer(command_queue, b_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), B, 0, NULL, NULL);
std::cout << "write " << ret << std::endl;
// Create a program from the kernel source
cl_program program = clCreateProgramWithSource(context, 1, (const char**)&source_str, NULL, &ret);
//cl_program program = clCreateProgramWithSource(context, 1, (const char**)&source_str, (const size_t *)&source_size, &ret);
std::cout << "program " << ret << std::endl;
// Build the program
ret = clBuildProgram(program, 1, &device_id, NULL, NULL, NULL);
// Create the OpenCL kernel
cl_kernel kernel = clCreateKernel(program, "vector_add", &ret);
std::cout << "kernel " << ret << std::endl;
// Set the arguments of the kernel
ret = clSetKernelArg(kernel, 0, sizeof(cl_mem), (void*)&a_mem_obj);
ret = clSetKernelArg(kernel, 1, sizeof(cl_mem), (void*)&b_mem_obj);
ret = clSetKernelArg(kernel, 2, sizeof(cl_mem), (void*)&c_mem_obj);
std::cout << "args " << ret << std::endl;
// Execute the OpenCL kernel on the list
size_t local_item_size = 64; // Divide work items into groups of 64
size_t global_item_size = ((LIST_SIZE+local_item_size-1)/local_item_size)*local_item_size; // make global range a multiple of local range
ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, NULL);
std::cout << "run " << ret << std::endl;
clFinish(command_queue);
// Read the memory buffer C on the device to the local variable C
ret = clEnqueueReadBuffer(command_queue, c_mem_obj, CL_TRUE, 0,
LIST_SIZE * sizeof(int), C, 0, NULL, NULL);
std::cout << "read " << ret << std::endl;
// Display the result to the screen
for(i = 0; i < LIST_SIZE; i++)
printf("%d + %d = %d\n", A[i], B[i], C[i]);
// Clean up
ret = clFlush(command_queue);
ret = clFinish(command_queue);
ret = clReleaseKernel(kernel);
ret = clReleaseProgram(program);
ret = clReleaseMemObject(a_mem_obj);
ret = clReleaseMemObject(b_mem_obj);
ret = clReleaseMemObject(c_mem_obj);
ret = clReleaseCommandQueue(command_queue);
ret = clReleaseContext(context);
free(A);
free(B);
free(C);
return 0;
}
Debugging that was quite a hassle. Do yourself a favor and use this OpenCL-Wrapper with C++. This eliminates all the code overhead and the countless possibilities for errors that come with it.
Hi, I am trying to execute example code from the book Seven Concurrency Models in Seven Weeks. The author uses a MacBook, while I am using a Dell XPS with Windows 10.
My program crashes because timing_event is still NULL after I call clEnqueueNDRangeKernel().
cl_event timing_event;
size_t work_units = NUM_ELEMENTS;
clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &work_units,
NULL, 0, NULL,&timing_event);
The docs state the following about the event parameter:
event
Returns an event object that identifies this particular kernel
execution instance. Event objects are unique and can be used to
identify a particular kernel execution instance later on. If event is
NULL, no event will be created for this kernel execution instance and
therefore it will not be possible for the application to query or
queue a wait for this particular kernel execution instance.
Can somebody explain why this happens on my Dell and not on the author's MacBook?
I found the solution. The problem did not come from clEnqueueNDRangeKernel(); it happened earlier, in clBuildProgram(program, 0, NULL, NULL, NULL, NULL);. I retrieved the build log with clGetProgramBuildInfo(). The problem was that my multiply_arrays.cl file wasn't UTF-8 encoded.
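For reference, the build log can be retrieved like this (the same calls appear in the full listing below):
size_t len;
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &len);
char *log = (char *)malloc(len);
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, len, log, NULL);
printf("%s\n", log);
free(log);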
For everyone who is new to OpenCL: every OpenCL function returns a status integer which maps to a specific error code. If a function does not return a status code directly, you can pass a pointer that receives the status instead. See the example functions I linked below. This is very helpful for debugging your program.
Returns Status Code
Status Code by Reference
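For example (a minimal sketch showing both reporting styles):
cl_int status = clBuildProgram(program, 0, NULL, NULL, NULL, NULL); //status returned directly
cl_kernel kernel = clCreateKernel(program, "multiply_arrays", &status); //status returned by reference
if (status != CL_SUCCESS) printf("OpenCL error: %d\n", status);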
main.cpp
/***
* Excerpted from "Seven Concurrency Models in Seven Weeks",
* published by The Pragmatic Bookshelf.
* Copyrights apply to this code. It may not be used to create training material,
* courses, books, articles, and the like. Contact us if you are in doubt.
* We make no guarantees that this code is fit for any purpose.
* Visit http://www.pragmaticprogrammer.com/titles/pb7con for more book information.
***/
#ifdef __APPLE__
#include <OpenCL/cl.h>
#include <mach/mach_time.h>
#else
#include <CL/cl.h>
#include <Windows.h>
#endif
#include <stdio.h>
#include<iostream>
#include <inttypes.h>
#include <chrono>
#define NUM_ELEMENTS (100000)
char* read_source(const char* filename) {
FILE *h = fopen(filename, "r");
fseek(h, 0, SEEK_END);
size_t s = ftell(h);
rewind(h);
char* program = (char*)malloc(s + 1);
fread(program, sizeof(char), s, h);
program[s] = '\0';
fclose(h);
return program;
}
void random_fill(cl_float array[], size_t size) {
for (int i = 0; i < size; ++i)
array[i] = (cl_float)rand() / RAND_MAX;
}
int main() {
//Status for Errorhandling
cl_int status;
//Identify Platform
cl_platform_id platform;
clGetPlatformIDs(1, &platform, NULL);
//Get Id of GPU
cl_device_id device;
cl_uint num_devices = 0;
clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device, &num_devices);
// Create Context
cl_context context = clCreateContext(NULL, 1, &device, NULL, NULL, NULL);
//Use context to create Command Queue
//Que enables us to send commands to the gpu device
cl_command_queue queue = clCreateCommandQueue(context, device, CL_QUEUE_PROFILING_ENABLE, NULL);
//Load Kernel
char* source = read_source("multiply_arrays.cl");
cl_program program = clCreateProgramWithSource(context, 1,
(const char**)&source, NULL, &status);
free(source);
// Build Program
status = clBuildProgram(program, 0, NULL, NULL, NULL, NULL);
size_t len;
char *buffer;
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, 0, NULL, &len);
buffer = (char *) malloc(len);
clGetProgramBuildInfo(program, device, CL_PROGRAM_BUILD_LOG, len, buffer, NULL);
printf("%s\n", buffer);
//Create Kernel
cl_kernel kernel = clCreateKernel(program, "multiply_arrays", &status);
// Create Arrays with random Numbers
cl_float a[NUM_ELEMENTS], b[NUM_ELEMENTS];
random_fill(a, NUM_ELEMENTS);
random_fill(b, NUM_ELEMENTS);
//uint64_t startGPU = mach_absolute_time();
auto start = std::chrono::high_resolution_clock::now();
//Create Readonly input Buffers with value from a and b
cl_mem inputA = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
sizeof(cl_float) * NUM_ELEMENTS, a, NULL);
cl_mem inputB = clCreateBuffer(context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
sizeof(cl_float) * NUM_ELEMENTS, b, NULL);
//Create Output buffer write Only
cl_mem output = clCreateBuffer(context, CL_MEM_WRITE_ONLY,
sizeof(cl_float) * NUM_ELEMENTS, NULL, NULL);
//set Kernel Arguments
clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputA);
clSetKernelArg(kernel, 1, sizeof(cl_mem), &inputB);
clSetKernelArg(kernel, 2, sizeof(cl_mem), &output);
cl_event timing_event;
size_t work_units = NUM_ELEMENTS;
status = clEnqueueNDRangeKernel(queue, kernel, 1, NULL, &work_units,
NULL, 0, NULL,&timing_event);
cl_float results[NUM_ELEMENTS];
//Calculate Results and copy from output buffer to results
clEnqueueReadBuffer(queue, output, CL_TRUE, 0, sizeof(cl_float) * NUM_ELEMENTS,
results, 0, NULL, NULL);
//uint64_t endGPU = mach_absolute_time();
auto finish = std::chrono::high_resolution_clock::now();
//printf("Total (GPU): %lu ns\n\n", (unsigned long)(endGPU - startGPU));
std::cout << "Total(GPU) :"<< std::chrono::duration_cast<std::chrono::nanoseconds>(finish - start).count() << "ns\n";
cl_ulong starttime;
clGetEventProfilingInfo(timing_event, CL_PROFILING_COMMAND_START,
sizeof(cl_ulong), &starttime, NULL);
cl_ulong endtime;
clGetEventProfilingInfo(timing_event, CL_PROFILING_COMMAND_END,
sizeof(cl_ulong), &endtime, NULL);
printf("Elapsed (GPU): %lu ns\n\n", (unsigned long)(endtime - starttime));
clReleaseEvent(timing_event);
clReleaseMemObject(inputA);
clReleaseMemObject(inputB);
clReleaseMemObject(output);
clReleaseKernel(kernel);
clReleaseProgram(program);
clReleaseCommandQueue(queue);
clReleaseContext(context);
//uint64_t startCPU = mach_absolute_time();
start = std::chrono::high_resolution_clock::now();
for (int i = 0; i < NUM_ELEMENTS; ++i)
results[i] = a[i] * b[i];
//uint64_t endCPU = mach_absolute_time();
finish = std::chrono::high_resolution_clock::now();
//printf("Elapsed (CPU): %lu ns\n\n", (unsigned long)(endCPU - startCPU));
std::cout << "Elapsed (CPU) :" << std::chrono::duration_cast<std::chrono::nanoseconds>(finish - start).count() << "ns\n";
return 0;
}
multiply_arrays.cl
__kernel void multiply_arrays(__global const float* inputA,
__global const float* inputB,
__global float* output) {
int i = get_global_id(0);
output[i] = inputA[i] * inputB[i];
}
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
/*#ifdef __APPLE__
#include <OpenCL/opencl.h>
#else*/
#include <CL/cl.h>
//#endif
#define DATA_SIZE 16
using namespace std;
const char *ProgramSource =
"__kernel void floydWarshallPass(__global uint * pathDistanceBuffer,const unsigned int numNodes, __global uint * result, const unsigned int pass)\n"\
"{\n"\
"int xValue = get_global_id(0);\n"\
"int yValue = get_global_id(1);\n"\
"int k = pass;\n"\
"int oldWeight = pathDistanceBuffer[yValue * 4 + xValue];\n"\
"int tempWeight = (pathDistanceBuffer[yValue * 4 + k] + pathDistanceBuffer[k * 4 + xValue]);\n"\
"if (tempWeight < oldWeight)\n"\
"{\n"\
"pathDistanceBuffer[yValue * 4 + xValue] = tempWeight;\n"\
"result[yValue * 4 + xValue] = tempWeight;\n"\
"}\n"\
"}\n"\
"\n";
int main(void)
{
cl_context context;
cl_context_properties properties[3];
cl_kernel kernel;
cl_command_queue command_queue;
cl_program program;
cl_int err;
cl_uint num_of_platforms=0;
cl_platform_id platform_id;
cl_device_id device_id;
cl_uint num_of_devices=0;
cl_mem inputA, inputB, output;
cl_int numNodes;
size_t global;
float inputDataA[16] = {0,2,3,4,5,0,7,8,9,10,0,12,13,14,15,0};
float results[16]={0};
int i,j;
numNodes = 16;
if(clGetPlatformIDs(1, &platform_id, &num_of_platforms) != CL_SUCCESS)
{
printf("Unable to get platform id\n");
return 1;
}
// try to get a supported GPU device
if (clGetDeviceIDs(platform_id, CL_DEVICE_TYPE_CPU, 1, &device_id, &num_of_devices) != CL_SUCCESS)
{
printf("Unable to get device_id\n");
return 1;
}
// context properties list - must be terminated with 0
properties[0]= CL_CONTEXT_PLATFORM;
properties[1]= (cl_context_properties) platform_id;
properties[2]= 0;
// create a context with the GPU device
context = clCreateContext(properties,1,&device_id,NULL,NULL,&err);
// create command queue using the context and device
command_queue = clCreateCommandQueue(context, device_id, 0, &err);
// create a program from the kernel source code
program = clCreateProgramWithSource(context,1,(const char **) &ProgramSource, NULL, &err);
// compile the program
if (clBuildProgram(program, 0, NULL, NULL, NULL, NULL) != CL_SUCCESS)
{
printf("Error building program\n");
return 1;
}
// specify which kernel from the program to execute
kernel = clCreateKernel(program, "floydWarshallPass", &err);
// create buffers for the input and ouput
inputA = clCreateBuffer(context, CL_MEM_READ_ONLY, sizeof(float) * DATA_SIZE, NULL, NULL);
output = clCreateBuffer(context, CL_MEM_WRITE_ONLY, sizeof(float) * DATA_SIZE, NULL, NULL);
// load data into the input buffer
clEnqueueWriteBuffer(command_queue, inputA, CL_TRUE, 0, sizeof(float) * DATA_SIZE, inputDataA, 0, NULL, NULL);
clEnqueueWriteBuffer(command_queue, output, CL_TRUE, 0, sizeof(float) * DATA_SIZE, inputDataA, 0, NULL, NULL);
// set the argument list for the kernel command
clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputA);
clSetKernelArg(kernel, 1, sizeof(cl_int), (void *)&numNodes);
clSetKernelArg(kernel, 2, sizeof(cl_mem), &output);
global=DATA_SIZE;
// enqueue the kernel command for execution
for(cl_uint sh=0; sh<16; sh++)
{
clSetKernelArg(kernel, 3, sizeof(cl_uint), (void *)&sh);
clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global, NULL, 0, NULL, NULL);
//clEnqueueReadBuffer(command_queue, output, CL_TRUE, 0, sizeof(float)*DATA_SIZE, results, 0, NULL, NULL);
//clEnqueueWriteBuffer(command_queue, inputA, CL_TRUE, 0, sizeof(float) * DATA_SIZE, results, 0, NULL, NULL);
//clEnqueueWriteBuffer(command_queue, output, CL_TRUE, 0, sizeof(float) * DATA_SIZE, results, 0, NULL, NULL);
//clSetKernelArg(kernel, 0, sizeof(cl_mem), &inputA);
//clSetKernelArg(kernel, 1, sizeof(cl_int), (void *)&numNodes);
//clSetKernelArg(kernel, 2, sizeof(cl_mem), &output);
clFinish(command_queue);
}
clFinish(command_queue);
// copy the results from out of the output buffer
clEnqueueReadBuffer(command_queue, output, CL_TRUE, 0, sizeof(float) *DATA_SIZE, results, 0, NULL, NULL);
// print the results
printf("output: ");
for(i=0;i<16; i++)
{
printf("%f ",results[i]);
}
// cleanup - release OpenCL resources
clReleaseMemObject(inputA);
//clReleaseMemObject(inputB);
clReleaseMemObject(output);
clReleaseProgram(program);
clReleaseKernel(kernel);
clReleaseCommandQueue(command_queue);
clReleaseContext(context);
return 0;
}
I am getting -0.00000 as output for every node.
P.S. I am running my code on CL_DEVICE_TYPE_CPU, because on the GPU it gives an error that it cannot get a device ID.
Please give some guidance on how to get correct output.
I think your question is a little too broad; you should have narrowed your code down a bit. I'll try to help with some errors I found in your code, but I didn't debug or compile it, so treat the issues described here as starting points.
Why are you calling get_global_id with parameter 1 in your kernel? Back at your clEnqueueNDRangeKernel you specified only one work-item dimension, so get_global_id(1) queries a non-existing dimension. If you want to translate a single-dimension coordinate into two coordinates, use a transformation such as the one below (for a row-major layout, both x and y derive from the row width):
int id = get_global_id(0);
int x = id % size->width;
int y = id / size->width;
Pay attention when you use sizeof(float) to measure the size of the data types: the host types may not be the same size as the types inside the OpenCL implementation. Use sizeof(cl_float) instead.
Maybe you are not getting any GPU because you don't have the proper drivers installed on your computer. Go to the GPU vendor website and look for runtime drivers for OpenCL.
Take a look at these pages from the OpenCL specification:
get_global_id
OpenCL data types
I'm delving into OpenCL by making a Matrix dot product implementation. I'm having a problem with getting my kernels to return the same values as my host.
I have made an encapsulation function that allocates device memory, sets parameters to a kernel, runs the kernel and returns the result back to the host.
/* This function runs the matrix dot product on whatever OpenCL device
* you specify
*/
cl_int OpenCL_MatrixMul(cl_device_id * device, cl_context * context,
cl_command_queue * commandQueue, cl_kernel * matrixMulKernel, float * A_h,
float * B_h, float * C_h, const cl_uint HeightA, const cl_uint WidthB,
const cl_uint WidthAHeightB)
{
printf("Inside matrix mul, WidthA: %zu, WidthB: %zu, WidthAHeightB: %zu\n",
HeightA, WidthB, WidthAHeightB);
//this error variable will record any errors found and will be returned
//by this function
cl_int error = CL_SUCCESS;
cl_int clEnqueueReadBuffer_error;
//declare a place for the memory on the device, A is the A matrix,
//B is the B matrix, C is the C result matrix
cl_mem A_d, B_d, C_d;
//this is a temporary value for holding the maximum work group size
size_t maximum_local_ws;
//variable for holding the number of work items per group
size_t local_ws[2];
//variable for holding the number of work items
size_t global_ws[2];
//calculate work group and local size
//get the maximum work group size for the kernel, i.e. set local_ws
clGetKernelWorkGroupInfo((* matrixMulKernel), (* device),
CL_KERNEL_WORK_GROUP_SIZE, sizeof(maximum_local_ws),
&maximum_local_ws, NULL);
//find the largest integer, power of 2, square root, for maximum_local_ws
//that is less than or equal to 16
for(size_t i = 1; (i * i) <= maximum_local_ws && i <= maxBlockSize; i *= 2)
{
local_ws[0] = i;
local_ws[1] = i;
}
//calculate global work size
global_ws[0] = WidthB;
global_ws[1] = HeightA;
printf("Work group size calculated.\n");
//Allocate global memory on the device
//put A on the device
A_d = clCreateBuffer ((* context), CL_MEM_COPY_HOST_PTR,
(WidthAHeightB * HeightA * sizeof(float)), A_h, &error);
//put B on the device
B_d = clCreateBuffer ((* context), CL_MEM_COPY_HOST_PTR,
(WidthB * WidthAHeightB * sizeof(float)), B_h, &error);
//create a space for C on the device
C_d = clCreateBuffer ((* context), CL_MEM_READ_WRITE,
(HeightA * WidthB * sizeof(float)), NULL, &error);
printf("Global memory allocated.\n");
if(error == CL_SUCCESS)
{
//set the prarameters of the kernels
//Put in A
error = clSetKernelArg((* matrixMulKernel), 0, sizeof(cl_mem), &A_d);
//Put in B
error |= clSetKernelArg((* matrixMulKernel), 1, sizeof(cl_mem), &B_d);
//Put in C
error |= clSetKernelArg((* matrixMulKernel), 2, sizeof(cl_mem), &C_d);
//Put in HeightA
error |= clSetKernelArg((* matrixMulKernel), 3, sizeof(cl_uint),
&HeightA);
//Put in WidthB
error |= clSetKernelArg((* matrixMulKernel), 4, sizeof(cl_uint),
&WidthB);
//Put in WidthAHeightB
error |= clSetKernelArg((* matrixMulKernel), 5, sizeof(cl_uint),
&WidthAHeightB);
printf("Parameters added to the kernel.\n");
if(error == CL_SUCCESS)
{
//execute the kernel
printf("Running Kernel, Local work size: %zu x %zu global worksize:
%zu x %zu, HeightA: %zu, WidthB: %zu, WidthAHeightB: %zu\n",
local_ws[0], local_ws[1], global_ws[0], global_ws[1],
HeightA, WidthB, WidthAHeightB);
error = clEnqueueNDRangeKernel((* commandQueue),
(* matrixMulKernel), 1, NULL, global_ws, local_ws, 0, NULL,
NULL);
printf("Kernel Ran.\n");
if(error == CL_SUCCESS)
{
printf("Kernel Launched Successfully\n");
}
else
{
printf("Kernel Not Launched\n");
}
}
}
else
{
printf("Parameters not added to the kernel.\n");
}
printf("Reading results back from device\n");
//read the result back to the host system (copy C_d into C_h)
clEnqueueReadBuffer_error = clEnqueueReadBuffer((* commandQueue), C_d,
CL_TRUE, 0, HeightA * WidthB * sizeof(float), C_h, 0, NULL, NULL);
//make sure we don't write over previous errors, if
//clEnqueueReadBuffer_error has an error
if(error == CL_SUCCESS)
{
error = clEnqueueReadBuffer_error;
}
printf("Freeing device memory\n");
//Free global memory on the device
clReleaseMemObject(A_d);
clReleaseMemObject(B_d);
clReleaseMemObject(C_d);
return error;
}
This code, when run, outputs something strange:
Inside matrix mul, WidthA: 16, WidthB: 16, WidthAHeightB: 16
Work group size calculated.
Global memory allocated.
Parameters added to the kernel.
Running Kernel, Local work size: 1 x 1 global worksize: 16 x 16, HeightA: 16, WidthB: 140733193388048, WidthAHeightB: 16
Kernel Ran.
Kernel Launched Successfully
Reading results back from device
Freeing device memory
For some reason, WidthB changed its value from 16 to 140733193388048. The strange thing is that WidthB is different, yet WidthA and WidthAHeightB, despite being used the same way, remain the same. Furthermore, the value 140733193388048 remains unusually deterministic across all the calls I make.
Consequently, the first row of the matrix that my device returns matches the host, but the subsequent values do not.
I'm programming on Mac OS X using Apple's OpenCL implementation in Snow Leopard.
What is going on here, and how do you keep something like this from happening?
One of the reasons why my kernel wasn't returning the right answer was that I wasn't giving clEnqueueNDRangeKernel the right number of dimensions for the work group (it should be 2, since global_ws and local_ws are two-dimensional). I'm still getting the weird outputs for WidthB, which isn't comforting, knowing my printouts won't be accurate if I try to debug my programs.
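A likely explanation for the remaining weird WidthB output (not confirmed in the original thread, but consistent with the symptoms): the printf format string uses %zu, which expects a size_t (8 bytes on 64-bit Snow Leopard), while HeightA, WidthB and WidthAHeightB are cl_uint (4 bytes). printf therefore decodes the variadic arguments at the wrong widths and prints garbage upper bits for some of them; the values actually passed to the kernel are fine. Note also that the first printf labels HeightA as "WidthA". A corrected version:
printf("Running Kernel, Local work size: %zu x %zu, global worksize: %zu x %zu, "
    "HeightA: %u, WidthB: %u, WidthAHeightB: %u\n",
    local_ws[0], local_ws[1], global_ws[0], global_ws[1],
    HeightA, WidthB, WidthAHeightB);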