I have the following code in cuda_computation.cu
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
void checkCUDAError(const char *msg);
__global__ void euclid_kernel(float *x, float* y, float* f)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int i = blockIdx.x;
int j = threadIdx.x;
f[idx] = sqrt((x[i]-x[j])*(x[i]-x[j]) + (y[i]-y[j])*(y[i]-y[j]));
}
int main()
{
float *xh;
float *yh;
float *fh;
float *xd;
float *yd;
float *fd;
size_t n = 256;
size_t numBlocks = n;
size_t numThreadsPerBlock = n;
size_t memSize = numBlocks * numThreadsPerBlock * sizeof(float);
xh = (float *) malloc(n * sizeof(float));
yh = (float *) malloc(n * sizeof(float));
fh = (float *) malloc(memSize);
for(int ii(0); ii!=n; ++ii)
{
xh[ii] = ii;
yh[ii] = ii;
}
cudaMalloc( (void **) &xd, n * sizeof(float) );
cudaMalloc( (void **) &yd, n * sizeof(float) );
cudaMalloc( (void **) &fd, memSize );
for(int run(0); run!=10000; ++run)
{
//change value to avoid optimizations
xh[0] = ((float)run)/10000.0;
cudaMemcpy( xd, xh, n * sizeof(float), cudaMemcpyHostToDevice );
checkCUDAError("cudaMemcpy");
cudaMemcpy( yd, yh, n * sizeof(float), cudaMemcpyHostToDevice );
checkCUDAError("cudaMemcpy");
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
euclid_kernel<<< dimGrid, dimBlock >>>( xd, yd, fd );
cudaThreadSynchronize();
checkCUDAError("kernel execution");
cudaMemcpy( fh, fd, memSize, cudaMemcpyDeviceToHost );
checkCUDAError("cudaMemcpy");
}
cudaFree(xd);
cudaFree(yd);
cudaFree(fd);
free(xh);
free(yh);
free(fh);
return 0;
}
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err)
{
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(-1);
}
}
It takes about 6 seconds to run on an FX QUADRO 380, while the corresponding serial version using just one i7-870 core takes only about 3 seconds. Am I missing something? Is the code under-optimised in some way? Or is it just expected behaviour that, for simple calculations like this all-pairs Euclidean distance, the overhead of moving memory exceeds the computational gain?
I think you are being killed by the time to move the data.
Especially since you are calling the CUDA kernel on such small amounts of data at a time, it might be quicker to upload a larger set of values as a single 1D array and operate on them in one go.
Also, sqrt isn't done in HW on CUDA (at least not on my GPU), whereas the CPU has optimized FPU hardware for this and is probably 10x faster than the GPU; and for a small job like this, the CPU is probably keeping all the results in cache between the timing runs.
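To illustrate the data-movement point, here is a minimal sketch (my illustration, not part of the original answer) of how the copies in the question's loop could be restructured, assuming the per-run results are not consumed on the host inside the loop: the y array never changes between runs, so its copy can be hoisted out, and the device-to-host copy of the results can be done once at the end.
// y is constant across runs, so copy it once before the timing loop
cudaMemcpy(yd, yh, n * sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(numBlocks);
dim3 dimBlock(numThreadsPerBlock);
for (int run = 0; run != 10000; ++run)
{
    // change value to avoid optimizations
    xh[0] = ((float)run) / 10000.0f;
    cudaMemcpy(xd, xh, n * sizeof(float), cudaMemcpyHostToDevice);
    euclid_kernel<<<dimGrid, dimBlock>>>(xd, yd, fd);
}
cudaDeviceSynchronize();
// copy the results back once, after the loop
cudaMemcpy(fh, fd, memSize, cudaMemcpyDeviceToHost);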
Reduce your global memory reads since they are expensive.
You have 4 global memory reads per thread which can be reduced to 2 using shared memory.
__global__ void euclid_kernel(const float * inX_g, const float* inY_g, float * outF_g)
{
const unsigned int threadId = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float xBlock_s;
__shared__ float yBlock_s;
if(threadIdx.x == 0)
{
xBlock_s = inX_g[blockIdx.x];
yBlock_s = inY_g[blockIdx.x];
}
__syncthreads();
float xSub = xBlock_s - inX_g[threadIdx.x];
float ySub = yBlock_s - inY_g[threadIdx.x];
outF_g[threadId] = sqrt(xSub * xSub + ySub * ySub);
}
You should also test with different block sizes (as long as you have 100% occupancy).
You are splitting the problem so that each block is responsible for a single i vs all 256 j's. This is bad locality, as those 256 j's have to be reloaded for every block, for a total of 2*256*(256 + 1) loads. Instead, split your grid so that each block is responsible for a range of, say, 16 i's and 16 j's, which is still 256 blocks*256 threads. But each block now loads only 2*(16+16) values, for a total of 2*256*32 loads. The idea is: reuse each loaded value as many times as possible. This may not have a huge impact with 256x256, but becomes more and more important as the size scales.
This optimization is used for efficient matrix multiplies, which have a similar locality problem. See http://en.wikipedia.org/wiki/Loop_tiling, or google for "optimized matrix multiply" for more details. And perhaps the matrix multiplication kernel in the NVIDIA SDK gives some details and ideas.
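To make the idea concrete, here is a sketch of what such a tiled variant of the distance kernel could look like (my illustration, not from the original answer; it assumes n is a multiple of TILE and a row-major n*n output):
#define TILE 16

__global__ void euclid_tiled(const float *x, const float *y, float *f, int n)
{
    // Each block computes a TILE x TILE tile of the n x n output, so every
    // loaded x/y value is reused TILE times instead of once.
    __shared__ float xi[TILE], yi[TILE];   // values for this block's range of i
    __shared__ float xj[TILE], yj[TILE];   // values for this block's range of j

    int i = blockIdx.y * TILE + threadIdx.y;   // global row index
    int j = blockIdx.x * TILE + threadIdx.x;   // global column index

    // One column/row of threads loads the shared values for the whole tile.
    if (threadIdx.x == 0) { xi[threadIdx.y] = x[i]; yi[threadIdx.y] = y[i]; }
    if (threadIdx.y == 0) { xj[threadIdx.x] = x[j]; yj[threadIdx.x] = y[j]; }
    __syncthreads();

    float dx = xi[threadIdx.y] - xj[threadIdx.x];
    float dy = yi[threadIdx.y] - yj[threadIdx.x];
    f[i * n + j] = sqrtf(dx * dx + dy * dy);
}
// launch: dim3 grid(n / TILE, n / TILE), block(TILE, TILE);
// euclid_tiled<<<grid, block>>>(xd, yd, fd, n);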
I have many small 2D arrays (e.g. M x 32 x 40) and fewer larger 2D arrays (e.g. N x 200 x 300).
I would like to 'put' the smaller matrices at indices n,i,j in the larger arrays (upper-left index of the array at batch index n). These small arrays could overlap and should be aggregated by functions that are associative and commutative, say plus, multiply, etc.
I figure this is a pretty basic scenario that many people should have come across, right? Is there a CUDA implementation that supports this in an efficient way?
Typical values M = 10^6, N = 10^4
This is a reduction operation.
In addition to what is expressed in the comments, I'll make the assumption that the distribution of the M matrices, in terms of which of the N matrices they belong to, is relatively uniform, i.e. evenly distributed. For the dimensions given, this means there will be approximately 100 of the M matrices intended to update N matrix 0, 100 for N matrix 1, and so on. Furthermore, if we inspect the n array, we would observe a uniformly random pattern of indices (i.e. no clumping or grouping).
Given that, in what may be a first for me, I'll suggest a lock/critical section algorithm, using the plumbing from here. Each threadblock will take one of the M arrays, and attempt to acquire a lock so that it can update the appropriate N array. When finished, release the lock.
I considered other approaches as well, some of which are evident in the code. In any event, for the stated conditions, the lock based approach had a kernel runtime of about 40ms on my V100 GPU, which was the best I observed.
I would also note that the stated dimensions result in a data working set of ~8GB. Not that that is a problem, just be aware if running this code as-is on your laptop GPU.
Here's an example:
$ cat t34.cu
#include <iostream>
#include <cstdlib>
const int N = 10000;
const int M = 1000000;
const int Mx = 32;
const int My = 40;
const int Nx = 200;
const int Ny = 300;
const int nTPB = 256;
template <typename T>
__host__ __device__
T reduction_op(T &a, const T &b){ return a+b;}
template <typename T>
__global__ void k(const T * __restrict__ M, T * __restrict__ N, const int * __restrict__ n, const int * __restrict__ i, const int * __restrict__ j, const int num_M){
for (int ii = 0; ii < num_M; ii++){
if (n[ii] == blockIdx.x) {
for (int jj = threadIdx.x; jj < Mx*My; jj += blockDim.x){
int y = jj/Mx;
int x = jj - (y*Mx);
N[blockIdx.x*Nx*Ny + i[ii] + (j[ii]+y)*Nx + x] = reduction_op(
N[blockIdx.x*Nx*Ny + i[ii] + (j[ii]+y)*Nx + x], M[ii*Mx*My + y*Mx + x]);}
}
__syncthreads();}
}
// assumes Ny is whole-number divisible by sl
template <typename T>
__global__ void ki(const T * __restrict__ M, T * __restrict__ N, const int * __restrict__ n, const int * __restrict__ i, const int * __restrict__ j, const int num_M, const int sl){
extern __shared__ T s[];
for (int c = 0; c < Ny; c+=sl){ // process per chunk of N array
// load shared
for (int t = threadIdx.x; t < sl*Nx; t += blockDim.x) s[t] = N[blockIdx.x*Nx*Ny + c*Nx + t];
__syncthreads();
// process chunk stack
for (int ii = 0; ii < num_M; ii++){ // iterate through "stack"
if ((n[ii] == blockIdx.x) && (j[ii] < (c+sl)) && ((j[ii]+My) > c)) {
for (int jj = threadIdx.x; jj < sl*Mx; jj += blockDim.x){
int y = jj/Mx;
int x = jj - (y*Mx);
//y += c;
if ((y+c >= j[ii]) && (y+c < (j[ii]+My)))
s[y*Nx+x+i[ii]] = reduction_op(s[y*Nx+x+i[ii]], M[ii*Mx*My + (y+c-j[ii])*Mx + x]);}
}
__syncthreads();}
// save shared
for (int t = threadIdx.x; t < sl*Nx; t += blockDim.x) N[blockIdx.x*Nx*Ny + c*Nx + t] = s[t];
}
}
template <typename T>
__global__ void ka(const T * __restrict__ M, T * __restrict__ N, const int * __restrict__ n, const int * __restrict__ i, const int * __restrict__ j, const int num_M){
int x = threadIdx.x;
for (int y = threadIdx.y; y < My; y += blockDim.y)
atomicAdd(N+n[blockIdx.x]*Nx*Ny+(j[blockIdx.x]+y)*Nx+i[blockIdx.x]+x, M[blockIdx.x*Mx*My+y*Mx+x]);
}
__device__ void acquire_semaphore(volatile int *lock){
while (atomicCAS((int *)lock, 0, 1) != 0);
}
__device__ void release_semaphore(volatile int *lock){
*lock = 0;
__threadfence();
}
template <typename T>
__global__ void kl(const T * __restrict__ M, T * __restrict__ N, const int * __restrict__ n, const int * __restrict__ i, const int * __restrict__ j, const int num_M, int * __restrict__ locks){
if ((threadIdx.x == 0) && (threadIdx.y == 0))
acquire_semaphore(locks+n[blockIdx.x]);
__syncthreads();
//begin critical section
int x = threadIdx.x;
for (int y = threadIdx.y; y < My; y += blockDim.y){
N[n[blockIdx.x]*Nx*Ny + i[blockIdx.x] + (j[blockIdx.x]+y)*Nx + x] = reduction_op(
N[n[blockIdx.x]*Nx*Ny + i[blockIdx.x] + (j[blockIdx.x]+y)*Nx + x], M[blockIdx.x*Mx*My + y*Mx + x]);}
// end critical section
__threadfence(); // not strictly necessary for the lock, but to make any global updates in the critical section visible to other threads in the grid
__syncthreads();
if ((threadIdx.x == 0) && (threadIdx.y == 0))
release_semaphore(locks+n[blockIdx.x]);
}
typedef float mt;
int main(){
mt *d_M, *h_M, *d_N, *h_N, *r1, *r2;
int *d_n, *h_n, *d_i, *h_i, *d_j, *h_j;
h_M = new mt[M*Mx*My];
h_N = new mt[N*Nx*Ny];
r1 = new mt[N*Nx*Ny];
r2 = new mt[N*Nx*Ny];
h_n = new int[M];
h_i = new int[M];
h_j = new int[M];
cudaMalloc(&d_M, M*Mx*My*sizeof(mt));
cudaMalloc(&d_N, N*Nx*Ny*sizeof(mt));
cudaMalloc(&d_n, M*sizeof(int));
cudaMalloc(&d_i, M*sizeof(int));
cudaMalloc(&d_j, M*sizeof(int));
for (int i = 0; i < M; i++){
h_n[i] = rand()%N;
h_i[i] = rand()%(Nx - Mx);
h_j[i] = rand()%(Ny - My);}
for (int i = 0; i < N*Nx*Ny; i++) h_N[i] = (mt)(i%3);
for (int i = 0; i < M*Mx*My; i++) h_M[i] = (mt)((i%3)+1);
cudaMemcpy(d_M, h_M, M*Mx*My*sizeof(mt), cudaMemcpyHostToDevice);
cudaMemcpy(d_N, h_N, N*Nx*Ny*sizeof(mt), cudaMemcpyHostToDevice);
cudaMemcpy(d_n, h_n, M*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_i, h_i, M*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_j, h_j, M*sizeof(int), cudaMemcpyHostToDevice);
#ifdef USE_SINGLE_N
cudaMemset(d_n, 0, M*sizeof(int));
#endif
#if 0
const int sl = 40;
const int sb = sl * Nx * sizeof(mt);
ki<<<N, nTPB, sb>>>(d_M, d_N, d_n, d_i, d_j, M, sl);
cudaMemcpy(r2, d_N, N*Nx*Ny*sizeof(mt), cudaMemcpyDeviceToHost);
#endif
dim3 block(Mx, 8);
#if 0
ka<<<M, block>>>(d_M, d_N, d_n, d_i, d_j, M);
cudaMemcpy(r2, d_N, N*Nx*Ny*sizeof(mt), cudaMemcpyDeviceToHost);
#endif
int *d_locks;
cudaMalloc(&d_locks, N*sizeof(int));
cudaMemset(d_locks, 0, N*sizeof(int));
kl<<<M, block>>>(d_M, d_N, d_n, d_i, d_j, M, d_locks);
cudaMemcpy(r2, d_N, N*Nx*Ny*sizeof(mt), cudaMemcpyDeviceToHost);
cudaMemcpy(d_N, h_N, N*Nx*Ny*sizeof(mt), cudaMemcpyHostToDevice);
k<<<N, nTPB>>>(d_M, d_N, d_n, d_i, d_j, M);
cudaMemcpy(r1, d_N, N*Nx*Ny*sizeof(mt), cudaMemcpyDeviceToHost);
for (int i = 0; i < N*Nx*Ny; i++) if (r1[i] != r2[i]) {std::cout << "mismatch at: " << i << " was: " << r2[i] << " should be: " << r1[i] << std::endl; return 0;}
}
$ nvcc -o t34 t34.cu -O3 -lineinfo
$ nvprof ./t34
==17970== NVPROF is profiling process 17970, command: ./t34
==17970== Profiling application: ./t34
==17970== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 34.57% 3.09036s 2 1.54518s 1.54294s 1.54742s [CUDA memcpy DtoH]
33.18% 2.96615s 1 2.96615s 2.96615s 2.96615s void k<float>(float const *, float*, int const *, int const *, int const *, int)
31.81% 2.84401s 6 474.00ms 1.4255ms 1.27035s [CUDA memcpy HtoD]
0.45% 39.949ms 1 39.949ms 39.949ms 39.949ms void kl<float>(float const *, float*, int const *, int const *, int const *, int, int*)
0.00% 2.1120us 1 2.1120us 2.1120us 2.1120us [CUDA memset]
API calls: 96.13% 8.94558s 8 1.11820s 1.9203ms 4.51030s cudaMemcpy
3.60% 334.59ms 6 55.765ms 277.58us 330.37ms cudaMalloc
0.15% 13.752ms 8 1.7190ms 1.3268ms 2.2025ms cuDeviceTotalMem
0.11% 10.472ms 808 12.959us 172ns 728.50us cuDeviceGetAttribute
0.01% 997.81us 8 124.73us 100.93us 176.73us cuDeviceGetName
0.00% 69.047us 2 34.523us 32.349us 36.698us cudaLaunchKernel
0.00% 68.013us 1 68.013us 68.013us 68.013us cudaMemset
0.00% 46.172us 8 5.7710us 1.8940us 23.025us cuDeviceGetPCIBusId
0.00% 8.5060us 16 531ns 260ns 1.5030us cuDeviceGet
0.00% 3.7870us 8 473ns 229ns 881ns cuDeviceGetUuid
0.00% 3.3980us 3 1.1320us 610ns 2.0780us cuDeviceGetCount
$
Extended discussion:
On performance:
This is a memory bound algorithm. Therefore, we can estimate optimal kernel performance by determining the minimum number of memory reads and writes needed to perform the operation, then dividing by the available memory bandwidth, to determine the optimal or lower-bound for kernel duration. Unfortunately the determination of the minimum number of reads and writes depends on the positioning of the M matrices, so cannot be easily generally determined, without inspecting the n, i, and j matrices.
However, we can estimate another way: observe that each element of each M matrix update requires reading 2 values and writing 1 value. Using that as our estimate, we come up with M*Mx*My*3*sizeof(element_of_M)/GPU_memory_bandwidth; for float data that is 10^6 * 32 * 40 * 3 * 4 bytes, roughly 15 GB of traffic. On my V100 (~700GB/s BW) this works out to a lower bound of about 20ms on kernel duration.
On approaches considered:
"naive" approach, kernel k: Each threadblock will be responsible for one of the N matrices, and will iterate through the M matrices, inspecting n to determine if the M matrices will update the assigned N matrix. This gives a non-optimal run time of ~3s but seems to be mostly invariant performance-wise based on the distribution of n, and can use an "arbitrary" reduction op.
attempt at "optimal" approach, kernel ki: Each threadblock will be responsible for one of the N matrices, but will only load a chunk of that matrix at a time. It will then proceed through the M matrices updating that chunk, similar to the k kernel. This necessitates more loops through the matrices, but should "almost" only load or save each global memory item the minimum number of times necessary. Nevertheless, the run time is really long, ~40s.
atomic approach, kernel ka: Each threadblock will be responsible for one of the M matrices, and will atomically update the relevant N matrix. Simplicity. And the runtime is "fast" at ~40ms. (The atomic approach may be even faster than this for non-uniform n distributions; I witnessed kernel runtimes as low as 8ms!) However this is not readily generalizable to operations that don't have an atomic equivalent, such as multiply.
lock based approach, kernel kl: Like the atomic approach, each threadblock will be responsible for one of the M matrices, and will first acquire a lock on the relevant N matrix. The lock means that atomics are not necessary. For the uniformly distributed n case presented, it has about the same performance as the atomic case. It has the benefit that it can handle other reduction ops, such as multiply, readily. A disadvantage is that in the presence of non-uniformly-random distribution in n the performance can suffer, with a worst case in the ballpark of the naive kernel (3-5s).
Overall, if the requirement for an arbitrary reduction operator can be dropped (e.g. only addition is needed), then the atomic method may be best.
I am trying to create a simple program that uses Intel's AVX technology to perform vector multiplication and addition. I am using OpenMP alongside this. But it is getting a segmentation fault due to the call to _mm256_store_ps().
I have tried OpenMP features like atomic and critical, in case the problem was that this function is not atomic and multiple cores were attempting to execute it at the same time, but it is not working.
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
#include<immintrin.h>
#include<omp.h>
#define N 64
__m256 multiply_and_add_intel(__m256 a, __m256 b, __m256 c) {
return _mm256_add_ps(_mm256_mul_ps(a, b),c);
}
void multiply_and_add_intel_total_omp(const float* a, const float* b, const float* c, float* d)
{
__m256 a_intel, b_intel, c_intel, d_intel;
#pragma omp parallel for private(a_intel,b_intel,c_intel,d_intel)
for(long i=0; i<N; i=i+8) {
a_intel = _mm256_loadu_ps(&a[i]);
b_intel = _mm256_loadu_ps(&b[i]);
c_intel = _mm256_loadu_ps(&c[i]);
d_intel = multiply_and_add_intel(a_intel, b_intel, c_intel);
_mm256_store_ps(&d[i],d_intel);
}
}
int main()
{
srand(time(NULL));
float * a = (float *) malloc(sizeof(float) * N);
float * b = (float *) malloc(sizeof(float) * N);
float * c = (float *) malloc(sizeof(float) * N);
float * d_intel_avx_omp = (float *)malloc(sizeof(float) * N);
int i;
for(i=0;i<N;i++)
{
a[i] = (float)(rand()%10);
b[i] = (float)(rand()%10);
c[i] = (float)(rand()%10);
}
double time_t = omp_get_wtime();
multiply_and_add_intel_total_omp(a,b,c,d_intel_avx_omp);
time_t = omp_get_wtime() - time_t;
printf("\nTime taken to calculate with AVX2 and OMP : %0.5lf\n",time_t);
free(a);
free(b);
free(c);
free(d_intel_avx_omp);
return 0;
}
I expect that I will get d = a * b + c, but it is showing a segmentation fault. I have tried to perform the same task without OpenMP and it works without errors. Please let me know if there is any compatibility issue or if I am missing any part.
gcc version 7.3.0
Intel® Core™ i3-3110M Processor
OS Ubuntu 18.04
OpenMP 4.5; I have executed the command $ echo |cpp -fopenmp -dM |grep -i open and it showed #define _OPENMP 201511
Command to compile: gcc first_int.c -mavx -fopenmp
** UPDATE **
As per the discussions and suggestions, the new code is,
float * a = (float *) aligned_alloc(N, sizeof(float) * N);
float * b = (float *) aligned_alloc(N, sizeof(float) * N);
float * c = (float *) aligned_alloc(N, sizeof(float) * N);
float * d_intel_avx_omp = (float *)aligned_alloc(N, sizeof(float) * N);
This is now working perfectly.
Just a note: I was trying to compare the plain calculation, the AVX calculation, and the AVX + OpenMP calculation. These are the results I got,
Time taken to calculate without AVX : 0.00037
Time taken to calculate with AVX : 0.00024
Time taken to calculate with AVX and OMP : 0.00019
N = 50000
The documentation for _mm256_store_ps says:
Store 256-bits (composed of 8 packed single-precision (32-bit) floating-point elements) from a into memory. mem_addr must be aligned on a 32-byte boundary or a general-protection exception may be generated.
You can use _mm256_storeu_ps instead for unaligned stores.
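For instance, the minimal change in the question's loop would be just the store (a sketch; the loads are already the unaligned _mm256_loadu_ps variants, so only the aligned store can fault):
#pragma omp parallel for
for (long i = 0; i < N; i += 8) {
    __m256 a_intel = _mm256_loadu_ps(&a[i]);
    __m256 b_intel = _mm256_loadu_ps(&b[i]);
    __m256 c_intel = _mm256_loadu_ps(&c[i]);
    __m256 d_intel = multiply_and_add_intel(a_intel, b_intel, c_intel);
    _mm256_storeu_ps(&d[i], d_intel);   // unaligned store: no 32-byte alignment required
}
Declaring the vectors inside the loop body also makes them thread-private, so the private(...) clause is no longer needed.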
A better option is to align all your arrays on a 32-byte boundary (for 256-bit AVX registers) and use aligned loads and stores for maximum performance, because unaligned loads/stores that cross a cache-line boundary incur a performance penalty.
Use std::aligned_alloc (or C11 aligned_alloc, memalign, posix_memalign, whatever you have available) instead of malloc(size), e.g.:
float* allocate_aligned(size_t n) {
constexpr size_t alignment = alignof(__m256);
return static_cast<float*>(aligned_alloc(alignment, sizeof(float) * n));
}
// ...
float* a = allocate_aligned(N);
float* b = allocate_aligned(N);
float* c = allocate_aligned(N);
float* d_intel_avx_omp = allocate_aligned(N);
In C++17, new can allocate with alignment:
float* allocate_aligned(size_t n) {
constexpr auto alignment = std::align_val_t{alignof(__m256)};
return new(alignment) float[n];
}
Alternatively, use Vc (portable, zero-overhead C++ types for explicitly data-parallel programming), which aligns heap-allocated SIMD vectors for you:
#include <cstdio>
#include <memory>
#include <chrono>
#include <Vc/Vc>
Vc::float_v random_float_v() {
alignas(Vc::VectorAlignment) float t[Vc::float_v::Size];
for(unsigned i = 0; i < Vc::float_v::Size; ++i)
t[i] = std::rand() % 10;
return Vc::float_v(t, Vc::Aligned);
}
unsigned reverse_crc32(void const* vbegin, void const* vend) {
unsigned const* begin = reinterpret_cast<unsigned const*>(vbegin);
unsigned const* end = reinterpret_cast<unsigned const*>(vend);
unsigned r = 0;
while(begin != end)
r = __builtin_ia32_crc32si(r, *--end);
return r;
}
int main() {
constexpr size_t N = 65536;
constexpr size_t M = N / Vc::float_v::Size;
std::unique_ptr<Vc::float_v[]> a(new Vc::float_v[M]);
std::unique_ptr<Vc::float_v[]> b(new Vc::float_v[M]);
std::unique_ptr<Vc::float_v[]> c(new Vc::float_v[M]);
std::unique_ptr<Vc::float_v[]> d_intel_avx_omp(new Vc::float_v[M]);
for(unsigned i = 0; i < M; ++i) {
a[i] = random_float_v();
b[i] = random_float_v();
c[i] = random_float_v();
}
auto t0 = std::chrono::high_resolution_clock::now();
for(unsigned i = 0; i < M; ++i)
d_intel_avx_omp[i] = a[i] * b[i] + c[i];
auto t1 = std::chrono::high_resolution_clock::now();
double seconds = std::chrono::duration_cast<std::chrono::duration<double>>(t1 - t0).count();
unsigned crc = reverse_crc32(d_intel_avx_omp.get(), d_intel_avx_omp.get() + M); // Make sure d_intel_avx_omp isn't optimized out.
std::printf("crc: %u, time: %.09f seconds\n", crc, seconds);
}
Parallel version:
#include <tbb/parallel_for.h>
// ...
auto t0 = std::chrono::high_resolution_clock::now();
tbb::parallel_for(size_t{0}, M, [&](unsigned i) {
d_intel_avx_omp[i] = a[i] * b[i] + c[i];
});
auto t1 = std::chrono::high_resolution_clock::now();
You must use aligned memory for these intrinsics. Change your malloc(...) to aligned_alloc(sizeof(float) * 8, ...) (C11).
This is completely unrelated to atomics. You are working on entirely separate pieces of data (even on different cache lines), so there is no need for any protection.
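Applied to the allocations in the question, that could look like the following sketch (note that C11 aligned_alloc requires the size to be a multiple of the alignment, which holds here because sizeof(float) * N is a multiple of 32 for the N values used):
// 32-byte alignment matches the __m256 vectors used by _mm256_store_ps
float *a = (float *) aligned_alloc(32, sizeof(float) * N);
float *b = (float *) aligned_alloc(32, sizeof(float) * N);
float *c = (float *) aligned_alloc(32, sizeof(float) * N);
float *d_intel_avx_omp = (float *) aligned_alloc(32, sizeof(float) * N);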
I am pretty new to CUDA and I'm struggling with converting a C code to CUDA C; it builds successfully but it keeps crashing. The triple-loop function is wrong for sure and I have no idea what I should change.
Function call:
for (z=0;z<=max;z++)
{
correlation_sum=coefficient(x, n, dim, z);
printf("result for epsilon %d returns %d\n", z, correlation_sum);
}
Function
long coefficient(int vctr[40000], long numberofpoints, int coefficientrow, int epsilon)
{
long i, j, k, sum, numberofpairs;
long sq_epsilon;
sq_epsilon=epsilon*epsilon;
numberofpairs=0;
for (i=1;i<=numberofpoints-coefficientrow;i++)
{
sum=0;
for (j=i+1;j<=numberofpoints+1-coefficientrow;j++)
{
for (k=0;k<coefficientrow;k++)
{
sum=sum+(vctr[i+k]-vctr[j+k])*(vctr[i+k]-vctr[j+k]);
}
if(sum<sq_epsilon)
{
numberofpairs++;
sum=0;
}
}
}
return (numberofpairs);
}
I have problems limiting the function in the GPU part so it doesn't go out of bounds (e.g. so that k is less than coefficientrow above). I saw that it is possible to assign block/thread IDs and use an if statement. I have tried it, but in a triple for loop it is kinda... strange.
Here is almost full code.
#define THREADS 1024
__global__ void coefficient(int *vctr, int numberofpoints, int coefficient_row, int epsilon, int *numbofpairs){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
int sum;
numbofpairs = 0;
int sq_epsilon = epsilon*epsilon;
if (i <= numberofpoints - coefficient_row)
{
sum = 0;
if (j <= numberofpoints + 1 - coefficient_row)
{
if (k < coefficient_row)
sum = sum + (vctr[i + k] - vctr[j + k])*(vctr[i + k] - vctr[j + k]);
if (sum < sq_epsilon){
numbofpairs++;
sum = 0;
}}}}
int main()
{
int n, dim, max, z;
int *d_n, *d_dim, *d_z, *d_x, *d_numbofpairs;
int x[40000], correlation_sum = 0;
n=10;
max=10;
dim=3;
cudaMalloc((void **)&d_n, sizeof(int));
cudaMalloc((void **)&d_dim, sizeof(int));
cudaMalloc((void **)&d_z, sizeof(int));
cudaMalloc((void **)&d_x, sizeof(int));
cudaMalloc((void **)&d_numbofpairs, sizeof(int));
cudaMemcpy(d_n, &n, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_dim, &dim, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_x, &x, sizeof(int), cudaMemcpyHostToDevice);
for (z = 0; z <= max; z++)
{
cudaMemcpy(d_z, &z, sizeof(int), cudaMemcpyHostToDevice);
coefficient << <1, THREADS >> >(d_x, *d_n, *d_dim, *d_z, d_numbofpairs);
cudaMemcpy(&correlation_sum, d_numbofpairs, sizeof(int), cudaMemcpyDeviceToHost);
printf("result for epsilon %d returns %d\n", z, correlation_sum);
}
cudaFree(d_n);
cudaFree(d_dim);
cudaFree(d_z);
cudaFree(d_x);
cudaFree(d_numbofpairs);
return 0;
}
I would like some help or tips what to change, what is wrong and why it keeps crashing so I could fix it. Thank you!
EDIT: I completed some parts, sorry, my bad. As for threads and blocks, I am very confused: the GPU shows 1024 threads per block, and I'm not sure whether that's what I should be using or not.
So the "crash" is a seg fault. A seg fault is a problem in host code, not kernel code (although it could be in your usage of the CUDA API).
Your code has a variety of problems.
1. This might cause trouble:
int x[40000]
this creates a large stack-based allocation. Instead I suggest doing a dynamic allocation:
int *x = (int *)malloc(40000*sizeof(int));
dynamic allocations have much higher size limits.
2. It's fairly clear from your kernel usage that you intend to use the whole x vector. Therefore, this allocation on the device for d_x is not correct:
cudaMalloc((void **)&d_x, sizeof(int));
we need the same size allocation on the device as what we have on the host:
cudaMalloc((void **)&d_x, 40000*sizeof(int));
3. Corresponding to 2, you probably would want to copy the entire x vector to the device (it's not really clear, since your code doesn't show the initialization of x), and you have incorrectly taken the address of x here, but x is already a pointer:
cudaMemcpy(d_x, &x, sizeof(int), cudaMemcpyHostToDevice);
so we want something like this instead:
cudaMemcpy(d_x, x, 40000*sizeof(int), cudaMemcpyHostToDevice);
4. Your other kernel parameters appear to be scalar parameters. You're mostly handling those incorrectly as well:
__global__ void coefficient(int *vctr, int numberofpoints, int coefficient_row, int epsilon, int *numbofpairs){
for a parameter like numberofpoints specified as above (one-way pass to function), we simply pass by value the host quantity we want when calling the kernel, just like we would with an ordinary C function. So this kernel invocation is not correct (even though it appears to compile):
coefficient << <1, THREADS >> >(d_x, *d_n, *d_dim, *d_z, d_numbofpairs);
instead we want to pass just the host variables, by value:
coefficient << <1, THREADS >> >(d_x, n, dim, z, d_numbofpairs);
since d_numbofpairs is going both ways, your usage is correct there.
I would also recommend adding proper cuda error checking to your code.
Here is a fully worked example with the above errors fixed. I think the results are bogus of course because the input data (e.g. x) is not initialized.
$ cat t724.cu
#include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
#define THREADS 1024
__global__ void coefficient(int *vctr, int numberofpoints, int coefficient_row, int epsilon, int *numbofpairs){
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int k = blockIdx.z * blockDim.z + threadIdx.z;
int sum;
numbofpairs = 0;
int sq_epsilon = epsilon*epsilon;
if (i <= numberofpoints - coefficient_row)
{
sum = 0;
if (j <= numberofpoints + 1 - coefficient_row)
{
if (k < coefficient_row)
sum = sum + (vctr[i + k] - vctr[j + k])*(vctr[i + k] - vctr[j + k]);
if (sum < sq_epsilon){
numbofpairs++;
sum = 0;
}}}}
int main()
{
int n, dim, max, z;
int *d_x, *d_numbofpairs;
int correlation_sum = 0;
int *x = (int *)malloc(40000*sizeof(int));
if (x == NULL) {printf("malloc fail\n"); return -1;}
n=10;
max=10;
dim=3;
cudaMalloc((void **)&d_x, 40000*sizeof(int));
cudaCheckErrors("cudaMalloc 1 fail");
cudaMalloc((void **)&d_numbofpairs, sizeof(int));
cudaCheckErrors("cudaMalloc 2 fail");
cudaMemcpy(d_x, x, 40000*sizeof(int), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy 1 fail");
for (z = 0; z <= max; z++)
{
coefficient << <1, THREADS >> >(d_x, n, dim, z, d_numbofpairs);
cudaMemcpy(&correlation_sum, d_numbofpairs, sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy 2/kernel fail");
printf("result for epsilon %d returns %d\n", z, correlation_sum);
}
cudaFree(d_x);
cudaFree(d_numbofpairs);
return 0;
}
$ nvcc -o t724 t724.cu
$ ./t724
result for epsilon 0 returns 3
result for epsilon 1 returns 3
result for epsilon 2 returns 3
result for epsilon 3 returns 3
result for epsilon 4 returns 3
result for epsilon 5 returns 3
result for epsilon 6 returns 3
result for epsilon 7 returns 3
result for epsilon 8 returns 3
result for epsilon 9 returns 3
result for epsilon 10 returns 3
$
Note that I didn't make any changes to your kernel code.
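As a separate, purely illustrative sketch (not part of the fix above): for the pair counting to eventually produce meaningful numbers, the kernel would need to write through the numbofpairs pointer and accumulate atomically, along the lines of the following, which parallelizes only the outer i loop and keeps j and k serial per thread:
__global__ void coefficient_fixed(const int *vctr, int numberofpoints, int coefficient_row,
                                  int epsilon, int *numbofpairs)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int sq_epsilon = epsilon * epsilon;
    if (i >= 1 && i <= numberofpoints - coefficient_row) {
        for (int j = i + 1; j <= numberofpoints + 1 - coefficient_row; j++) {
            int sum = 0;   // reset per j; this appears to be the intent of the CPU version
            for (int k = 0; k < coefficient_row; k++) {
                int d = vctr[i + k] - vctr[j + k];
                sum += d * d;
            }
            if (sum < sq_epsilon)
                atomicAdd(numbofpairs, 1);   // shared counter must be updated atomically
        }
    }
}
// host side, before each launch: cudaMemset(d_numbofpairs, 0, sizeof(int));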
When I read the programming guide, I got the feeling that shared memory will always improve the performance, but it seems that is not the case.
I have two functions:
const int Ntimes=1;
__global__ void testgl(float *A, float *C, int numElements){
int ti = threadIdx.x;
int b0 = blockDim.x*blockIdx.x;
if (b0+ti < numElements){
for(int i=0;i<Ntimes;i++){
A[b0+ti]=A[b0+ti]*A[b0+ti]*10-2*A[b0+ti]+1;
}
C[b0+ti] = A[b0+ti]*A[b0+ti];
}
}
__global__ void testsh(float *A, float *C, int numElements){
int ti = threadIdx.x;
int b0 = blockDim.x*blockIdx.x;
__shared__ float a[1024];
if (b0+ti < numElements){
a[ti]=A[b0+ti];
}
__syncthreads();
if (b0+ti < numElements){
for(int i=0;i<Ntimes;i++){
a[ti]=a[ti]*a[ti]*10-2*a[ti]+1;
}
C[b0+ti] = a[ti]*a[ti];
}
}
int main(void){
int numElements = 500000;
size_t size = numElements * sizeof(float);
// Allocate the host input
float *h_A = (float *)malloc(size);
float *h_B = (float *)malloc(size);
// Allocate the host output
float *h_C = (float *)malloc(size);
float *h_D = (float *)malloc(size);
// Initialize the host input
for (int i = 0; i < numElements; i++){
h_A[i] = rand()/(float)RAND_MAX;
h_B[i] = h_A[i];
}
// Allocate the device input
float *d_A = NULL; cudaMalloc((void **)&d_A, size);
float *d_B = NULL; cudaMalloc((void **)&d_B, size);
float *d_C = NULL; cudaMalloc((void **)&d_C, size);
float *d_D = NULL; cudaMalloc((void **)&d_D, size);
//Copy to Device
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = 1024;
int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
testgl<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C, numElements);
testsh<<<blocksPerGrid, threadsPerBlock>>>(d_B, d_D, numElements);
// Copy the device result to the host
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_D, d_D, size, cudaMemcpyDeviceToHost);
// Free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaFree(d_D);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
free(h_D);
// Reset the device and exit
cudaDeviceReset();
return 0;
}
If Ntimes is set to be 1, testgl costs 49us, and testsh costs 97us.
If Ntimes is set to be 100, testgl costs 9.7ms, and testsh costs 8.9ms.
I do not know why it's more than 100 times longer.
So it seems that shared memory helps only when we want to do a lot of work on the device, is that right?
The card used here is a GTX680.
Thanks in advance.
"shared memory will always improve the performance"
That's not true. It depends on the algorithm. If you have perfectly coalesced memory access in the kernel and you access global memory just once, it may not help. But if you are implementing, say, a matrix multiplication where the partial sums need to be held, then it will be useful.
It will also help if you access the same memory location more than once in the kernel, since shared memory latency is roughly 100 times lower than global memory latency (it is on-chip memory).
When you find that the kernel is bandwidth limited, that is a good point to consider whether there is scope for using shared memory to increase performance. It is also a good strategy to check the occupancy calculator, to see whether the shared memory usage is going to affect occupancy.
"shared memory helps only when we want to do a lot of work on the device?"
Partially, yes. Shared memory helps when we want to do a lot of work on the device.
In your case, in the above kernel, since you access global memory more than once it should help. It would be helpful if you could provide the complete reproducer to analyze the code, and also the details of the card you are running on.
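As a concrete illustration of the "same location read more than once" case (my sketch, not part of the answer above): in a 3-point stencil each input element is read by up to three threads, so staging a block's worth of input in shared memory replaces roughly three global reads per element with one. The sketch assumes blockDim.x <= 1024 and n a multiple of blockDim.x:
__global__ void stencil3(const float *in, float *out, int n)
{
    __shared__ float tile[1024 + 2];            // block's inputs plus a 1-element halo on each side
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    int lid = threadIdx.x + 1;                  // +1 leaves room for the left halo element

    tile[lid] = in[gid];                        // each element loaded from global memory once
    if (threadIdx.x == 0)
        tile[0] = (gid > 0) ? in[gid - 1] : 0.0f;
    if (threadIdx.x == blockDim.x - 1)
        tile[lid + 1] = (gid + 1 < n) ? in[gid + 1] : 0.0f;
    __syncthreads();

    // each thread reads three shared values that neighbouring threads also use
    out[gid] = (tile[lid - 1] + tile[lid] + tile[lid + 1]) / 3.0f;
}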
I am familiarizing myself with CUDA by writing a dot product calculator. I wanted to test it with large array sizes to do a timing study comparing two different ways of collecting the vector sum. However, when the size of the array is above 1024 I get errors, and I am not sure where the problem is coming from. The card is a GTX460M with 1.5GB of RAM, and I am using the card for display (this is a laptop). Aside from that, I am not sure where the issue could be coming from.
Here is the nvcc compile line:
nvcc D:\Research\CUDA\TestCode\test_dotProduct_1.cu --use_fast_math --gpu-architecture sm_13 --compiler-bindir="D:\Programming\VisualStudio\2010express\VC\bin" --machine 32 -o multi_dot.exe
I also seem to have trouble compiling in 64-bit, but that is another issue.
Here is the output for an array of size 1024:
HOST CALCULATION: 357389824.000000
DEV PARA CALCULATION: 357389824.000000
DEV SERI CALCULATION: 357389824.000000
Here is the output for an array of size 2048:
HOST CALCULATION: 2861214720.000000
DEV PARA CALCULATION: -1.#INF00
DEV SERI CALCULATION: -1.#INF00
Here is my code:
/*Code for a CUDA test project doing a basic dot product with doubles
*
*
*
*/
#include <stdio.h>
#include <cuda.h>
__global__ void GPU_parallelDotProduct(double *array_a, double *array_b, double *array_c){
array_c[threadIdx.x] = array_a[threadIdx.x] * array_b[threadIdx.x];
}
__global__ void GPU_parallelSumVector(double *vector, double *sum, int base){
sum[threadIdx.x + blockIdx.x] = vector[blockIdx.x + threadIdx.x * base] + vector[blockIdx.x + threadIdx.x * base + 1];
}
__global__ void GPU_serialSumVector(double *vector, double *sum, int dim){
for(int i = 0; i < dim; ++i){
sum[0] += vector[i];
}
}
__host__ void CPU_serialDot(double *first, double *second, double *dot, int dim){
for(int i=0; i<dim; ++i){
dot[0] += first[i] * second[i];
}
}
__host__ void CPU_serialSetupVector(double *vector, int dim, int incrSize, int start){
for(int i=0; i<dim; ++i){
vector[i] = start + i * incrSize;
}
}
int main(){
//define array size to be used
//int i,j;
const int VECTOR_LENGTH = 2048;
int SUM_BASE = 2;
int SUM_ROUNDS = VECTOR_LENGTH / SUM_BASE;
int ELEMENT_SIZE = sizeof(double);
// int currentSize = VECTOR_LENGTH;
//arrays for dot product
//host
double *array_a = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
double *array_b = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
double *dev_dot_product_parallel = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
double *dev_dot_product_serial = (double*) malloc(VECTOR_LENGTH * ELEMENT_SIZE);
double host_dot_product = 0.0;
//fill with values
CPU_serialSetupVector(array_a, VECTOR_LENGTH, 1, 0);
CPU_serialSetupVector(array_b, VECTOR_LENGTH, 1, 0);
CPU_serialDot(array_a, array_b, &host_dot_product, VECTOR_LENGTH);
//device
double *dev_array_a;
double *dev_array_b;
double *dev_array_c;
double *dev_dot_serial;
double *dev_dot_parallel;
//allocate cuda memory
cudaMalloc((void**)&dev_array_a, ELEMENT_SIZE * VECTOR_LENGTH);
cudaMalloc((void**)&dev_array_b, ELEMENT_SIZE * VECTOR_LENGTH);
cudaMalloc((void**)&dev_array_c, ELEMENT_SIZE * VECTOR_LENGTH);
cudaMalloc((void**)&dev_dot_parallel, ELEMENT_SIZE * VECTOR_LENGTH);
cudaMalloc((void**)&dev_dot_serial, ELEMENT_SIZE * VECTOR_LENGTH);
//copy to from host to device
cudaMemcpy(dev_array_a, array_a, ELEMENT_SIZE * VECTOR_LENGTH, cudaMemcpyHostToDevice);
cudaMemcpy(dev_array_b, array_b, ELEMENT_SIZE * VECTOR_LENGTH, cudaMemcpyHostToDevice);
cudaMemcpy(dev_dot_parallel, &dev_dot_product_parallel, ELEMENT_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dev_dot_serial, &dev_dot_product_serial, ELEMENT_SIZE, cudaMemcpyHostToDevice);
//perform CUDA dot product
GPU_parallelDotProduct<<<1, VECTOR_LENGTH>>>(dev_array_a, dev_array_b, dev_array_c);
//condense a second vector in serial to compare speed up of tree condensing
GPU_serialSumVector<<<1,1>>>(dev_array_c, dev_dot_serial, VECTOR_LENGTH);
//condense vector (parallel)
for(int i=SUM_ROUNDS; i>1; i/=SUM_BASE){
GPU_parallelSumVector<<<1,i>>>(dev_array_c, dev_array_c, SUM_BASE);
}
GPU_parallelSumVector<<<1,1>>>(dev_array_c, dev_array_c, SUM_BASE);
//get computed product back to the machine
cudaMemcpy(dev_dot_product_parallel, dev_array_c, VECTOR_LENGTH * ELEMENT_SIZE, cudaMemcpyDeviceToHost);
cudaMemcpy(dev_dot_product_serial, dev_dot_serial, VECTOR_LENGTH * ELEMENT_SIZE, cudaMemcpyDeviceToHost);
FILE *output = fopen("test_dotProduct_1.txt", "w");
fprintf(output, "HOST CALCULATION: %f \n", host_dot_product);
fprintf(output, "DEV PARA CALCULATION: %f \n", dev_dot_product_parallel[0]);
fprintf(output, "DEV SERI CALCULATION: %f \n", dev_dot_product_serial[0]);
/*
fprintf(output, "VALUES OF DEV_ARRAY_C VEC: \n");
for(int i=0; i<VECTOR_LENGTH; ++i){
fprintf(output, "value %i is: %f \n", i, dev_dot_product_parallel[i]);
}
*/
free(array_a);
free(array_b);
//free(host_dot_product);
cudaFree(dev_array_a);
cudaFree(dev_array_b);
cudaFree(dev_array_c);
cudaFree(dev_dot_parallel);
cudaFree(dev_dot_serial);
return(0);
}
The maximum number of threads per block for your card is 1024, which is why you are getting an error (for some older cards it is 512). You either need to split up your block to use multiple dimensions (x and y are each limited to 1024 on your card, and the total number of threads per block is still limited to 1024) or use more than one block in your grid.
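For example, a minimal multi-block version of the element-wise multiply stage might look like this sketch (reusing the names from the question; the extra n parameter and the bounds check are my additions):
__global__ void GPU_parallelDotProduct(const double *array_a, const double *array_b,
                                       double *array_c, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)                       // guard the last, partially filled block
        array_c[idx] = array_a[idx] * array_b[idx];
}

// launch with as many blocks as needed instead of one 2048-thread block:
int threadsPerBlock = 256;
int blocksPerGrid = (VECTOR_LENGTH + threadsPerBlock - 1) / threadsPerBlock;
GPU_parallelDotProduct<<<blocksPerGrid, threadsPerBlock>>>(dev_array_a, dev_array_b,
                                                           dev_array_c, VECTOR_LENGTH);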