Same issue as the post (Cuda - Multiple sums in each vector element): how do you perform 2D block striding in both the x- and y-directions, with varying summation limits? The 2D algorithm can be seen in the CPU function and the monolithic kernel. I included OpenMP for the CPU so as to get a fairer speedup result. If there is a way to increase the speed of the CPU function as well, I would be happy to find out.
This version of the code takes a 2D array and flattens it to a 1D array. I still use 2D dim3 thread indexing so I can index the double summations more intuitively.
(p.s. all credit to user Robert Crovella for the 1D striding code.)
The code so far is:
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <sys/time.h>
typedef double df;
#define USECPSEC 1000000ULL
#define BSX 1<<5
#define BSY 1<<5
#define N 100
#define M 100
const bool sync = true;
const bool nosync = false;
unsigned long long dtime_usec(unsigned long long start, bool use_sync = nosync){
if (use_sync == sync) cudaDeviceSynchronize();
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
int divUp(int a, int b) {return (a + b - 1) / b;}
float cpu_sum(int n, int m, df *a, df *b, df *c) {
df q, r;
#pragma omp parallel for collapse(2)
for (int x = 0; x < n; x++) {
for (int y = 0; y < m; y++) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
return 0;
}
const int P2 = 5;
const int TPB = 1<<P2;
const unsigned row_mask = ~((0xFFFFFFFFU>>P2)<<P2);
__global__ void chebyprod_imp(int n, int m, df *a, df *b, df *c){
__shared__ df sdata[TPB*TPB];
int x = blockIdx.x;
int y = blockIdx.y;
int row_width_x = (((x)>(n-x))?(x):(n-x))+1;
int row_width_y = (((y)>(m-y))?(y):(m-y))+1;
int strides_x = (row_width_x>>P2) + ((row_width_x&row_mask)?1:0);
int strides_y = (row_width_y>>P2) + ((row_width_y&row_mask)?1:0);
int i = threadIdx.x;
df tmp_a;
df sum = 0.0f;
for (int s=0; s < strides_x; s++) { // block-stride x loop
int j = threadIdx.y;
for (int u=0; u < strides_y; u++) { // block-stride y loop
if (i < n && j < m) {tmp_a = a[i * n + j];}
if (i <= x) {
if (j <= y) {sum += tmp_a * b[(x - i) * n + y - j];}
if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];}
}
if ((i > 0) && (i < (n-x))) {
if (j <= y) {sum += tmp_a * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];}
if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+ a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];}
}
j += TPB;
}
i += TPB;
}
sdata[threadIdx.x * TPB + threadIdx.y] = sum;
for (int s = TPB>>1; s > 0; s>>=1) { // sweep reduction in x
for (int u = TPB>>1; u > 0; u>>=1) { // sweep reduction in y
__syncthreads();
if (threadIdx.x < s && threadIdx.y < u) {
sdata[threadIdx.x * TPB + threadIdx.y] += sdata[(threadIdx.x + s) * TPB + threadIdx.y + u];
}
}
}
if (!threadIdx.x && !threadIdx.y) c[x * n + y] = 0.25f*sdata[0];
}
__global__ void chebyprod(int n, int m, df *a, df *b, df *c){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
df q, r;
if (x < n && y < m) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
int main(void){
int size = N*M*sizeof(df);
df *a, *b, *c, *cc, *ci, *d_a, *d_b, *d_c, *d_ci;
a = (df*)malloc(size);
b = (df*)malloc(size);
c = (df*)malloc(size);
cc = (df*)malloc(size);
ci = (df*)malloc(size);
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
cudaMalloc(&d_ci, size);
#pragma omp parallel for collapse (2)
for (int i = 0; i < N; i++) {
for (int j = 0; j < M; j++) {
a[i * M + j] = 0.1f;
b[i * M + j] = 0.2f;
}
}
unsigned long long dt = dtime_usec(0);
// Perform chebyprod on N elements
cpu_sum(N, M, a, b, cc);
dt = dtime_usec(dt,sync);
printf("Time taken 2D CPU: %fs\n", dt/(float)USECPSEC);
df dtc = dt/(float)USECPSEC;
std::cout << "Vector cc: [ ";
for (int k = 0; k < 10; ++k)
std::cout << cc[k] << " ";
std::cout <<"]\n";
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BSX, BSY);
dim3 dimGrid(divUp(N, BSX), divUp(M, BSY));
//std::cout << "dimBlock: " << dimBlock << "\n dimGrid: " << dimGrid << "\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
chebyprod<<< dimBlock, dimGrid >>>(N, M, d_a, d_b, d_c);
dt = dtime_usec(dt,sync);
printf("Time taken 2D monolithic kernel: %fs\n", dt/(float)USECPSEC);
printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
std::cout << "Vector c: [ ";
for (int k = 0; k < 10; ++k)
std::cout << c[k] << " ";
std::cout <<"]\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
chebyprod_imp<<< dimBlock, dimGrid >>>(N, M, d_a, d_b, d_ci);
dt = dtime_usec(dt,sync);
printf("Time taken 2D stride kernel: %fs\n", dt/(float)USECPSEC);
cudaMemcpy(ci, d_ci, size, cudaMemcpyDeviceToHost);
std::cout << "Vector ci: [ ";
for (int k = 0; k < 10; ++k)
std::cout << ci[k] << " ";
std::cout <<"]\n";
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_ci);
free(a);
free(b);
free(c);
free(cc);
free(ci);
}
For me, anyway, the results for the CPU code don't match between the cases where I compile with OpenMP support and without, if I omit -O3. I seem to get the correct results with OpenMP compilation if I also specify -O3. I'm not sure why that should matter for correctness, although it obviously has an impact on CPU code performance. (One likely reason: q and r are declared outside the parallel for loop, so OpenMP treats them as shared across threads, which is a data race; declaring them inside the loop body, or adding private(q, r) to the pragma, removes the dependence on optimization level.)
You seem to have gotten your grid and block sizing backwards:
chebyprod<<< dimBlock, dimGrid >>>(....
the first kernel config parameter is the grid dimension, not the block dimension. I'm not sure how this came about, since you had it done correctly in your previous question.
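The fix is simply to swap the two (this corrected launch appears in the code below):
chebyprod<<< dimGrid, dimBlock >>>(N, M, d_a, d_b, d_c);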
As in the previous question, we need to pick a thread strategy and implement it correctly. You seemed to be confused about striding, so hopefully the code below will clarify things. The thread strategy I will use here is one warp per output point. A warp is a group of 32 threads that are adjacent in the x direction (and span a single row in the y direction). Therefore the loop striding will be by an increment of 32 in the x direction, but only 1 in the y direction, to cover the entire space. The choice of thread strategy also affects grid sizing.
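Distilled to a skeleton (the names here match the full kernel further down, which implements this pattern), the warp-stride loop looks like:
int i = threadIdx.x;                      // lane index within the warp, 0..31
for (int s = 0; s < strides_x; s++) {     // warp-stride x loop
    for (int j = 0; j < strides_y; j++) { // ordinary y loop, stride 1
        // ... accumulate into a per-thread sum using i and j ...
    }
    i += 32;                              // advance by one warp width in x
}
// finally, reduce the 32 per-thread sums across the warp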
You seem to have jumbled the relationships that I think should exist for the two dimensions. The x direction, N, and n should all be connected. Likewise the y direction, M and m should all be connected (for example, M is the dimension in the y direction).
When it comes to 2D threadblocks, we want to arrange indexing for coalescing on the GPU such that the index that includes threadIdx.x is not multiplied by anything. (A simplified statement of coalescing is that we want adjacent threads in the warp to access adjacent elements in memory. Since threadIdx.x increases by 1 as we go from thread to thread in the warp, we want to use this characteristic to generate adjacent memory indexing. If we multiply threadIdx.x by anything except 1, we break the pattern.) You have this reversed: the index including threadIdx.x is typically multiplied by the row dimension (N, or n). This really cannot be correct, and it also does not make for good coalesced access. To solve this, we want to transpose our indexing, and also transpose the data storage for a and b (and therefore c). In the code below, I have transposed the indexing for the data setup for a and b, and the relevant indexing has been transposed in the striding kernel (only). In your non-striding kernel and also your CPU version I have not transposed the indexing; I leave that as an exercise for you, if needed. Numerically, it does not matter for the results, because your entire a matrix has the same value at every location, and a similar statement can be made about your b matrix; for this example code, then, transposing (or not) has no bearing on the result. But it matters for performance (of the striding kernel, at least). Also note that I believe performing the indexing "transpose" on the "monolithic" kernel should improve its performance as well. I don't know whether it would affect the performance of the CPU version.
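To make the coalescing point concrete, here is the access pattern before and after the transpose, where the index i contains threadIdx.x:
tmp_a = a[i * n + j];   // before: lane-varying index scaled by n -> strided, uncoalesced
tmp_a = a[j * n + i];   // after:  adjacent lanes read adjacent addresses -> coalesced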
I've also added back in the const __restrict__ usage that I included in my previous answer. According to my testing, on "smaller" GPUs this provides noticeable performance benefit. It's not strictly necessary for correctness, however. Here's a worked example with the above changes that gives numerically matching results for all 3 test cases:
$ cat t1498.cu
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
typedef double df;
#define USECPSEC 1000000ULL
#define BSX 1<<5
#define BSY 1<<5
#define N 100
#define M 100
const bool sync = true;
const bool nosync = false;
unsigned long long dtime_usec(unsigned long long start, bool use_sync = nosync){
if (use_sync == sync) cudaDeviceSynchronize();
timeval tv;
gettimeofday(&tv, 0);
return ((tv.tv_sec*USECPSEC)+tv.tv_usec)-start;
}
int divUp(int a, int b) {return (a + b - 1) / b;}
void cpu_sum(int n, int m, df *a, df *b, df *c) {
df q, r;
#pragma omp parallel for collapse(2)
for (int x = 0; x < n; x++) {
for (int y = 0; y < m; y++) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
}
// choose one warp per output point
const int P2 = 5; // assumes warp size is 32
const unsigned row_mask = ~((0xFFFFFFFFU>>P2)<<P2);
__global__ void chebyprod_imp(int n, int m, const df * __restrict__ a, const df * __restrict__ b, df * __restrict__ c){
int x = blockIdx.x;
int y = threadIdx.y+blockDim.y*blockIdx.y;
int width_x = (((x)>(n-x))?(x):(n-x))+1;
int height_y = (((y)>(m-y))?(y):(m-y))+1;
int strides_x = (width_x>>P2) + ((width_x&row_mask)?1:0);
int strides_y = height_y;
int i = threadIdx.x;
df tmp_a;
df sum = 0.0f;
if ((x < n) && (y < m)){
for (int s=0; s < strides_x; s++) { // warp-stride x loop
for (int j=0; j < strides_y; j++) { // y loop
if (i < n && j < m) {tmp_a = a[j * n + i];}
if (i <= x) {
if (j <= y) {sum += tmp_a * b[(y - j) * n + x - i];}
if ((j > 0) && (j < (m-y))) {sum += tmp_a * b[(y+j) * n + x - i] + a[(y+j)* n + i] * b[j*n+(x - i)];}
}
if ((i > 0) && (i < (n-x))) {
if (j <= y) {sum += tmp_a * b[(y-j) * n + x+i] + a[j*n + (x + i)] * b[(y - j)*n + i];}
if ((j > 0) && (j < (m-y)))
{sum += tmp_a * b[(y+j) * n + x+i]
+ a[(y+j) * n + x + i] * b[j*n+(x + i)]
+ a[j*n + (x + i)] * b[(y+j)*n + i]
+ a[(y+j)*n + x + i] * b[j*n+i];}
}
}
i += 32;
}
// warp-shuffle reduction
for (int offset = warpSize>>1; offset > 0; offset >>= 1)
sum += __shfl_down_sync(0xFFFFFFFFU, sum, offset);
if (!threadIdx.x) c[y*m+x] = 0.25f*sum;}
}
__global__ void chebyprod(int n, int m, df *a, df *b, df *c){
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
df q, r;
if (x < n && y < m) {
q = 0.0f;
for (int i = 0; i <= x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x - i) * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x - i) * n + y + j]
+ a[i * n + y + j] * b[(x - i) * n + j];
}
q += r;
}
for (int i = 1; i < n-x; i++) {
r = 0.0f;
for (int j = 0; j <= y; j++) {
r += a[i * n + j] * b[(x + i) * n + y - j]
+ a[(x + i) * n + j] * b[ i * n + y - j];
}
for (int j = 1; j < m - y; j++) {
r += a[i * n + j] * b[(x + i) * n + y + j]
+ a[(x + i) * n + y + j] * b[(x + i) * n + j]
+a[(x + i) * n + j] * b[i * n + y + j]
+ a[(x + i) * n + y + j] * b[i * n + j];
}
q += r;
}
c[x * N + y] = 0.25f*q;
}
}
int main(void){
int size = N*M*sizeof(df);
df *a, *b, *c, *cc, *ci, *d_a, *d_b, *d_c, *d_ci;
a = (df*)malloc(size);
b = (df*)malloc(size);
c = (df*)malloc(size);
cc = (df*)malloc(size);
ci = (df*)malloc(size);
cudaMalloc(&d_a, size);
cudaMalloc(&d_b, size);
cudaMalloc(&d_c, size);
cudaMalloc(&d_ci, size);
#pragma omp parallel for collapse (2)
for (int j = 0; j < M; j++) {
for (int i = 0; i < N; i++) {
a[j * N + i] = 0.1f;
b[j * N + i] = 0.2f;
}
}
unsigned long long dt = dtime_usec(0);
// Perform chebyprod on N elements
cpu_sum(N, M, a, b, cc);
dt = dtime_usec(dt,sync);
printf("Time taken 2D CPU: %fs\n", dt/(float)USECPSEC);
df dtc = dt/(float)USECPSEC;
std::cout << "Vector cc: [ ";
for (int k = 0; k < 10; ++k)
std::cout << cc[k] << " ";
std::cout <<"]\n";
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BSX, BSY);
dim3 dimGrid(divUp(N, BSX), divUp(M, BSY));
//std::cout << "dimBlock: " << dimBlock << "\n dimGrid: " << dimGrid << "\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
chebyprod<<< dimGrid, dimBlock >>>(N, M, d_a, d_b, d_c);
dt = dtime_usec(dt,sync);
printf("Time taken 2D monolithic kernel: %fs\n", dt/(float)USECPSEC);
printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
std::cout << "Vector c: [ ";
for (int k = 0; k < 10; ++k)
std::cout << c[k] << " ";
std::cout <<"]\n";
dt = dtime_usec(0);
// Perform chebyprod on N elements
dim3 dimGrid2(N, (M+dimBlock.y-1)/dimBlock.y);
chebyprod_imp<<< dimGrid2, dimBlock >>>(N, M, d_a, d_b, d_ci);
dt = dtime_usec(dt,sync);
printf("Time taken 2D stride kernel: %fs\n", dt/(float)USECPSEC);
printf("Speedup: %fs\n", dtc/(dt/(float)USECPSEC));
cudaMemcpy(ci, d_ci, size, cudaMemcpyDeviceToHost);
std::cout << "Vector ci: [ ";
for (int k = 0; k < 10; ++k)
std::cout << ci[k] << " ";
std::cout <<"]\n";
df max_error = 0;
for (int k = 0; k < N*M; k++)
max_error = fmax(max_error, fabs(c[k] - ci[k]));
std::cout << "Max diff = " << max_error << std::endl;
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_ci);
free(a);
free(b);
free(c);
free(cc);
free(ci);
}
$ nvcc -O3 -Xcompiler -fopenmp -arch=sm_52 -o t1498 t1498.cu
$ ./t1498
Time taken 2D CPU: 0.034830s
Vector cc: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D monolithic kernel: 0.033687s
Speedup: 1.033930
Vector c: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D stride kernel: 0.013526s
Speedup: 2.575041
Vector ci: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Max diff = 8.52651e-13
$
CUDA 10.1.105, Fedora 29, GTX 960
Note that when we run this same test on a Tesla V100, which can take the most advantage of the "extra" threads available in the striding kernel case, the benefit is more obvious:
$ OMP_NUM_THREADS=32 ./t1498
Time taken 2D CPU: 0.031610s
Vector cc: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D monolithic kernel: 0.018228s
Speedup: 1.734145
Vector c: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Time taken 2D stride kernel: 0.000731s
Speedup: 43.242137
Vector ci: [ 198.005 197.01 196.015 195.02 194.025 193.03 192.035 191.04 190.045 189.05 ]
Max diff = 8.52651e-13
If you perform the indexing "transpose" on your monolithic kernel similar to what I have done in the striding kernel, I think you'll end up in a performance situation that is roughly similar to where you ended up in the last question. Little or no performance benefit for the striding kernel over your monolithic kernel on a "small" GPU. ~5x improvement on a "large" GPU.
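For reference, a minimal sketch of what that transpose might look like in the monolithic kernel, showing only the first of the four summation regions (the kernel name is hypothetical and this is untested; it assumes a and b are stored transposed, as main() above already does):
__global__ void chebyprod_transposed(int n, int m, const df * __restrict__ a, const df * __restrict__ b, df * __restrict__ c){
  int x = blockIdx.x * blockDim.x + threadIdx.x;  // threadIdx.x stays un-multiplied below
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < n && y < m) {
    df q = 0.0f;
    for (int i = 0; i <= x; i++)
      for (int j = 0; j <= y; j++)
        q += a[j * n + i] * b[(y - j) * n + (x - i)];  // x - i carries threadIdx.x, un-scaled
    // ... the other three index regions, transposed the same way ...
    c[y * m + x] = 0.25f*q;
  }
}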
Related
I need to turn the following implementation of this function into a threaded one.
Yet I have absolutely no clue how to do so, nor how to approach the problem.
Any tips or orientation are much appreciated.
void compute_target_pixel(int x, int y) {
int i, j, sum = 0;
int delta = (KLEN - 1) / 2;
for (i = -delta; i <= delta; ++i)
for (j = -delta; j <= delta; ++j)
if (0 <= x + i && x + i < WIDTH && 0 <= y + j && y + j < HEIGHT)
sum += filter.values[(i + delta) * KLEN + (j + delta)] * pixels[(x + i) * HEIGHT + (y + j)];
if(filter.sum > 0) target[x * HEIGHT + y] = sum / filter.sum;
else target[x * HEIGHT + y] = sum;
}
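One straightforward approach (a sketch, not a definitive answer): each output pixel is computed independently and each call writes a distinct target element, so the pixel loop can be handed to OpenMP directly. The driver function name here is hypothetical; KLEN, WIDTH, HEIGHT, filter, pixels and target are the globals assumed by the snippet above.
#include <omp.h>
// hypothetical driver: iterate over every pixel, letting OpenMP split the work
void compute_all_pixels(void) {
    #pragma omp parallel for collapse(2)
    for (int x = 0; x < WIDTH; ++x)
        for (int y = 0; y < HEIGHT; ++y)
            compute_target_pixel(x, y);   // writes only target[x * HEIGHT + y]
}
Compile with -fopenmp (GCC/Clang).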
I have this function written in C. Are these two statements equivalent?
void dot_prod(float *A, int m, float *B, float *C) {
int i, j, z, k;
for (i = 0; i < m; i++) {
for (j = 0; j < m; j++) {
for (k = 0; k < m; k++) {
C[i * m + k] += A[i * m + j] * B[m * j + k];
//*(C + i * m + k) += *(A + i * m + j) * (*(B + m * j + k)); // is equivalent?
}
}
}
}
Is
C[i * m + k] += A[i * m + j] * B[m * j + k]
equivalent to
*(C + i * m + k) += *(A + i * m + j) * (*(B + m * j + k))?
Almost.
C[i * m + k] += A[i * m + j] * B[m * j + k]
is equivalent to
*(C + (i * m + k)) += *(A + (i * m + j)) * (*(B + (m * j + k)))
This is different from the version without the extra parentheses, since the addition in the indices may overflow, in which case the pointer/integer addition is not necessarily associative, depending on the sizes of int and of pointers.
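A small self-contained demonstration of the grouping (indices chosen so nothing overflows; all three expressions then name the same element):
#include <stdio.h>
int main(void) {
    float C[12] = {0};
    int i = 2, m = 4, k = 1;
    // E1[E2] is defined as *((E1) + (E2)), so C[i*m+k] is *(C + (i*m+k)).
    // Written without parentheses, + groups left to right: *((C + i*m) + k).
    // Absent overflow in i*m+k, these are identical; with overflow they may differ.
    printf("%d %d %d\n",
           &C[i * m + k] == &*(C + (i * m + k)),
           &C[i * m + k] == &*((C + i * m) + k),
           &C[i * m + k] == &*(C + i * m + k));   // prints: 1 1 1
    return 0;
}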
I've written a CUDA C program to parallelize matrix multiplication by having each thread calculate a row of the result matrix. I've stored my matrices as 1D arrays in row-major form. I can't find out why my program isn't working, be it an issue with the pointers or with the kernel code. Help would be appreciated, thanks!
Code:
#include <cuda.h>
#include <stdio.h>   // printf
#include <math.h>    // ceil, used in the kernel launch
#include <time.h>
#include <stdlib.h>
__global__ void multiplyMatricesKernel(float* d_x, float* d_y, float* d_z, int m, int n, int p)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < m)
{
for(int j = 0; j < p; ++j)
{
d_z[i * p + j] = 0;
for(int k = 0; k < m; ++k)
{
d_z[i * p + j] += d_x[i * n + k] * d_y[k * p + j];
}
}
}
}
void multiplyMatrices(float* x, float* y, float* z, int m, int n, int p)
{
int elements_x = m * n * sizeof(float);
int elements_y = n * p * sizeof(float);
int elements_z = m * p * sizeof(float);
float* d_x;
float* d_y;
float* d_z;
cudaMalloc((void**) &d_x, elements_x);
cudaMalloc((void**) &d_y, elements_y);
cudaMalloc((void**) &d_z, elements_z);
cudaMemcpy(d_x, x, elements_x, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, elements_y, cudaMemcpyHostToDevice);
multiplyMatricesKernel<<<ceil(m / 64.0), 64>>>(d_x, d_y, d_z, m, n, p);
cudaMemcpy(z, d_z, elements_z, cudaMemcpyDeviceToHost);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
}
int main()
{
srand(time(NULL));
int m = rand() % 8 + 1;
int n = rand() % 8 + 1;
int p = rand() % 8 + 1;
float x[m * n];   // a VLA cannot take an initializer; x and y are filled below
float y[n * p];
float z[m * p];   // overwritten by the result copied back from the device
printf("X =\n[");
for(int i = 0; i < sizeof(x) / sizeof(float); ++i)
{
x[i] = rand() % 129 - 64;
printf("%.1f ", x[i]);
if((i + 1) % n == 0 && i != (sizeof(x) / sizeof(float) - 1))
{
printf("]\n[");
}
if(i == (sizeof(x) / sizeof(float) - 1))
{
printf("]\n\n");
}
}
printf("Y = \n[");
for(int i = 0; i < sizeof(y) / sizeof(float); ++i)
{
y[i] = rand() % 129 - 64;
printf("%.1f ", y[i]);
if((i + 1) % p == 0 && i != (sizeof(y) / sizeof(float) - 1))
{
printf("]\n[");
}
if(i == (sizeof(y) / sizeof(float) - 1))
{
printf("]\n\n");
}
}
multiplyMatrices(x, y, z, m, n, p);
printf("Z = \n[");
for(int i = 0; i < sizeof(z) / sizeof(float); ++i)
{
printf("%.1f ", z[i]);
if((i + 1) % p == 0 && i != (sizeof(z) / sizeof(float) - 1))
{
printf("]\n[");
}
if(i == (sizeof(z) / sizeof(float) - 1))
{
printf("]\n\n");
}
}
return 0;
}
Regarding your kernel, there is a mistake in dimensioning y and z
int elements_x = m * n * sizeof(float);
int elements_y = n * p * sizeof(float);
int elements_z = m * p * sizeof(float);
x is m x n
y is m x p, not n x p
z is n x p, not m x p
This kind of error is very difficult to find...
The problem is in this line inside your kernel.
for(int k = 0; k < m; ++k)
It should be n not m, as each element is the sum of n multiplications, like this:
for(int k = 0; k < n; ++k)
I also want to add that you can make the block size 1024, not only 64.
multiplyMatricesKernel<<<ceil(m / 1024.0), 1024>>>(d_x, d_y, d_z, m, n, p);
Finally, you can make it even faster by calculating all elements in parallel, not only the rows.
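A sketch of that fully parallel version (the kernel name and launch configuration are hypothetical; one thread per element of Z, with the k bound already corrected to n):
__global__ void multiplyMatricesKernel2D(float* d_x, float* d_y, float* d_z, int m, int n, int p)
{
    int i = blockDim.y * blockIdx.y + threadIdx.y;   // row of Z
    int j = blockDim.x * blockIdx.x + threadIdx.x;   // column of Z
    if(i < m && j < p)
    {
        float sum = 0.0f;
        for(int k = 0; k < n; ++k)                   // n products per output element
        {
            sum += d_x[i * n + k] * d_y[k * p + j];
        }
        d_z[i * p + j] = sum;
    }
}
// launch: dim3 block(16, 16);
//         dim3 grid((p + 15) / 16, (m + 15) / 16);
// multiplyMatricesKernel2D<<<grid, block>>>(d_x, d_y, d_z, m, n, p);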
I managed to solve my issue. It turned out that because I hadn't rebooted my Arch Linux system after a kernel package update, the nvidia module needed for GPU memory allocation and GPU-to-CPU memory transfers (nvidia_uvm) wasn't loaded. A restart of my system did the trick. Thanks for all your help, especially Robert Crovella and AbdelAziz AbdelLatef for pointing out the iteration error in my kernel.
I wrote a program that has two versions of a median filter implemented using OpenCV in C, one sequential and the other parallelized with OpenMP. My problem is that the OpenMP version seems to run slower than the sequential one, no matter the chunk size or the number of threads.
Any ideas/advice is very much welcomed!
Here is my sequential code:
void medianFilter (const IplImage* img){
IplImage* output = cvCloneImage(img);
int rows, cols, step;
uchar *data;
rows = output->height;
cols = output->width;
step = output->widthStep;
data = (uchar *)output->imageData;
if(!data)
{ return; }
//create a sliding window of size 9
int window[9];
for(int y = 1; y < rows - 1; y++){
for(int x = 1; x < cols - 1; x++){
// Pick up window element
window[0] = data[(y - 1) * step + (x - 1)];
window[1] = data[y * step + (x - 1)];
window[2] = data[(y + 1) * step + (x - 1)];
window[3] = data[(y - 1) * step + x];
window[4] = data[y * step + x];
window[5] = data[(y + 1) * step + x];
window[6] = data[(y - 1) * step + (x + 1)];
window[7] = data[y * step + (x + 1)];
window[8] = data[(y + 1) * step + (x + 1)];
// Sort the window to find median
insertionSort(window);
// Assign the median to centered element of the matrix
data[y * step + x] = window[4];
}
}
cvNamedWindow("Post-filter", CV_WINDOW_AUTOSIZE);
cvShowImage("Post-filter", output);
cvReleaseImage(&output);
}
Here is my parallelized code:
void omp_medianFilter (const IplImage* img){
IplImage* output = cvCloneImage(img);
int rows, cols, step, nthreads;
uchar *data;
rows = output->height;
cols = output->width;
step = output->widthStep;
data = (uchar *)output->imageData;
if(!data)
{ return; }
// Create a sliding window of size 9
int window[9], x, y;
// Set the number of threads to use
omp_set_num_threads(NUM_THREADS);
// Parallel code segment. Window, x and y are private variables for each thread
#pragma omp parallel private(window, x, y)
{
//if(omp_get_thread_num() == 0){
//nthreads = omp_get_num_threads();
//printf("Numer of threads running: %d \n", nthreads);
//}
// Parallel for loop with dynamic scheduling and collapsing nested loops
#pragma omp for schedule(dynamic, CHUNK) collapse(2)
for(y = 1; y < rows - 1; y++){
for(x = 1; x < cols - 1; x++){
// Pick up 3x3 window elements
window[0] = data[(y - 1) * step + (x - 1)];
window[1] = data[y * step + (x - 1)];
window[2] = data[(y + 1) * step + (x - 1)];
window[3] = data[(y - 1) * step + x];
window[4] = data[y * step + x];
window[5] = data[(y + 1) * step + x];
window[6] = data[(y - 1) * step + (x + 1)];
window[7] = data[y * step + (x + 1)];
window[8] = data[(y + 1) * step + (x + 1)];
// Sort the window to find median
insertionSort(window);
// Assign the median to centered element of the matrix
data[y * step + x] = window[4];
}
}
}
cvNamedWindow("Post-filter (OMP)", CV_WINDOW_AUTOSIZE);
cvShowImage("Post-filter (OMP)", output);
cvReleaseImage(&output);
}
Full Code:
#include <stdio.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/highgui/highgui_c.h>
#include <opencv2/core/types_c.h>
#include <sys/time.h>
#include <omp.h>
#define NUM_THREADS 8
#define CHUNK 15000
//Function to measure time
double get_walltime() {
struct timeval tp; gettimeofday(&tp, NULL);
return (double) (tp.tv_sec + tp.tv_usec*1e-6);
}
//Sort the window elements using insertion sort
void insertionSort(int window[])
{
int temp, i , j;
for(i = 0; i < 9; i++){
temp = window[i];
for(j = i-1; j >= 0 && temp < window[j]; j--){
window[j+1] = window[j];
}
window[j+1] = temp;
}
}
void medianFilter (const IplImage* img){
IplImage* output = cvCloneImage(img);
int rows, cols, step;
uchar *data;
rows = output->height;
cols = output->width;
step = output->widthStep;
data = (uchar *)output->imageData;
if(!data)
{ return; }
//create a sliding window of size 9
int window[9];
for(int y = 1; y < rows - 1; y++){
for(int x = 1; x < cols - 1; x++){
// Pick up window element
window[0] = data[(y - 1) * step + (x - 1)];
window[1] = data[y * step + (x - 1)];
window[2] = data[(y + 1) * step + (x - 1)];
window[3] = data[(y - 1) * step + x];
window[4] = data[y * step + x];
window[5] = data[(y + 1) * step + x];
window[6] = data[(y - 1) * step + (x + 1)];
window[7] = data[y * step + (x + 1)];
window[8] = data[(y + 1) * step + (x + 1)];
// Sort the window to find median
insertionSort(window);
// Assign the median to centered element of the matrix
data[y * step + x] = window[4];
}
}
cvNamedWindow("Post-filter", CV_WINDOW_AUTOSIZE);
cvShowImage("Post-filter", output);
cvReleaseImage(&output);
}
// Parallelized implementation of median filter
void omp_medianFilter (const IplImage* img){
IplImage* output = cvCloneImage(img);
int rows, cols, step, nthreads;
uchar *data;
rows = output->height;
cols = output->width;
step = output->widthStep;
data = (uchar *)output->imageData;
if(!data)
{ return; }
// Create a sliding window of size 9
int window[9], x, y, j, k, min;
// Set the number of threads to use
omp_set_num_threads(NUM_THREADS);
// Parallel code segment. Window, x and y are private variables for each thread
#pragma omp parallel private(window, x, y, j, k, min)
{
//if(omp_get_thread_num() == 0){
//nthreads = omp_get_num_threads();
//printf("Numer of threads running: %d \n", nthreads);
//}
// Parallel for loop with dynamic scheduling and collapsing nested loops
#pragma omp for schedule(dynamic, CHUNK) collapse(2)
for(y = 1; y < rows - 1; y++){
for(x = 1; x < cols - 1; x++){
// Pick up 3x3 window elements
window[0] = data[(y - 1) * step + (x - 1)];
window[1] = data[y * step + (x - 1)];
window[2] = data[(y + 1) * step + (x - 1)];
window[3] = data[(y - 1) * step + x];
window[4] = data[y * step + x];
window[5] = data[(y + 1) * step + x];
window[6] = data[(y - 1) * step + (x + 1)];
window[7] = data[y * step + (x + 1)];
window[8] = data[(y + 1) * step + (x + 1)];
// Sort the window to find median
//insertionSort(window);
for (int j = 0; j < 5; ++j)
{
// Find position of minimum element
int min = j;
for (int l = j + 1; l < 9; ++l)
if (window[l] < window[min])
min = l;
// Put found minimum element in its place
const int temp = window[j];
window[j] = window[min];
window[min] = temp;
}
// Assign the median to centered element of the matrix
data[y * step + x] = window[4];
}
}
}
cvNamedWindow("Post-filter (OMP)", CV_WINDOW_AUTOSIZE);
cvShowImage("Post-filter (OMP)", output);
cvReleaseImage(&output);
}
int main(int argc, char *argv[])
{
IplImage* src;
double time1, time2;
if(argc<2){
printf("Usage: main <image-file-name>\n\7");
exit(0);
}
// Load a source image
src = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
cvNamedWindow("Original", CV_WINDOW_AUTOSIZE);
cvShowImage("Original", src);
/*time1 = get_walltime();
medianFilter(src);
time2 = get_walltime();
printf("Sequential Code Performance: %fs\n", time2 - time1);*/
time1 = get_walltime();
omp_medianFilter(src);
time2 = get_walltime();
printf("Parallel Code Performance: %fs\n", time2 - time1);
cvWaitKey(0);
cvReleaseImage(&src);
return 0;
}
Fixed:
I did apply much of the advice given, and I did see performance improvements, but the things mentioned weren't the problem.
Turns out it was something pretty stupid. I was running this on a VM with Ubuntu 16.04, and I had accidentally forgotten to increase the number of cores, so it was only using one, which might as well mean it wasn't parallelized at all.
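For anyone debugging a similar case, a quick sanity check with the standard OpenMP API would have exposed it immediately, since a single-core VM reports 1 for both calls:
#include <stdio.h>
#include <omp.h>
int main(void) {
    printf("procs available: %d\n", omp_get_num_procs());
    printf("max threads:     %d\n", omp_get_max_threads());
    return 0;
}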
I have a typical algorithm for matrix multiplication. I am trying to apply and understand loop unrolling, but I am having a problem implementing the algorithm when the unroll factor isn't a divisor of the matrix size (I get very large numbers as a result instead). That means I am not getting how to handle the remaining elements after unrolling. Here is what I have:
void Mult_Matx(unsigned long* a, unsigned long* b, unsigned long*c, long n)
{
long i = 0, j = 0, k = 0;
unsigned long sum, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
for (i = 0; i < n; i++)
{
long in = i * n;
for (j = 0; j < n; j++)
{
sum = sum1 = sum2 = sum3 = sum4 = sum5 = sum6 = sum7 = 0;
for (k = 0; k < n; k += 8)
{
sum = sum + a[in + k] * b[k * n + j];
sum1 = sum1 + a[in + (k + 1)] * b[(k + 1) * n + j];
sum2 = sum2 + a[in + (k + 2)] * b[(k + 2) * n + j];
sum3 = sum3 + a[in + (k + 3)] * b[(k + 3) * n + j];
sum4 = sum4 + a[in + (k + 4)] * b[(k + 4) * n + j];
sum5 = sum5 + a[in + (k + 5)] * b[(k + 5) * n + j];
sum6 = sum6 + a[in + (k + 6)] * b[(k + 6) * n + j];
sum7 = sum7 + a[in + (k + 7)] * b[(k + 7) * n + j];
}
if (n % 8 != 0)
{
for (k = 8 * (n / 8); k < n; k++)
{
sum = sum + a[in + k] * b[k * n + j];
}
}
c[in + j] = sum + sum1 + sum2 + sum3 + sum4 + sum5 + sum6 + sum7;
}
}
}
Let's say the size, aka n, is 12. When I unroll 4 times, this code works, meaning it never enters the remainder loop. But I am losing track of what's going on when it does! If anyone can point out where I am going wrong, I'd really appreciate it. I am new to this, and having a hard time figuring it out.
A generic way of unrolling a loop of this shape:
for(int i=0; i<N; i++)
...
is
int i;
for(i=0; i+L<=N; i+=L)
...
for(; i<N; i++)
...
or if you want to keep the index variable in the scope of the loops:
for(int i=0; i+L<=N; i+=L)
...
for(int i=L*(N/L); i<N; i++)
...
Here, I'm using the fact that integer division rounds down. L is the unroll factor, i.e., the number of iterations handled per pass of the first loop. The condition i+L<=N guarantees a full block of L iterations is still available, and the cleanup loop then starts at L*(N/L), the first index not covered by a full block, so the two variants agree even when N is an exact multiple of L.
Example:
const int N=22;
const int L=6;
int i;
for(i=0; i+L<=N; i+=L)
{
printf("%d\n", i);
printf("%d\n", i+1);
printf("%d\n", i+2);
printf("%d\n", i+3);
printf("%d\n", i+4);
printf("%d\n", i+5);
}
for(; i<N; i++)
printf("%d\n", i);
But I recommend taking a look at Duff's device. However, I suspect that it's not always a good thing to use, because it relies on a modulo operation, which is a pretty expensive one.
The condition if (n % 8 != 0) should not be needed. The for header should take care of that if written properly.
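Putting this together for the question's code, here is a sketch of the corrected function (single accumulator for brevity; the eight separate sums from the question can be kept if the goal is instruction-level parallelism). The main-loop condition k + 8 <= n keeps the a[in + k + 7] and b[(k + 7) * n + j] accesses in bounds, which is the most likely source of the very large numbers, and it makes the n % 8 test unnecessary:
void Mult_Matx(unsigned long* a, unsigned long* b, unsigned long* c, long n)
{
    for (long i = 0; i < n; i++)
    {
        long in = i * n;
        for (long j = 0; j < n; j++)
        {
            unsigned long sum = 0;
            long k;
            for (k = 0; k + 8 <= n; k += 8)   // full blocks of 8 only
            {
                sum += a[in + k]     * b[k * n + j]
                     + a[in + k + 1] * b[(k + 1) * n + j]
                     + a[in + k + 2] * b[(k + 2) * n + j]
                     + a[in + k + 3] * b[(k + 3) * n + j]
                     + a[in + k + 4] * b[(k + 4) * n + j]
                     + a[in + k + 5] * b[(k + 5) * n + j]
                     + a[in + k + 6] * b[(k + 6) * n + j]
                     + a[in + k + 7] * b[(k + 7) * n + j];
            }
            for (; k < n; k++)                // remainder, at most 7 iterations
            {
                sum += a[in + k] * b[k * n + j];
            }
            c[in + j] = sum;
        }
    }
}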