Intel intrinsics code optimization - C

So I'm trying to multiply a short int a[101] array by a constant using Intel intrinsics. I have done it with addition, but I can't seem to figure out why it won't work with multiplication. Also, we previously used 32-bit ints and now we use 16-bit shorts, so as far as I understand we can fit twice as many values into the 128-bit register?
A naive example of what I'm trying to do:
int mult(int len, short int *a);

int main(int argc, char **argv){
    short int a[101];
    int len = sizeof(a)/sizeof(short);
    /* Populate array a with values 1 to 101 */
    mult(len, a);
    return 0;
}

int mult(int len, short int *a){
    int result = 0;
    for(int i=0; i<len; i++){
        result += a[i]*20;
    }
    return result;
}
And here is my code trying to do the same with intrinsics:
/* Same main as before, with a short int a[101] containing values 1 to 101 */
int SIMD(int len, short int *a){
    int res;
    int val[4];
    /* Setting constant value to multiply with */
    __m128i sum = _mm_set1_epi16(20);
    __m128i s = _mm_setzero_si128( );
    for(int i=0; i<len/4*4; i += 4){
        __m128i vec = _mm_loadu_si128((__m128i *)(a+i));
        s += _mm_mul_epu32(vec,sum);
    }
    _mm_storeu_si128((__m128i*) val, s);
    res += val[0] + val[1] + val[2] + val[3];
    /* Handling tail */
    for(int i=len/4*4; i<len; i++){
        res += a[i];
    }
    return res;
}
So I do get a number out as a result, but it does not match the naive method. I have tried other intrinsics and changed numbers to see if it makes any noticeable difference, but nothing comes close to the output I expect. The computation time is also almost the same as the naive version at the moment.

There are 8 shorts in one __m128i. So:
for(int i=0; i<len/4*4; i += 4)
should be
for(int i=0; i<len/8*8; i += 8)
and:
res += val[0] + val[1] + val[2] + val[3];
should be:
res += val[0] + val[1] + val[2] + val[3] + val[4] + val[5] + val[6] + val[7];
and:
for(int i=len/4*4; i<len; i++)
should be:
for(int i=len/8*8; i<len; i++)
In:
s += _mm_mul_epu32(vec,sum);
_mm_mul_epu32 operates on 32-bit elements. It should be:
s += _mm_mullo_epi16(vec, sum);
The object res is not initialized; it should be:
int res = 0;
Finally, note that the tail elements need the same multiplication as the vectorized part: res += a[i]*20;
Here is working code:
#include <stdio.h>
#include <stdlib.h>
#include <immintrin.h>

// Number of elements in an array.
#define NumberOf(x) (sizeof (x) / sizeof *(x))

// Compute the result with scalar arithmetic.
static int mult(int len, short int *a)
{
    int result = 0;
    for (int i=0; i<len; i++)
    {
        result += a[i]*20;
    }
    return result;
}

// Compute the result with SIMD arithmetic.
static int SIMD(int len, short int *a)
{
    // Initialize the multiplier and the sum.
    __m128i multiplier = _mm_set1_epi16(20);
    __m128i s = _mm_setzero_si128( );

    // Process blocks of 8 shorts.
    for (int i=0; i<len/8*8; i += 8)
    {
        __m128i vec = _mm_loadu_si128((__m128i *)(a+i));
        // Multiply by multiplier and add to sum.
        s = _mm_add_epi16(s, _mm_mullo_epi16(vec, multiplier));
    }

    // Store the sum so far so its individual elements can be manipulated.
    short val[8];
    _mm_storeu_si128((__m128i*) val, s);

    // Add the individual elements.
    int res = 0;
    for (int i = 0; i < 8; ++i)
        res += val[i];

    // Multiply and add the elements in the tail.
    for (int i = len/8*8; i < len; ++i)
    {
        res += a[i]*20;
    }

    return res;
}

int main(int argc, char **argv)
{
    short int a[96];
    int len = NumberOf(a);

    // Initialize a.
    for (int i = 0; i < len; ++i)
        a[i] = i+1;

    printf("sum by scalar arithmetic is %d.\n", mult(len, a));
    printf("sum by SIMD arithmetic is %d.\n", SIMD(len, a));
    return 0;
}
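One caveat with the code above: both the lane products and the running sums are kept in 16-bit lanes, so a longer array or a larger constant would overflow them. A common way around that (a sketch of my own, not part of the answer above, assuming SSE2 like the rest of the code) is _mm_madd_epi16, which multiplies 16-bit lanes and adds adjacent pairs into 32-bit lanes:

// Sketch: same reduction, but accumulating in 32-bit lanes via pmaddwd
// to avoid 16-bit overflow.
static int SIMD_wide(int len, short int *a)
{
    __m128i multiplier = _mm_set1_epi16(20);
    __m128i s = _mm_setzero_si128();
    for (int i = 0; i < len/8*8; i += 8)
    {
        __m128i vec = _mm_loadu_si128((__m128i *)(a+i));
        // Each 32-bit lane receives a[i]*20 + a[i+1]*20 for one pair.
        s = _mm_add_epi32(s, _mm_madd_epi16(vec, multiplier));
    }
    int val[4];
    _mm_storeu_si128((__m128i *) val, s);
    int res = val[0] + val[1] + val[2] + val[3];
    for (int i = len/8*8; i < len; ++i)
        res += a[i]*20;
    return res;
}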

Related

Split int into char array in C

I have an int, and I need to split it into a char array, so 2 chars in each array position. After that, I need to do the opposite process. This is the best I could come up with, but I still couldn't make it work. Any suggestions?
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int length = 10968;
    int bytesNeeded = sizeof(length) / 2;
    char *controlPacket = (char*)malloc(sizeof(char*)*bytesNeeded);
    for (int i = 0; i < bytesNeeded; i++)
    {
        controlPacket[i] = (length >> (8* i));
    }
    int newSize = 0;
    for (int i = 0; i < bytesNeeded; i++)
    {
        newSize += (controlPacket[i] << (8 * i));
    }
    printf("Newsize is: %d\n", newSize);
}
Change the variables that you're performing bitwise operations on to unsigned, and also mask the result of shifting before assigning to the array. Otherwise, bytes with the high bit set are stored as negative signed chars and sign-extend when shifted back, which corrupts the reassembled value (and left-shifting a negative value is formally undefined behavior).
You also shouldn't divide sizeof(length) by 2. That works for values that only use the low-order half of the int, but not for larger ones; e.g., if you use length = 1096800; the result will be 48224.
#include <stdio.h>
#include <stdlib.h>

int main()
{
    unsigned int length = 10968;
    int bytesNeeded = sizeof(length);
    unsigned char *controlPacket = malloc(sizeof(unsigned char)*bytesNeeded);
    for (int i = 0; i < bytesNeeded; i++)
    {
        controlPacket[i] = (length >> (8* i) & 0xff);
    }
    unsigned int newSize = 0;
    for (int i = 0; i < bytesNeeded; i++)
    {
        // The cast keeps the shift from reaching the sign bit of int.
        newSize += ((unsigned int)controlPacket[i] << (8 * i));
    }
    printf("Newsize is: %u\n", newSize);
    free(controlPacket);
}
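To make the sign-extension problem concrete, here is a small round-trip sketch of my own (the exact values are implementation-defined, and the shift of a negative value is formally undefined, but this is what a typical two's-complement machine produces):

#include <stdio.h>

int main(void)
{
    int original = 0xABCD;              /* 43981 */
    signed char bad[2];
    unsigned char good[2];

    for (int i = 0; i < 2; i++) {
        bad[i]  = original >> (8 * i);
        good[i] = (original >> (8 * i)) & 0xff;
    }

    int fromBad = 0, fromGood = 0;
    for (int i = 0; i < 2; i++) {
        /* bad[1] holds 0xAB as the negative value -85; it sign-extends
           during the shift and corrupts the sum. */
        fromBad  += bad[i] << (8 * i);
        fromGood += good[i] << (8 * i);
    }

    printf("signed:   %d\n", fromBad);  /* -21811, not 43981 */
    printf("unsigned: %d\n", fromGood); /* 43981 */
    return 0;
}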

Algorithm for multiplying arbitrary length polynomials?

Let's say we represent polynomials as an array of floats, where the degree of each term corresponds to its index in the array (e.g., 4.2x^5+x^2-1.4 would be represented as {-1.4, 0, 1, 0, 0, 4.2}).
My assignment question is to write a method in C that multiplies two arbitrary-length polynomials and prints out the result (rather than returning it).
Normally, when I ask questions on SO I include what I've attempted so far, but I'm genuinely completely clueless with this one. This is all I have:
void multpoly(float *a, int len_a, float *b, int len_b)
{
    for(i = 0; i < len_result; i++)
    {
        printf(" %.5f, ", product[i]);
    }
}
Any help would be greatly appreciated!
I believe this does what you want:
// Constraint: 'result' must have space for at least len1 + len2 - 1 elements.
void multpoly(const float *poly1, int len1, const float *poly2, int len2, float *result)
{
    int i, p1i, p2i;
    int len_result = len1 + len2 - 1;
    for (i = 0; i < len_result; i++) result[i] = 0.0;
    for (p1i = 0; p1i < len1; ++p1i)
        for (p2i = 0; p2i < len2; ++p2i)
            result[p1i + p2i] += poly1[p1i] * poly2[p2i];
}
Ideone example of this function
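For example, a minimal driver (with values of my own choosing) for the function above:

#include <stdio.h>

int main(void)
{
    // (1 + 2x + 3x^2) * (2 + x^2) = 2 + 4x + 7x^2 + 2x^3 + 3x^4
    float p1[] = {1, 2, 3};
    float p2[] = {2, 0, 1};
    float result[5];
    multpoly(p1, 3, p2, 3, result);
    for (int i = 0; i < 5; i++)
        printf("%.1f ", result[i]);
    printf("\n");
    return 0;
}

which prints 2.0 4.0 7.0 2.0 3.0, i.e. 2 + 4x + 7x^2 + 2x^3 + 3x^4.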
#include <stdio.h>

void printPol(float *a, float *b, int len1, int len2)
{
    int i, j;
    // The order of the resulting polynomial is o1+o2, where
    // o1 = len1 - 1 and o2 = len2 - 1; the length is the
    // order + 1 (the +1 is for the constant term).
    int len = (len1-1)+(len2-1)+1;
    float res[len];
    // Initialize.
    for(i=0; i<len; i++) res[i] = 0;
    for(i=0; i<len1; i++)
        for(j=0; j<len2; j++)
        {
            // Mutually multiply all elements.
            res[i+j] += a[i]*b[j];
        }
    printf("%f ", res[0]);
    for(i=1; i<len; i++) printf("+%f*x^%d ", res[i], i);
}

int main(void)
{
    float a[5]={1, 2, 3};
    float b[5]={2, 0, 1};
    printPol(a, b, 3, 3);
    return 0;
}

Simple CUDA kernel with Bizarre Result?

I am using a CUDA kernel object in MATLAB in order to fill a 2D array with all '55's. The result is very strange: the 2D array only fills up to a certain point, and after row 1025 it is all zeros. Any idea what could be going wrong?
As I mentioned in the comment above, you are mistakenly offsetting the matrix rows: consecutive rows are iterations elements apart, not r_max, so indexing by globalIdx * r_max only writes the first part of the array. The code below is a full working example proving this point.
#include <cstdio>
#include <thrust/device_vector.h>

__global__ void myKern(double* masterForces, int r_max, int iterations) {
    int threadsPerBlock = blockDim.x * blockDim.y;
    int blockId = blockIdx.x + (blockIdx.y * gridDim.x);
    int threadId = threadIdx.x + (threadIdx.y * blockDim.x);
    int globalIdx = (blockId * threadsPerBlock) + threadId;
    // Wrong: rows are iterations elements apart, not r_max.
    //for (int i=0; i<iterations; i++) masterForces[globalIdx * r_max + i] = 55;
    for (int i=0; i<iterations; i++) masterForces[globalIdx * iterations + i] = 55;
}

int main() {
    int ThreadBlockSize = 32;
    int GridSize = 32;
    int reps = 1024;
    int iterations = 2000;
    thrust::device_vector<double> gpuF_M(reps*iterations, 0);
    myKern<<<GridSize,ThreadBlockSize>>>(thrust::raw_pointer_cast(gpuF_M.data()), reps, iterations);
    int numerrors = 0;
    for (int i=0; i<reps*iterations; i++) {
        double test = gpuF_M[i];
        if (test != 55) { printf("Error %i %f\n", i, test); numerrors++; }
    }
    printf("Finished!\n");
    printf("The number of errors is = %i\n", numerrors);
    getchar();
    return 0;
}

Search an ordered array in a CUDA kernel

I'm writing a CUDA kernel in which each thread has to complete the following task: suppose I have an ordered array a of n unsigned integers (the first one is always 0) stored in shared memory; each thread has to find the array index i such that a[i] ≤ threadIdx.x and a[i + 1] > threadIdx.x.
A naive solution could be:
for (i = 0; i < n - 1; i++)
    if (a[i + 1] > threadIdx.x) break;
but I suppose this is not the optimal way to do it... can anyone suggest anything better?
Like Robert, I was thinking that a binary search has got to be faster than a naïve loop -- the upper bound on the operation count for a binary search is O(log n), compared to O(n) for the loop.
My extremely simple implementation:
#include <iostream>
#include <climits>
#include <assert.h>

__device__ __host__
int midpoint(int a, int b)
{
    return a + (b-a)/2;
}

__device__ __host__
int eval(int A[], int i, int val, int imin, int imax)
{
    int low = (A[i] <= val);
    int high = (A[i+1] > val);

    if (low && high) {
        return 0;
    } else if (low) {
        return -1;
    } else {
        return 1;
    }
}

__device__ __host__
int binary_search(int A[], int val, int imin, int imax)
{
    while (imax >= imin) {
        int imid = midpoint(imin, imax);
        int e = eval(A, imid, val, imin, imax);
        if(e == 0) {
            return imid;
        } else if (e < 0) {
            imin = imid;
        } else {
            imax = imid;
        }
    }
    return -1;
}

__device__ __host__
int linear_search(int A[], int val, int imin, int imax)
{
    int res = -1;
    for(int i=imin; i<(imax-1); i++) {
        if (A[i+1] > val) {
            res = i;
            break;
        }
    }
    return res;
}

template<int version>
__global__
void search(int * source, int * result, int Nin, int Nout)
{
    extern __shared__ int buff[];
    int tid = threadIdx.x + blockIdx.x*blockDim.x;

    int val = INT_MAX;
    if (tid < Nin) val = source[threadIdx.x];
    buff[threadIdx.x] = val;
    __syncthreads();

    int res;
    switch(version) {
        case 0:
            res = binary_search(buff, threadIdx.x, 0, blockDim.x);
            break;
        case 1:
            res = linear_search(buff, threadIdx.x, 0, blockDim.x);
            break;
    }

    if (tid < Nout) result[tid] = res;
}

int main(void)
{
    const int inputLength = 128000;
    const int isize = inputLength * sizeof(int);
    const int outputLength = 256;
    const int osize = outputLength * sizeof(int);

    int * hostInput = new int[inputLength];
    int * hostOutput = new int[outputLength];
    int * deviceInput;
    int * deviceOutput;

    for(int i=0; i<inputLength; i++) {
        hostInput[i] = -200 + 5*i;
    }

    cudaMalloc((void**)&deviceInput, isize);
    cudaMalloc((void**)&deviceOutput, osize);
    cudaMemcpy(deviceInput, hostInput, isize, cudaMemcpyHostToDevice);

    dim3 DimBlock(256, 1, 1);
    dim3 DimGrid(1, 1, 1);
    DimGrid.x = (outputLength / DimBlock.x) +
                ((outputLength % DimBlock.x > 0) ? 1 : 0);
    size_t shmsz = DimBlock.x * sizeof(int);

    for(int i=0; i<5; i++) {
        search<1><<<DimGrid, DimBlock, shmsz>>>(deviceInput, deviceOutput,
                                                inputLength, outputLength);
    }
    for(int i=0; i<5; i++) {
        search<0><<<DimGrid, DimBlock, shmsz>>>(deviceInput, deviceOutput,
                                                inputLength, outputLength);
    }

    cudaMemcpy(hostOutput, deviceOutput, osize, cudaMemcpyDeviceToHost);

    for(int i=0; i<outputLength; i++) {
        int idx = hostOutput[i];
        int tidx = i % DimBlock.x;
        assert( (hostInput[idx] <= tidx) && (tidx < hostInput[idx+1]) );
    }

    cudaDeviceReset();
    return 0;
}
gave about a five times speed up compared to the loop:
>nvprof a.exe
======== NVPROF is profiling a.exe...
======== Command: a.exe
======== Profiling result:
Time(%) Time Calls Avg Min Max Name
60.11 157.85us 1 157.85us 157.85us 157.85us [CUDA memcpy HtoD]
32.58 85.55us 5 17.11us 16.63us 19.04us void search<int=1>(int*, int*, int, int)
6.52 17.13us 5 3.42us 3.35us 3.73us void search<int=0>(int*, int*, int, int)
0.79 2.08us 1 2.08us 2.08us 2.08us [CUDA memcpy DtoH]
I'm sure that someone clever could do a lot better than that. But perhaps this gives you at least a few ideas.
can anyone suggest anything better?
A brute force approach would be to have each thread do a binary search (on threadIdx.x + 1).
// sets idx to the index of the first element in a that is
// equal to or larger than key
__device__ void bsearch_range(const int *a, const int key, const unsigned len_a, unsigned *idx){
unsigned lower = 0;
unsigned upper = len_a;
unsigned midpt;
while (lower < upper){
midpt = (lower + upper)>>1;
if (a[midpt] < key) lower = midpt +1;
else upper = midpt;
}
*idx = lower;
return;
}
__global__ void find_my_idx(const int *a, const unsigned len_a, int *my_idx){
unsigned idx = (blockDim.x * blockIdx.x) + threadIdx.x;
unsigned sp_a;
int val = idx+1;
bsearch_range(a, val, len_a, &sp_a);
my_idx[idx] = ((val-1) < a[sp_a]) ? sp_a:-1;
}
This is coded in browser, not tested. It's hacked from a piece of working code, however. If you have trouble making it work, I can revisit it. I don't recommend this approach on a device without caches (cc 1.x device).
This is actually searching on the full unique 1D thread index (blockDim.x * blockIdx.x + threadIdx.x + 1). You can change val to be anything you like.
You could also add an appropriate thread check, if the number of threads you intend to launch is greater than the length of your my_idx result vector.
I imagine there is a more clever approach that may use something akin to prefix sums.
This is the best algorithm so far. It's called LPW Indexed Search:
__global__ void find_position_lpw(int *a, int n)
{
    int idx = threadIdx.x;
    __shared__ int aux[ MAX_THREADS_PER_BLOCK /*1024*/ ];

    aux[idx] = 0;
    __syncthreads(); // every slot must be zeroed before the atomics below

    if (idx < n)
        atomicAdd( &aux[a[idx]], 1); // atomics in case there are duplicates
    __syncthreads();

    // Inclusive prefix sum over aux (Hillis-Steele scan).
    int tmp;
    for (int j = 1; j <= MAX_THREADS_PER_BLOCK / 2; j <<= 1)
    {
        if( idx >= j ) tmp = aux[idx - j];
        __syncthreads();
        if( idx >= j ) aux[idx] += tmp;
        __syncthreads();
    }

    // result in "i"
    int i = aux[idx] - 1;

    // use "i" here...
    // ...
}

Computing Hamming distances to several strings with SSE

I have n (8-bit) character strings, all of them of the same length (say m), and another string s of the same length. I need to compute the Hamming distance from s to each of the other strings. In plain C, something like:
unsigned char strings[n][m];
unsigned char s[m];
int distances[n];

for(i=0; i<n; i++) {
    distances[i] = 0;
    for(j=0; j<m; j++) {
        if(strings[i][j] != s[j])
            distances[i]++;
    }
}
I would like to use SIMD instructions with gcc to perform such computations more efficiently. I have read that PCMPISTRI in SSE 4.2 can be useful, and my target computer supports that instruction set, so I would prefer a solution using SSE 4.2.
EDIT:
I wrote the following function to compute the Hamming distance between two strings:
static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
    return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi));
}

int HammingDist(const unsigned char *p1, unsigned const char *p2, const int len) {
#define MODE (_SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_EACH | _SIDD_BIT_MASK | _SIDD_NEGATIVE_POLARITY)
    __m128i smm1 = _mm_loadu_si128 ((__m128i*) p1);
    __m128i smm2 = _mm_loadu_si128 ((__m128i*) p2);
    __m128i ResultMask;

    int iters = len / 16;
    int diffs = 0;
    int i;
    for(i=0; i<iters; i++) {
        ResultMask = _mm_cmpestrm (smm1,16,smm2,16,MODE);
        diffs += popcnt128(ResultMask);
        p1 = p1+16;
        p2 = p2+16;
        smm1 = _mm_loadu_si128 ((__m128i*)p1);
        smm2 = _mm_loadu_si128 ((__m128i*)p2);
    }
    int mod = len % 16;
    if(mod>0) {
        ResultMask = _mm_cmpestrm (smm1,mod,smm2,mod,MODE);
        diffs += popcnt128(ResultMask);
    }
    return diffs;
}
So I can solve my problem by means of:
for(i=0; i<n; i++) {
    distances[i] = HammingDist(s, strings[i], m);
}
Is this the best I can do, or can I use the fact that one of the strings compared is always the same? Also, should I do some alignment on my arrays to improve performance?
ANOTHER ATTEMPT
Following Harold's recommendation, I have written the following code:
void _SSE_hammingDistances(const ByteP str, const ByteP strings, int *ds, const int n, const int m) {
    int iters = m / 16;
    __m128i *smm1, *smm2, diffs;
    for(int j=0; j<n; j++) {
        smm1 = (__m128i*) str;
        smm2 = (__m128i*) &strings[j*(m+1)]; // m+1, as strings are '\0' terminated
        diffs = _mm_setzero_si128();
        for (int i = 0; i < iters; i++) {
            diffs = _mm_add_epi8(diffs, _mm_cmpeq_epi8(*smm1, *smm2));
            smm1 += 1;
            smm2 += 1;
        }
        int s = m;
        signed char *ptr = (signed char *) &diffs;
        for(int p=0; p<16; p++) {
            s += *ptr;
            ptr++;
        }
        *ds = s;
        ds++;
    }
}
However, I am not able to do the final addition of the bytes in the __m128i using psadbw. Can anyone please help me with that?
Here's an improved version of your latest routine, which uses PSADBW (_mm_sad_epu8) to eliminate the scalar code:
#include <assert.h>
#include <stdint.h>
#include <emmintrin.h>

void hammingDistances_SSE(const uint8_t * str, const uint8_t * strings, int * const ds, const int n, const int m)
{
    const int iters = m / 16;
    assert((m & 15) == 0); // m must be a multiple of 16

    for (int j = 0; j < n; j++)
    {
        __m128i diffs = _mm_setzero_si128();

        for (int i = 0; i < iters; i++)
        {
            // Both loads advance 16 bytes per iteration; since str is the
            // same for every j, its chunks could also be cached outside
            // the j loop.
            const __m128i smm1 = _mm_loadu_si128((__m128i*)&str[16 * i]);
            const __m128i smm2 = _mm_loadu_si128((__m128i*)&strings[j * (m + 1) + 16 * i]); // m+1, as strings are '\0' terminated
            // cmpeq sets a byte to -1 where the bytes match; subtracting
            // accumulates a per-byte match count in diffs.
            diffs = _mm_sub_epi8(diffs, _mm_cmpeq_epi8(smm1, smm2));
        }

        // PSADBW sums the 16 byte counters into the low 16 bits of each
        // 64-bit half; adding the two halves gives the total match count.
        diffs = _mm_sad_epu8(diffs, _mm_setzero_si128());
        ds[j] = m - (_mm_extract_epi16(diffs, 0) + _mm_extract_epi16(diffs, 4));
    }
}
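A minimal driver (with test data of my own choosing) to sanity-check the routine:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    enum { n = 2, m = 16 };
    const uint8_t s[m + 1] = "abcdefghijklmnop";
    const uint8_t strings[n][m + 1] = { "abcdefghijklmnop",
                                        "Abcdefgh_jklmnoP" };
    int ds[n];

    hammingDistances_SSE(s, &strings[0][0], ds, n, m);
    for (int j = 0; j < n; j++)
        printf("distance to string %d: %d\n", j, ds[j]);
    return 0;
}

The first distance is 0; the second is 3, since the strings differ at positions 0, 8, and 15.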
