Poor maths performance in C vs Python/numpy

Near-duplicate / related:
How does BLAS get such extreme performance? (If you want fast matmul in C, seriously just use a good BLAS library unless you want to hand-tune your own asm version.) But that doesn't mean it's not interesting to see what happens when you compile less-optimized matrix code.
how to optimize matrix multiplication (matmul) code to run fast on a single processor core
Matrix Multiplication with blocks
Out of interest, I decided to compare the performance of (inexpertly) handwritten C vs. Python/numpy performing a simple matrix multiplication of two large square matrices filled with random numbers from 0 to 1.
I found that Python/numpy outperformed my C code by over 10,000x. This is clearly not right, so what is wrong with my C code that causes it to perform so poorly (even when compiled with -O3 or -Ofast)?
The python:
import time
import numpy as np
t0 = time.time()
m1 = np.random.rand(2000, 2000)
m2 = np.random.rand(2000, 2000)
t1 = time.time()
m3 = m1 @ m2
t2 = time.time()
print('creation time: ', t1 - t0, ' \n multiplication time: ', t2 - t1)
The C:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int main(void) {
clock_t t0=clock(), t1, t2;
// create matrices and allocate memory
int m_size = 2000;
int i, j, k;
double running_sum;
double *m1[m_size], *m2[m_size], *m3[m_size];
double f_rand_max = (double)RAND_MAX;
for(i = 0; i < m_size; i++) {
m1[i] = (double *)malloc(sizeof(double)*m_size);
m2[i] = (double *)malloc(sizeof(double)*m_size);
m3[i] = (double *)malloc(sizeof(double)*m_size);
}
// populate with random numbers 0 - 1
for (i=0; i < m_size; i++)
for (j=0; j < m_size; j++) {
m1[i][j] = (double)rand() / f_rand_max;
m2[i][j] = (double)rand() / f_rand_max;
}
t1 = clock();
// multiply together
for (i=0; i < m_size; i++)
for (j=0; j < m_size; j++) {
running_sum = 0;
for (k = 0; k < m_size; k++)
running_sum += m1[i][k] * m2[k][j];
m3[i][j] = running_sum;
}
t2 = clock();
float t01 = ((float)(t1 - t0) / CLOCKS_PER_SEC );
float t12 = ((float)(t2 - t1) / CLOCKS_PER_SEC );
printf("creation time: %f", t01 );
printf("\nmultiplication time: %f", t12 );
return 0;
}
EDIT: I have corrected the Python to do a proper dot product (which closes the gap a little) and the C to time with microsecond resolution and to use the comparable double data type, rather than float as originally posted.
Outputs:
$ gcc -O3 -march=native bench.c
$ ./a.out
creation time: 0.092651
multiplication time: 139.945068
$ python3 bench.py
creation time: 0.1473407745361328
multiplication time: 0.329038143157959
It has been pointed out that the naive algorithm implemented here in C could be improved in ways that make better use of compiler optimisations and the cache.
EDIT: Having modified the C code to transpose the second matrix in order to achieve a more efficient access pattern, the gap closes further.
The modified multiplication code:
// transpose m2 in order to capitalise on cache efficiencies
// store transposed matrix in m3 for now
for (i=0; i < m_size; i++)
for (j=0; j < m_size; j++)
m3[j][i] = m2[i][j];
// swap the row pointers (m2 and m3 are arrays of row pointers here,
// so each row pointer has to be swapped individually)
for (i=0; i < m_size; i++) {
double *mtemp = m3[i];
m3[i] = m2[i];
m2[i] = mtemp;
}
// multiply together
for (i=0; i < m_size; i++)
for (j=0; j < m_size; j++) {
running_sum = 0;
for (k = 0; k < m_size; k++)
running_sum += m1[i][k] * m2[j][k];
m3[i][j] = running_sum;
}
The results:
$ gcc -O3 -march=native bench2.c
$ ./a.out
creation time: 0.107767
multiplication time: 10.843431
$ python3 bench.py
creation time: 0.1488208770751953
multiplication time: 0.3335080146789551
EDIT: compiling with -Ofast, which I am reassured is a fair comparison (-Ofast enables -ffast-math, which lets the compiler reorder and vectorise the floating-point reduction), brings the difference down to just over an order of magnitude (in numpy's favour).
$ gcc -Ofast -march=native bench2.c
$ ./a.out
creation time: 0.098201
multiplication time: 4.766985
$ python3 bench.py
creation time: 0.13812589645385742
multiplication time: 0.3441300392150879
EDIT: It was suggested to change the indexing from an array of row pointers accessed as arr[i][j] to a single flat allocation indexed as arr[i*m_size + j]; this yielded a small performance increase:
for m_size = 10000
$ gcc -Ofast -march=native bench3.c # indexed by arr[ i * m_size + j ]
$ ./a.out
creation time: 1.280863
multiplication time: 626.327820
$ gcc -Ofast -march=native bench2.c # indexed by arr[i][j]
$ ./a.out
creation time: 2.410230
multiplication time: 708.979980
$ python3 bench.py
creation time: 3.8284950256347656
multiplication time: 39.06089973449707
The up to date code bench3.c:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int main(void) {
clock_t t0, t1, t2;
t0 = clock();
// create matrices and allocate memory
int m_size = 10000;
int i, j, k, x, y;
double running_sum;
double *m1 = (double *)malloc(sizeof(double)*m_size*m_size),
*m2 = (double *)malloc(sizeof(double)*m_size*m_size),
*m3 = (double *)malloc(sizeof(double)*m_size*m_size);
double f_rand_max = (double)RAND_MAX;
// populate with random numbers 0 - 1
for (i=0; i < m_size; i++) {
x = i * m_size;
for (j=0; j < m_size; j++) {
m1[x + j] = ((double)rand()) / f_rand_max;
m2[x + j] = ((double)rand()) / f_rand_max;
m3[x + j] = ((double)rand()) / f_rand_max;
}
}
t1 = clock();
// transpose m2 in order to capitalise on cache efficiencies
// store transposed matrix in m3 for now
for (i=0; i < m_size; i++)
for (j=0; j < m_size; j++)
m3[j*m_size + i] = m2[i * m_size + j];
// swap the pointers
double *mtemp = m3;
m3 = m2;
m2 = mtemp;
// multiply together
for (i=0; i < m_size; i++) {
x = i * m_size;
for (j=0; j < m_size; j++) {
running_sum = 0;
y = j * m_size;
for (k = 0; k < m_size; k++)
running_sum += m1[x + k] * m2[y + k];
m3[x + j] = running_sum;
}
}
t2 = clock();
float t01 = ((float)(t1 - t0) / CLOCKS_PER_SEC );
float t12 = ((float)(t2 - t1) / CLOCKS_PER_SEC );
printf("creation time: %f", t01 );
printf("\nmultiplication time: %f", t12 );
return 0;
}

CONCLUSION: The original absurd factor-of-10,000 difference was largely due to mistakenly comparing element-wise multiplication in Python/numpy against a full matrix multiplication in C, with the C code neither compiled with all of the available optimisations nor written with a cache-friendly memory access pattern.
A 'fair' comparison (i.e. a correct but highly inefficient single-threaded algorithm, compiled with -Ofast) yields a performance difference of about x350.
A number of simple edits to improve the memory access pattern brought the comparison down to a factor of x16 (in numpy's favour) for large-matrix (10000 x 10000) multiplication. Furthermore, numpy automatically utilises all four virtual cores on my machine whereas this C code does not, so the remaining difference could be a factor of x4 - x8 (depending on how much this program would gain from hyperthreading). I consider a factor of x4 - x8 fairly sensible, given that I don't really know what I'm doing and just knocked a bit of code together, whereas numpy is built on BLAS, which I understand has been extensively optimised over the years by experts from all over the place, so I consider the question answered/solved.
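For reference, the route numpy itself takes is to hand the multiplication to a BLAS library, and the remaining single-thread gap could also be narrowed with a simple #pragma omp parallel for on the outer loop, as the next question shows. Below is a minimal sketch of calling BLAS from C; it assumes a CBLAS-providing implementation such as OpenBLAS is installed (the header name and link flag depend on the distribution, e.g. gcc bench_blas.c -lopenblas):
#include <stdio.h>
#include <stdlib.h>
#include <cblas.h>

int main(void) {
    int n = 2000;
    double *a = malloc(sizeof(double) * n * n);
    double *b = malloc(sizeof(double) * n * n);
    double *c = malloc(sizeof(double) * n * n);
    for (int i = 0; i < n * n; i++) {
        a[i] = (double)rand() / RAND_MAX;
        b[i] = (double)rand() / RAND_MAX;
    }
    /* c = 1.0 * a * b + 0.0 * c, all matrices row-major, n x n */
    cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans,
                n, n, n, 1.0, a, n, b, n, 0.0, c, n);
    printf("c[0] = %f\n", c[0]);
    free(a); free(b); free(c);
    return 0;
}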

Related

OpenMP Matrix Multiplication Issues

I am trying to multiply two matrices.
#include <stdio.h>
#include <omp.h>
#include <time.h>
#include <stdlib.h>
#define N 2048
#define FactorIntToDouble 1.1
#define THREAD_NUM 4
double firstMatrix [N] [N] = {0.0};
double secondMatrix [N] [N] = {0.0};
double matrixMultiResult [N] [N] = {0.0};
// Sync
void matrixMulti() {
for(int row = 0 ; row < N ; row++){
for(int col = 0; col < N ; col++){
double resultValue = 0;
for(int transNumber = 0 ; transNumber < N ; transNumber++) {
resultValue += firstMatrix [row] [transNumber] * secondMatrix [transNumber] [col] ;
}
matrixMultiResult [row] [col] = resultValue;
}
}
}
void matrixInit() {
for(int row = 0 ; row < N ; row++ ) {
for(int col = 0 ; col < N ;col++){
srand(row+col);
firstMatrix [row] [col] = ( rand() % 10 ) * FactorIntToDouble;
secondMatrix [row] [col] = ( rand() % 10 ) * FactorIntToDouble;
}
}
}
// Parallel
void matrixMulti2(int start, int end) {
printf("Op: %d - %d\n", start, end);
for(int row = start ; row < end ; row++){
for(int col = 0; col < N ; col++){
double resultValue = 0;
for(int transNumber = 0 ; transNumber < N ; transNumber++) {
resultValue += firstMatrix [row] [transNumber] * secondMatrix [transNumber] [col] ;
}
matrixMultiResult [row] [col] = resultValue;
}
}
}
void process1(){
clock_t t1 = clock();
#pragma omp parallel
{
int thread = omp_get_thread_num();
int thread_multi = N / 4;
int start = (thread) * thread_multi;
int end = 0;
if(thread == (THREAD_NUM - 1)){
end = (start + thread_multi);
}else{
end = (start + thread_multi) - 1;
}
matrixMulti2(start, end);
}
clock_t t2 = clock();
printf("time 2: %ld\n", t2-t1);
}
int main(){
matrixInit();
clock_t t1 = clock();
matrixMulti();
clock_t t2 = clock();
printf("time: %ld", t2-t1);
process1();
return 0;
}
I have both a parallel and a sync version, but the parallel version takes longer than the sync version.
Currently the sync version takes around 90 seconds and the parallel one over 100, which makes no sense to me.
My logic was to split the matrix into 4 row blocks, one per thread, which I believe is reasonable.
After I finish this part, I would like to figure out how to speed the parallel version up even more, possibly using Strassen's matrix multiplication. I just don't know where to start or how to get to that point.
I've already spent around 5 hours trying to figure this out.
Answer: rather than splitting the rows by hand, let OpenMP share the outer loop across threads:
// Sync
void matrixMulti() {
#pragma omp parallel for collapse(2)
for(int row = 0 ; row < N ; row++){
for(int col = 0; col < N ; col++){
double resultValue = 0;
for(int transNumber = 0 ; transNumber < N ; transNumber++) {
resultValue += firstMatrix [row] [transNumber] * secondMatrix [transNumber] [col] ;
}
matrixMultiResult [row] [col] = resultValue;
}
}
}
Update: Here is what I got on an 8-core system using gcc 10.3 with the -O3 -fopenmp flags (showing the program's output and the result of the Linux time command):
main() was changed to measure the time with omp_get_wtime(), because on Linux clock() measures processor time summed over all threads rather than wall-clock time:
double t1 = omp_get_wtime();
matrixMulti();
double t2 = omp_get_wtime();
printf("time: %f", t2-t1);
Serial program:
time: 25.895234
real 0m33.296s
user 0m33.139s
sys 0m0.152s
using: #pragma omp parallel for
time: 3.573521
real 0m11.120s
user 0m32.205s
sys 0m0.136s
using: #pragma omp parallel for collapse(2)
time: 5.466674
real 0m12.786s
user 0m49.978s
sys 0m0.248s
The results suggest that initialization of the matrices takes ca. 8 s, so it may also be worth parallelizing. Without collapse(2) the program runs faster, so do not use the collapse(2) clause here.
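A sketch of parallelizing the initialization as well (my own addition, not part of the original answer): rand() is not thread-safe, so the sketch switches to the POSIX rand_r() with a per-row seed, which changes the exact random values produced.
// Sketch: parallel initialization; rand_r() is POSIX, not ISO C, and the
// per-row seed is purely illustrative.
void matrixInit() {
    #pragma omp parallel for
    for (int row = 0; row < N; row++) {
        unsigned int seed = row;
        for (int col = 0; col < N; col++) {
            firstMatrix[row][col]  = (rand_r(&seed) % 10) * FactorIntToDouble;
            secondMatrix[row][col] = (rand_r(&seed) % 10) * FactorIntToDouble;
        }
    }
}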
Note that on your system you may get a different speed improvement, or even a decrease, depending on your hardware. The speed of matrix multiplication strongly depends on the speed of memory reads/writes. Shared-memory multicore systems (i.e. most PCs and laptops) may not show much speed increase upon parallelization of this program, but distributed-memory multicore systems (i.e. high-end servers) definitely show a performance increase. For more details please read e.g. this.
Update2: On a Ryzen 7 5800X I got 41.6 s vs 1.68 s, which is a bigger increase than the number of cores. It is because more cache memory is available when all the cores are used.

Why do the C and Fortran versions of this same program produce different results?

I use gcc (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0
The c code is:
// Compile with:
// gcc -o little_c little.c
#include <stdio.h> // printf
void main(void) {
int n = 800;
float a[n][n], b[n][n], c[n][n];
int i, j, k;
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
a[i][j] = (float) (i+j);
b[i][j] = (float) (i-j);
}
}
for (i = 0; i < n; i++) {
for (j = 0; j < n; j++) {
float t = (float) 0.0;
for (k = 0; k < n; k++)
t += a[i][k] * a[i][k] + b[k][j] * b[k][j];
//t += a[i][k] + b[k][j]; // If I comment the above line and uncomment this, the C and Fortran results are the same
c[i][j] = t;
}
}
printf("%f", c[n-1][n-1]); // prints the very last element
}
Fortran code:
! Compile with:
! gfortran -o little_fort little.f90
program little
implicit none
integer, parameter :: n = 800
real :: a(n,n), b(n,n), c(n,n)
real :: t
integer :: i, j, k ! Counters
do i = 1, n
do j = 1, n
a(i,j) = real(i-1+j-1) ! Minus one, for it to be like the c version
b(i,j) = real(i-1-(j-1)) ! because in c, the index goes from 0 to n-1
end do
end do
do i = 1, n
do j = 1, n
t = 0.0
do k = 1, n
t = t + a(i,k) * a(i,k) + b(k,j) * b(k,j)
!t = t + a(i,k) + b(k,j) ! If I comment the above line and uncomment this, the C and Fortran results are the same
end do
c(i,j) = t
end do
end do
write (*,"(F20.4)") c(n,n) ! This is the same as c[n-1][n-1] in c
end program little
The c program prints: 1362136192.000000
and the Fortran program prints: 1362137216.0000
If I do not multiply each element by itself, as I state in the comments in the code, I get the same value for both versions of the program:
C program: 639200.000000
Fortran program: 639200.0000
Why do the C and Fortran code produce different results when I use a multiplication? Does it have to do with different implementations of the real numbers?
The difference is due to the order of evaluation combined with the limited precision of the floating point type.
If you change the Fortran version to
t = t + (a(i,k) * a(i,k) + b(k,j) * b(k,j))
i.e. add parentheses around the terms with a and b, you get the same result for both languages. The C version already uses this order of evaluation due to the use of the += assignment operator.
As mentioned in the comments, this is expected behavior at the limits of the available precision.
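A tiny standalone demonstration of that grouping effect (the values are hypothetical, chosen only to make float rounding visible; they are not taken from the matrices above):
#include <stdio.h>

int main(void) {
    float t  = 16777216.0f;   /* 2^24: adjacent floats here are 2 apart */
    float aa = 0.75f, bb = 0.75f;
    float fortran_style = (t + aa) + bb;  /* left-to-right, like t = t + a*a + b*b */
    float c_style       = t + (aa + bb);  /* like t += a*a + b*b */
    printf("%.1f\n%.1f\n", fortran_style, c_style);  /* 16777216.0 vs 16777218.0 */
    return 0;
}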
When I wrote an Ada version of the program I found that I had to reduce the decimal precision to 6 decimals to achieve the Fortran answer.
The Ada version is:
with Ada.Text_IO; use Ada.Text_Io;
procedure Main is
type Real is digits 6;
package Real_IO is new Ada.Text_IO.Float_IO(Real);
use Real_IO;
subtype Index is integer range 1..800;
type Matrix is array(Index, Index) of Real;
A : Matrix;
B : Matrix;
C : Matrix;
T : Real := 0.0;
begin
for I in Index loop
for J in Index loop
A(I,J) := Real(I - 1 + J - 1);
B(I,J) := Real(I - 1 - (J - 1));
end loop;
end loop;
for I in Index loop
for J in Index loop
T := 0.0;
for K in Index loop
T := T + A(I,K) * A(I,K) + B(K,J) *B(K,J);
end loop;
C(I,J) := T;
end loop;
end loop;
Put(Item => C(Index'Last, Index'Last), Exp => 0, Aft => 4);
New_Line;
end Main;
The line defining type Real defines the precision of the floating point type:
type Real is digits 6;
The value produced using six digits of precision is
1362137216.0000
Use of higher precision floating point types resulted in the value
1362135200.0000

Sparse matrix multiplication in Eigen giving wrong result?

I am using Eigen in a project of mine, and I am running into a strange issue. I have complex sparse matrices A and B (1500x1500 or larger), and am multiplying them together with coefficients.
When A = B, and taking vector x of ones, I expect that
(A-B)*x = 0, (A*B-B*A)*x = 0,
(A*A*B*B - B*B*A*A)*x = 0,
etc. and I do get this result for all these cases. (A.isApprox(B) evaluates to 1 and (A-B).norm() = 0).
However, when I multiply the matrices by doubles, as in
(c1*A*c2*A*d1*B*d2*B - d1*B*d2*B*c1*A*c2*A)*x,
I get a nonzero result, which doesn't make sense to me, as scalars should commute with the matrices. In fact, if I do,
(c1*c2*d1*d2*A*A*B*B - d1*d2*c1*c2*B*B*A*A)*x
I get zero. Any time the coefficients are interspersed in the matrix manipulation, I get a nonzero result.
I am not using any compiler optimizations, etc.
What am I doing wrong here?
Edit:
I have worked up a simple example. Maybe I'm missing something dumb, but here it is. This gives me an error of 10^20.
#include <iostream>
#include <cmath>
#include <vector>
#include <Eigen/Sparse>
#include <complex>
typedef std::complex<double> Scalar;
typedef Eigen::SparseMatrix<Scalar, Eigen::RowMajor> SpMat;
typedef Eigen::Triplet<Scalar> trip;
int main(int argc, const char * argv[]) {
double k0 = M_PI;
double dz = 0.01;
double nz = 1500;
std::vector<double> rhos(nz), atten(nz), cp(nz);
for(int i = 0; i < nz; ++i){
if(i < 750){
rhos[i] = 1.5;
cp[i] = 2500;
atten[i] = 0.5;
}
else{
rhos[i] = 1;
cp[i] = 1500;
atten[i] = 0;
}
}
Scalar ci, eta, n, rho, drhodz;
Scalar t1, t2, t3, t4;
ci = Scalar(0,1);
eta = 1.0/(40.0*M_PI*std::log10(std::exp(1.0)));
int Mp = 6;
std::vector<std::vector<trip> > mat_entries_N(Mp), mat_entries_D(Mp);
for(int i = 0; i < nz; ++i){
n = 1500./cp[i] * (1.+ ci * eta * atten[i]);
rho = rhos[i];
if(i > 0 && i < nz-1){
drhodz = (rhos[i+1]-rhos[i-1])/(2*dz);
}
else if(i == 0){
drhodz = (rhos[i+1]-rhos[i])/(dz);
}
else if(i == nz-1){
drhodz = (rhos[i]-rhos[i-1])/(dz);
}
t1 = (n*n - 1.);
t2 = 1./(k0*k0)*(-2./(dz * dz));
t3 = 1./(k0*k0)*(drhodz/rho*2.*dz);
t4 = 1./(k0*k0)*(1/(dz * dz));
/* MATRICES N AND D ARE IDENTICAL EXCEPT FOR COEFFICIENT*/
double c,d;
for(int mp = 0; mp < Mp; ++mp){
c = std::pow(std::sin((mp+1)*M_PI/(2*Mp+1)),2);
d = std::pow(std::cos((mp+1)*M_PI/(2*Mp+1)),2);
mat_entries_N[mp].push_back(trip(i,i,(c*(t1 + t2))));
mat_entries_D[mp].push_back(trip(i,i,(d*(t1 + t2))));
if(i < nz - 1){
mat_entries_N[mp].push_back(trip(i,i+1,(c*(-t3 + t4))));
mat_entries_D[mp].push_back(trip(i,i+1,(d*(-t3 + t4))));
}
if(i > 0){
mat_entries_N[mp].push_back(trip(i,i-1,(c*(t3 + t4))));
mat_entries_D[mp].push_back(trip(i,i-1,(d*(t3 + t4))));
}
}
}
SpMat N(nz,nz), D(nz,nz);
SpMat identity(nz, nz);
std::vector<trip> idcoeffs;
for(int i = 0; i < nz; ++i){
idcoeffs.push_back(trip(i,i,1));
}
identity.setFromTriplets(idcoeffs.begin(), idcoeffs.end());
SpMat temp(nz,nz);
N = identity;
D = identity;
for(int mp = 0; mp < Mp; ++mp){
temp.setFromTriplets(mat_entries_N[mp].begin(), mat_entries_N[mp].end());
N = (temp*N).eval();
temp.setFromTriplets(mat_entries_D[mp].begin(), mat_entries_D[mp].end());
D = (temp*D).eval();
}
std::cout << (N*D - D*N).norm() << std::endl;
return 0;
}
The problem is that without a meaningful reference value defining what is the expected order of magnitude of a non-zero value, it is impossible to conclude whether 1e20 is a huge or a tiny value.
In your case, the norm of the matrices N and D are about 1e20 and 1e18 respectively, and the norm of N*D is about 1e38. Given that the relative precision of double is about 1e-16, an error of 1e20 can be considered as 0 compared to 1e38.
To summarize, it is most of the time meaningless to look at the absolute error. Instead, you have to look at the relative error:
std::cout << (N*D - D*N).norm()/(N*D).norm() << std::endl;
which gives you about 1e-17. This is indeed smaller than the numerical precision of double.

How to optimize Matrix initialization and transposition to run faster using C

The dimensions of this matrix are 40000*40000. I was supposed to consider spatial and temporal locality for the program, but I have no idea how to optimize this code. It takes about 50+ seconds on my computer, which is not acceptable for our group. The block size is 500 at the moment. Could someone help me to improve this code?
void InitializeMatrixRowwise(){
int i,j,ii,jj;
double x;
x = 0.0;
for (i = 0; i < DIMENSION; i += BLOCKSIZE)
{
for (j = 0; j < DIMENSION; j += BLOCKSIZE)
{
for (ii = i; ii < i+BLOCKSIZE && ii < DIMENSION; ii++)
{
for (jj = j; jj < j+BLOCKSIZE && jj < DIMENSION; jj++)
{
if (ii >= jj)
{
Matrix[ii][jj] = x++;
}
else
Matrix[ii][jj] = 1.0;
}
}
}
}
}
void TransposeMatrixRowwise(){
int column,row,i,j;
double temp;
for (row = 0; row < DIMENSION; row += BLOCKSIZE)
{
for (column = 0; column < DIMENSION; column += BLOCKSIZE)
{
for (i = row; i < row + BLOCKSIZE && i < DIMENSION; i++)
{
for (j = column; j < column + BLOCKSIZE && j < DIMENSION; j++)
{
if (i > j)
{
temp = Matrix[i][j];
Matrix[i][j] = Matrix[j][i];
Matrix[j][i] = temp;
}
}
}
}
}
}
Your transpose function seems like it might be more complex than necessary and therefore perhaps slower than necessary. However, I created two versions of the code with timing inserted on the 'full size' (40k x 40k array, with 500 x 500 blocks), one using your transpose function and one using this much simpler algorithm:
static void TransposeMatrixRowwise(void)
{
for (int row = 0; row < DIMENSION; row++)
{
for (int col = row + 1; col < DIMENSION; col++)
{
double temp = Matrix[row][col];
Matrix[row][col] = Matrix[col][row];
Matrix[col][row] = temp;
}
}
}
This looks much simpler; it has only two nested loops instead of four, but the timing turns out to be dramatically worse — 31.5s vs 14.7s.
# Simple transpose
# Count = 7
# Sum(x1) = 220.87
# Sum(x2) = 6979.00
# Mean = 31.55
# Std Dev = 1.27 (sample)
# Variance = 1.61 (sample)
# Min = 30.41
# Max = 33.54
# Complex transpose
# Count = 7
# Sum(x1) = 102.81
# Sum(x2) = 1514.00
# Mean = 14.69
# Std Dev = 0.82 (sample)
# Variance = 0.68 (sample)
# Min = 13.59
# Max = 16.21
The reason for the performance difference is almost certainly due to locality of reference. The more complex algorithm is working with two separate blocks of memory at a time, whereas the simpler algorithm is ranging over far more memory, leading to many more page misses, and the slower performance.
Thus, while you might be able to tune the transpose algorithm using different block sizes (it needn't be the same block size as was used to generate the matrices), there is little doubt based on these measurements
that the more complex algorithm is more efficient.
I also did a check at 1/10th scale — 4k x 4k matrix, 50 x 50 block size — to ensure that the output from the transposition was the same (about 152 MiB of text); I didn't save the data at full scale, which would be more than 100 times as much. The times at 1/10th scale were dramatically better, less than 1/100th of the time, for both versions:
< Initialization: 0.068667
< Transposition: 0.063927
---
> Initialization: 0.081022
> Transposition: 0.039169
4005c4005
< Print transposition: 3.901960
---
> Print transposition: 4.040136
JFTR: Testing on a 2016 MacBook Pro running macOS High Sierra 10.13.1 with 2.7 GHz Intel Core i7 CPU and 16 GB 2133 MHz LPDDR3 RAM. The compiler was GCC 7.2.0 (home-built). There was a browser running (but mostly inactive) and music playing in the background, so the machine wasn't idle, but I don't think those will dramatically affect the numbers.

C: Accessing lookup tables faster?

I have a piece of code that traces 4 sines at a time.
My original code was making roughly 12000 sin() function calls per frame and was running at 30 fps.
I tried optimizing it by generating lookup tables. I ended up with 16 different lookup tables, which I declare and load in a separate header file at the top of my program. Each table is declared like so:
static const float d4_lookup[800] {...};
Now, with this new method I actually lost fps?! I'm running at 20 fps now instead of 30. Each frame now only has to do 8 sin / cos calls and 19200 lookup calls vs 12000 sin() calls.
I compile using gcc with -O3 flag on. At the moment, the lookup tables are included at the top and are part of the global scope of the program.
I assume I'm not loading them in the right memory or something to that effect. How can I speed up the lookup time?
** EDIT 1 **
As requested, here's the function that uses the lookup calls, it is called once per frame:
void
update_sines(void)
{
static float c1_sin, c1_cos;
static float c2_sin, c2_cos;
static float c3_sin, c3_cos;
static float c4_sin, c4_cos;
clock_gettime(CLOCK_MONOTONIC, &spec);
s = spec.tv_sec;
ms = spec.tv_nsec * 0.0000001;
etime = concatenate((long)s, ms);
c1_sin = sinf(etime * 0.00525);
c1_cos = cosf(etime * 0.00525);
c2_sin = sinf(etime * 0.007326);
c2_cos = cosf(etime * 0.007326);
c3_sin = sinf(etime * 0.0046);
c3_cos = cosf(etime * 0.0046);
c4_sin = sinf(etime * 0.007992);
c4_cos = cosf(etime * 0.007992);
int k;
for (k = 0; k < 800; ++k)
{
sine1[k] = a1_lookup[k] * ((bx1_sin_lookup[k] * c1_cos) + (c1_sin * bx1_cos_lookup[k])) + d1_lookup[k];
sine2[k] = a2_lookup[k] * ((bx2_sin_lookup[k] * c2_cos) + (c2_sin * bx2_cos_lookup[k])) + d2_lookup[k] + 50;
sine3[k] = a3_lookup[k] * ((bx3_sin_lookup[k] * c3_cos) + (c3_sin * bx3_cos_lookup[k])) + d3_lookup[k];
sine4[k] = a4_lookup[k] * ((bx4_sin_lookup[k] * c4_cos) + (c4_sin * bx4_cos_lookup[k])) + d4_lookup[k] + 50;
}
}
** UPDATE **
For anyone reading this thread, I gave up on this problem. I tried using OpenCL kernels, structs, SIMD instructions, as well as all the solutions shown here. In the end the original code that computed sinf() 12800 times per frame worked faster than the lookup tables, since the lookup tables didn't fit into the cache. Yet it was still only doing 30 fps. It just had too much going on to keep up with my 60 fps expectations. I've decided to take a different direction. Thanks to everyone who contributed to this thread. Most of these solutions would probably work to get some half-decent speed improvements, but nothing like the 200% speed-up I needed here to have the lookup tables work the way I wanted.
Sometimes it's hard to know what's slowing you down, but potentially you are ruining your cache hits; you could try a lookup table of structs:
typedef struct
{
float bx1_sin;
float bx2_sin;
float bx3_sin;
float bx4_sin;
float bx1_cos;
/* ... and so on for the remaining bx*_cos, a* and d* values,
   including sine1,2,3,4 as well ... */
} lookup_table;
then
lookup_table lookup[800];
now everything at the kth lookup will be in the same small chunk of memory.
Also, if you use a macro that takes k as a parameter to do the contents of the loop, let's say SINE_CALC(k), or an inline function...
you can do
for (k = 0; k < 800; /* k is advanced inside the body */)
{
SINE_CALC(k); k++;
SINE_CALC(k); k++;
SINE_CALC(k); k++;
SINE_CALC(k); k++;
SINE_CALC(k); k++;
}
If you do use a macro, make sure the k++ stays outside the macro call, as shown above.
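For completeness, one possible definition of such a macro (my own sketch, not from the original answer; the body simply repeats the four assignments from update_sines above):
/* Hypothetical SINE_CALC(k): one iteration of the update loop, wrapped in
   do { } while (0) so that "SINE_CALC(k); k++;" behaves like a single statement. */
#define SINE_CALC(k) do { \
    sine1[k] = a1_lookup[k] * ((bx1_sin_lookup[k] * c1_cos) + (c1_sin * bx1_cos_lookup[k])) + d1_lookup[k]; \
    sine2[k] = a2_lookup[k] * ((bx2_sin_lookup[k] * c2_cos) + (c2_sin * bx2_cos_lookup[k])) + d2_lookup[k] + 50; \
    sine3[k] = a3_lookup[k] * ((bx3_sin_lookup[k] * c3_cos) + (c3_sin * bx3_cos_lookup[k])) + d3_lookup[k]; \
    sine4[k] = a4_lookup[k] * ((bx4_sin_lookup[k] * c4_cos) + (c4_sin * bx4_cos_lookup[k])) + d4_lookup[k] + 50; \
} while (0)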
Try splitting your loop into separate passes like this:
for (k = 0; k < 800; ++k)
{
sine1[k] = a1_lookup[k];
sine2[k] = a2_lookup[k];
sine3[k] = a3_lookup[k];
sine4[k] = a4_lookup[k];
}
for (k = 0; k < 800; ++k)
{
sine1[k] *= ((bx1_sin_lookup[k] * c1_cos) + (c1_sin * bx1_cos_lookup[k]));
sine2[k] *= ((bx2_sin_lookup[k] * c2_cos) + (c2_sin * bx2_cos_lookup[k]));
sine3[k] *= ((bx3_sin_lookup[k] * c3_cos) + (c3_sin * bx3_cos_lookup[k]));
sine4[k] *= ((bx4_sin_lookup[k] * c4_cos) + (c4_sin * bx4_cos_lookup[k]));
}
for (k = 0; k < 800; ++k)
{
sine1[k] += d1_lookup[k];
sine2[k] += d2_lookup[k] + 50;
sine3[k] += d3_lookup[k];
sine4[k] += d4_lookup[k] + 50;
}
By accessing fewer lookup tables in each loop, you should be able to stay in the cache. The middle loop could be split up as well, but you'll need to create an intermediate table for one of the sub-expressions.
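For example, the middle pass could be split further with a scratch table for one of the sub-expressions (a sketch for sine1 only; tmp1 is an illustrative temporary, not from the original answer):
static float tmp1[800];  /* illustrative scratch table */

/* first pass: one sub-expression into the scratch table */
for (k = 0; k < 800; ++k)
    tmp1[k] = bx1_sin_lookup[k] * c1_cos;
/* second pass: finish the middle step for sine1 */
for (k = 0; k < 800; ++k)
    sine1[k] *= tmp1[k] + (c1_sin * bx1_cos_lookup[k]);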
Intel processors can predict serial access (and prefetch) for up to 4 arrays, for both forward and backward traversal. At least this was true in Core 2 Duo days. Split your for loop into:
for (k = 0; k < 800; ++k)
sine1[k] = a1_lookup[k] * ((bx1_sin_lookup[k] * c1_cos) + (c1_sin * bx1_cos_lookup[k])) + d1_lookup[k];
for (k = 0; k < 800; ++k)
sine2[k] = a2_lookup[k] * ((bx2_sin_lookup[k] * c2_cos) + (c2_sin * bx2_cos_lookup[k])) + d2_lookup[k] + 50;
for (k = 0; k < 800; ++k)
sine3[k] = a3_lookup[k] * ((bx3_sin_lookup[k] * c3_cos) + (c3_sin * bx3_cos_lookup[k])) + d3_lookup[k];
for (k = 0; k < 800; ++k)
sine4[k] = a4_lookup[k] * ((bx4_sin_lookup[k] * c4_cos) + (c4_sin * bx4_cos_lookup[k])) + d4_lookup[k] + 50;
I guess you have more cache load than the benchmarks in other answers, so this does matter. I recommend you not unroll loops manually; compilers do it well.
Using a simple sin lookup table yields a >20% speed increase on my Linux machine (VM, gcc, 64-bit). Interestingly, the size of the lookup table (within reasonable, smaller-than-L1-cache values) does not influence the speed of execution.
Using a simple fastsin implementation from here I got a >45% improvement.
Code:
#include <math.h>
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>
#define LOOKUP_SIZE 628
uint64_t currentTimestampUs( void )
{
struct timeval tv;
time_t localTimeRet;
uint64_t timestamp = 0;
//time_t tzDiff = 0;
struct tm when;
int64_t localeOffset = 0;
{
localTimeRet = time(NULL);
localtime_r ( &localTimeRet, &when );
localeOffset = when.tm_gmtoff * 1000000ll;
}
gettimeofday ( &tv, NULL );
timestamp = ((uint64_t)((tv.tv_sec) * 1000000ll) ) + ( (uint64_t)(tv.tv_usec) );
timestamp+=localeOffset;
return timestamp;
}
const double PI = 3.141592653589793238462;
const double PI2 = 3.141592653589793238462 * 2;
static float sinarr[LOOKUP_SIZE];
void initSinArr() {
int a =0;
for (a=0; a<LOOKUP_SIZE; a++) {
double arg = (1.0*a/LOOKUP_SIZE)*((double)PI * 0.5);
float sinval_f = sin(arg); // double computation earlier to avoid losing precision on value
sinarr[a] = sinval_f;
}
}
float sinlookup(float val) {
float normval = val;
while (normval < 0) {
normval += PI2;
}
while (normval > PI2) {
normval -= PI2;
}
int index = LOOKUP_SIZE*(2*normval/PI);
if (index > 3*LOOKUP_SIZE) {
index = -index + 4*LOOKUP_SIZE;//LOOKUP_SIZE - (index-3*LOOKUP_SIZE);
return -sinarr[index];
} else if (index > 2*LOOKUP_SIZE) {
index = index - 2*LOOKUP_SIZE;
return -sinarr[index];
} else if (index > LOOKUP_SIZE) {
index = 2*LOOKUP_SIZE - index;
return sinarr[index];
} else {
return sinarr[index];
}
}
float sin_fast(float x) {
while (x < -PI)
x += PI2;
while (x > PI)
x -= PI2;
//compute sine
if (x < 0)
return 1.27323954 * x + .405284735 * x * x;
else
return 1.27323954 * x - 0.405284735 * x * x;
}
int main(void) {
initSinArr();
int a = 0;
float val = 0;
const int num_tries = 100000;
uint64_t startLookup = currentTimestampUs();
for (a=0; a<num_tries; a++) {
for (val=0; val<PI2; val+=0.01) {
float compval = sinlookup(val);
(void)compval;
}
}
uint64_t startSin = currentTimestampUs();
for (a=0; a<num_tries; a++) {
for (val=0; val<PI2; val+=0.01) {
float compval = sin(val);
(void)compval;
}
}
uint64_t startFastSin = currentTimestampUs();
for (a=0; a<num_tries; a++) {
for (val=0; val<PI2; val+=0.01) {
float compval = sin_fast(val);
(void)compval;
}
}
uint64_t end = currentTimestampUs();
int64_t lookupMs = (startSin - startLookup)/1000;
int64_t sinMs = (startFastSin - startSin)/1000;
int64_t fastSinMs = (end - startFastSin)/1000;
printf(" lookup: %lld ms\n", lookupMs );
printf(" sin: %lld ms\n", sinMs );
printf(" diff: %lld ms\n", sinMs-lookupMs);
printf(" diff%: %lld %\n", 100*(sinMs-lookupMs)/sinMs);
printf("fastsin: %lld ms\n", fastSinMs );
printf(" sin: %lld ms\n", sinMs );
printf(" diff: %lld ms\n", sinMs-fastSinMs);
printf(" diff%: %lld %\n", 100*(sinMs-fastSinMs)/sinMs);
}
Sample result:
lookup: 2276 ms
sin: 3004 ms
diff: 728 ms
diff%: 24 %
fastsin: 1500 ms
sin: 3004 ms
diff: 1504 ms
diff%: 50 %

Resources