For my internship I need to estimate the firing rate of neurons that follow an ODE. I first coded it in Python and it works pretty well, but for better performance my supervisor told me to rewrite the same code in C. However, I have never coded in C, so I am a complete beginner, and the file in which I want to write the firing-rate values is full of zeros. Can someone help me?
Thank you very much.
So here is my Python code:
import numpy as np
import matplotlib.pyplot as plt
from math import cos, sin, sqrt, pi, exp
#parameters
eps = 0.05
f = 0.215
mu = 1.1
D = 0.001
DeltaT = 0.01
timewindow = 40
num_points = int(timewindow/DeltaT)
T = np.linspace(0, timewindow, num_points)
#signal
s = [sin(2*3.14*f*t) for t in T]
N=30000
compteur=np.zeros(num_points)
v = np.zeros((num_points,N))
samples = np.random.normal(0, 1, (num_points,N))
for i in range(1, num_points):
    for j in range(N):
        v[i,j] = v[i-1,j] + DeltaT*(-v[i-1,j] + mu + eps*s[i-1]) + \
                 sqrt(2*D*DeltaT)*samples[i,j]
        if v[i,j] > 1:
            v[i,j] = 0
            compteur[i] += 1/DeltaT/N
plt.plot(T, compteur)
plt.show()
and here is my "translation" in C :
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define PI 3.1415926536
float s(float x, float);
double AWGN_generator();
FILE* fopen(const char* nomDuFichier, const char* modeOuverture);
int fclose(FILE* pointeurSurFichier);
int main(int argc, char *argv[])
{
//parameters
double eps = 0.05;
float f = 0.215 ;
double mu = 1.1 ;
double D = 0.001 ;
int time_window = 90;
int num_points = 1000;
long num_neurons = 1000;
double deltaT = time_window/num_points;
int i ;
//time
double Time[num_points] ;
Time[0]= 0.0;
for (i = 1 ; i < num_points ; i++ )
{ Time[i] = Time[i-1] + deltaT;
}
//opening file for saving data
FILE* fichier = NULL;
fichier = fopen("challala.txt", "w");
if (fichier != NULL)
{
double v[num_points][num_neurons] ;
memset(v, 0, num_points*num_neurons*sizeof(long) );
long compteur[num_points];
memset( compteur, 0, num_points*sizeof(long) );
int pos_1 ;
int pos_2 ;
//Euler's method
for (pos_1 = 1 ; pos_1 < num_points ; pos_1 ++)
{
for (pos_2 = 0 ; pos_2<num_neurons ; pos_2 ++)
{
float t = Time[pos_1-1] ;
v[pos_1][pos_2] = v[pos_1-1][pos_2] + deltaT *(-v[pos_1-1]
[pos_2]+ mu + eps*s(t, f))+ sqrt(2*D*deltaT)*AWGN_generator();
if (v[pos_1][pos_2]>1)
{
v[pos_1][pos_2]=0 ;
compteur[pos_1]+=1/deltaT/num_neurons ;
}
}
fprintf(fichier, "%ld",compteur[pos_1]);
}
fclose(fichier);
printf("ca a marche test.txt");
}
else
{
// On affiche un message d'erreur si on veut
printf("Impossible d'ouvrir le fichier test.txt");
}
return 0;
}
float s(float x, float f)
{
return sin(2*M_PI*f*x);
}
double AWGN_generator()
{/* Generates additive white Gaussian Noise samples with zero mean and a
standard deviation of 1. */
double temp1;
double temp2;
double result;
int p;
p = 1;
while( p > 0 )
{
temp2 = ( rand() / ( (double)RAND_MAX ) ); /* rand() function generates an
integer between 0 and
RAND_MAX,
which is defined in
stdlib.h.
*/
if ( temp2 == 0 )
{// temp2 is >= (RAND_MAX / 2)
p = 1;
}// end if
else
{// temp2 is < (RAND_MAX / 2)
p = -1;
}// end else
}// end while()
temp1 = cos( ( 2.0 * (double)PI ) * rand() / ( (double)RAND_MAX ) );
result = sqrt( -2.0 * log( temp2 ) ) * temp1;
return result; // return the generated random sample to the caller
}
I repaired the code as best I could, with the old code left commented out; see the comments for the details of each change.
I would need to see the expected contents of challala.txt for a definitive test.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Why use a coarse approximation?
//#define PI 3.1415926536
#define PI 3.1415926535897932384626433832795
// Let us stick to double only.
//float s(float x, float);
double s(double x, double);
// Add void, else declaration does not check the parameters.
//double AWGN_generator();
double AWGN_generator(void);
// These should have already been declared in <stdio.h>
// FILE* fopen(const char* nomDuFichier, const char* modeOuverture);
// int fclose(FILE* pointeurSurFichier);
int main(int argc, char *argv[]) {
//parameters
double eps = 0.05;
// float f = 0.215;
double f = 0.215;
double mu = 1.1;
double D = 0.001;
int time_window = 90;
// Unclear why `int/long` used here. size_t would be idiomatic for array sizing.
int num_points = 1000;
long num_neurons = 1000;
// Avoid integer division when a FP quotient is desired
// double deltaT = time_window / num_points;
double deltaT = 1.0*time_window / num_points;
int i;
//time
double Time[num_points];
Time[0] = 0.0;
for (i = 1; i < num_points; i++)
{
Time[i] = Time[i - 1] + deltaT;
}
//opening file for saving data
FILE* fichier = NULL;
fichier = fopen("challala.txt", "w");
if (fichier != NULL) {
double v[num_points][num_neurons];
// zero fill use wrong type in sizeof.
// Avoid type in sizeof, better to use sizeof object
// memset(v, 0, num_points * num_neurons * sizeof(long));
memset(v, 0, sizeof v);
// Let us use FP here.
//long compteur[num_points];
double compteur[num_points];
memset(compteur, 0, sizeof compteur);
int pos_1;
int pos_2;
//Euler's method
for (pos_1 = 1; pos_1 < num_points; pos_1++)
{
for (pos_2 = 0; pos_2 < num_neurons; pos_2++) {
// float t = Time[pos_1 - 1];
double t = Time[pos_1 - 1];
v[pos_1][pos_2] = v[pos_1 - 1][pos_2]
+ deltaT * (-v[pos_1 - 1][pos_2] + mu + eps * s(t, f))
+ sqrt(2 * D * deltaT) * AWGN_generator();
if (v[pos_1][pos_2] > 1) {
v[pos_1][pos_2] = 0;
compteur[pos_1] += 1 / deltaT / num_neurons;
}
}
// Change of type
// fprintf(fichier, "%ld", compteur[pos_1]);
fprintf(fichier, " %g", compteur[pos_1]);
}
fclose(fichier);
printf("ca a marche test.txt");
} else {
// The original message referred to test.txt, but the file opened is challala.txt.
// printf("Impossible d'ouvrir le fichier test.txt");
printf("Impossible d'ouvrir le fichier \"%s\"\n", "challala.txt");
}
return 0;
}
//float s(float x, float f) {
double s(double x, double f) {
// M_PI is not defined in the standard C library, although common in extensions.
//return sin(2 * M_PI * f * x);
return sin(2 * PI * f * x);
}
double AWGN_generator(void) {
double temp1;
double temp2;
double result;
int p;
p = 1;
while (p > 0) {
temp2 = (rand() / ((double) RAND_MAX));
if (temp2 == 0) { // reject 0 so that log(temp2) below stays finite
p = 1; // try again
} // end if
else {
p = -1; // accept this sample
} // end else
} // end while()
temp1 = cos((2.0 * (double) PI) * rand() / ((double) RAND_MAX));
result = sqrt(-2.0 * log(temp2)) * temp1;
return result; // return the generated random sample to the caller
}
Minor and advanced numeric issue:
Realize that the quotient 1.0*time_window / num_points may be a little different from the mathematically expected value due to the finite precision of double. This is, at worst, expected to be a very small amount, maybe about 0.5 parts in 2^53.
Yet the repetitive additions accumulate the error.
double deltaT = 1.0*time_window / num_points;
int i;
double Time[num_points];
Time[0] = 0.0;
for (i = 1; i < num_points; i++) {
Time[i] = Time[i - 1] + deltaT;
}
To avoid that accumulated error, code can re-calculate Time[i] anew on each iteration.
double deltaT = 1.0*time_window / num_points;
double Time[num_points];
for (int i = 0; i < num_points; i++) {
Time[i] = deltaT*i;
}
Of course, such small errors are often ignorable, but maybe not when num_points is large enough. This happens when your good code is applied to ever larger tasks.
Regarding the following 3 statements:
int time_window = 90;
int num_points = 1000;
double deltaT = time_window/num_points;
Since time_window and num_points are integers, the division is performed as an integer divide.
In an integer divide, all fractions are truncated.
the expression: time_window/num_points is actually:
90 / 1000
the resulting fraction has everything right of the decimal point truncated, so the result is 0
so: Time[0] + 0 results in 0.0.
The same (calculated) value, 0.0, is then propagated through the whole array.
suggest changing:
int time_window = 90;
to
double time_window = 90.0;
(num_points should stay an integer, since it is used as an array size and a loop bound; making time_window a double is enough to force a floating-point division)
Regarding:
memset(v, 0, num_points*num_neurons*sizeof(long) );
this statement may (or may not) perform the desired functionality. It depends on whether the size of double is the same as the size of long.
Suggest using:
memset( v, 0, sizeof( v ) );
I have a number of time series each containing a sequence of 400 numbers that are close to each other. I have thousands of time series; each has its own series of close numbers.
TimeSeries1 = 184.56, 184.675, 184.55, 184.77, ...
TimeSeries2 = 145.73, 145.384, 145.96, 145.33, ...
TimeSeries3 = -126.48, -126.78, -126.55, ...
I can store an 8-byte double for each time series, so for most of the time series I can compress each double to a single byte by multiplying by 100 and taking the delta of the current value and the previous value.
Here is my compress/decompress code:
struct{
double firstValue;
double nums[400];
char compressedNums[400];
int compressionOK;
} timeSeries;
void compress(void){
timeSeries.firstValue = timeSeries.nums[0];
double lastValue = timeSeries.firstValue;
for (int i = 1; i < 400; ++i){
int delta = (int) ((timeSeries.nums[i] * 100) - (lastValue* 100));
timeSeries.compressionOK = 1;
if (delta > CHAR_MAX || delta < -CHAR_MAX){
timeSeries.compressionOK = 0;
return;
}
else{
timeSeries.compressedNums[i] = (char) delta;
lastValue = timeSeries.nums[i];
}
}
}
double decompressedNums[400];
void decompress(void){
if (timeSeries.compressionOK){
double lastValue = timeSeries.firstValue;
for (int i = 1; i < 400; ++i){
decompressedNums[i] = lastValue + timeSeries.compressedNums[i] / 100.0;
lastValue = decompressedNums[i];
}
}
}
I can tolerate some lossiness, on the order of .005 per number. However, I am getting more loss than I can tolerate, especially since a precision loss in one of the compressed series carries forward and causes an increasing amount of loss.
So my questions are:
Is there something I can change to reduce the lossiness?
Is there an altogether different compression method with a ratio comparable to, or better than, this 8:1?
You can avoid the slow drift in precision by working out the delta not from the precise value of the previous element, but rather from the computed approximation of the previous element (i.e. the sum of the deltas). That way, you will always get the closest approximation to the next value.
Personally, I'd use integer arithmetic for this purpose, but it will probably be fine with floating point arithmetic too, since floating point is reproducible even if not precise.
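A minimal sketch of that idea, reusing the struct, the 400-element size and CHAR_MAX from the question (round() is from C99's <math.h>, CHAR_MAX from <limits.h>); the key change is that lastReconstructed tracks the value the decompressor will rebuild, not the exact input:
void compress(void){
    timeSeries.firstValue = timeSeries.nums[0];
    double lastReconstructed = timeSeries.firstValue;  /* what decompress() will see */
    timeSeries.compressionOK = 1;
    for (int i = 1; i < 400; ++i){
        int delta = (int) round((timeSeries.nums[i] - lastReconstructed) * 100.0);
        if (delta > CHAR_MAX || delta < -CHAR_MAX){
            timeSeries.compressionOK = 0;
            return;
        }
        timeSeries.compressedNums[i] = (char) delta;
        /* advance by the stored delta, mirroring decompress(), so error cannot accumulate */
        lastReconstructed += delta / 100.0;
    }
}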
Look at the values as stored in memory:
184. == 0x4067000000000000ull
184.56 == 0x406711eb851eb852ull
The first two bytes are the same but the last six bytes are different.
For integer deltas, multiply by 128 instead of 100; this gets you 7 bits of the fractional part. If the delta is too large for one byte, use a three-byte sequence {0x80, hi_delta, lo_delta}, so 0x80 is used as a special escape indicator. If the delta happened to be -128, it would be encoded as {0x80, 0xff, 0x80}.
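A rough sketch of how that escape encoding could look; emit_byte() is a hypothetical output sink (in practice it would append to the compressed buffer), and deltas here are already scaled by 128:
#include <stdint.h>

void emit_byte(uint8_t b); /* hypothetical: append one byte to the output */

/* Emit one scaled delta as either a single byte or the
   three-byte escape sequence {0x80, hi_delta, lo_delta}. */
void emit_delta(int delta)
{
    if (delta >= -127 && delta <= 127) {   /* 0x80 (i.e. -128) is reserved as the escape */
        emit_byte((uint8_t)delta);
    } else {
        uint16_t u = (uint16_t)delta;      /* two's-complement 16-bit view of the delta */
        emit_byte(0x80);                   /* escape marker */
        emit_byte((uint8_t)(u >> 8));      /* hi_delta */
        emit_byte((uint8_t)(u & 0xFF));    /* lo_delta */
    }
}
With this scheme a delta of -128 comes out as {0x80, 0xff, 0x80}, matching the example above.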
You should round the values before converting to an int to avoid the problems, as in this code.
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
enum { TS_SIZE = 400 };
typedef struct
{
double firstValue;
double nums[TS_SIZE];
signed char compressedNums[TS_SIZE];
int compressionOK;
} timeSeries;
static
void compress(timeSeries *t1)
{
t1->firstValue = t1->nums[0];
double lastValue = t1->firstValue;
for (int i = 1; i < TS_SIZE; ++i)
{
int delta = (int) round((t1->nums[i] - lastValue) * 100.0);
t1->compressionOK = 1;
if (delta > CHAR_MAX || delta < -CHAR_MAX)
{
printf("Delta too big: %d (%.3f) vs %d (%.3f) = delta %.3f\n",
i-1, t1->nums[i-1], i, t1->nums[i], t1->nums[i] - t1->nums[i-1]);
t1->compressionOK = 0;
return;
}
else
{
t1->compressedNums[i] = (char) delta;
lastValue = t1->nums[i];
}
}
}
static
void decompress(timeSeries *t1)
{
if (t1->compressionOK)
{
double lastValue = t1->firstValue;
for (int i = 1; i < TS_SIZE; ++i)
{
t1->nums[i] = lastValue + t1->compressedNums[i] / 100.0;
lastValue = t1->nums[i];
}
}
}
static void compare(const timeSeries *t0, const timeSeries *t1)
{
for (int i = 0; i < TS_SIZE; i++)
{
char c = (fabs(t0->nums[i] - t1->nums[i]) > 0.005) ? '!' : ' ';
printf("%c %03d: %.3f vs %.3f = %+.3f\n", c, i, t0->nums[i], t1->nums[i], t0->nums[i] - t1->nums[i]);
}
}
int main(void)
{
timeSeries t1;
timeSeries t0;
int i;
for (i = 0; i < TS_SIZE; i++)
{
if (scanf("%lf", &t0.nums[i]) != 1)
break;
}
if (i != TS_SIZE)
{
printf("Reading problems\n");
return 1;
}
t1 = t0;
for (i = 0; i < 10; i++)
{
printf("Cycle %d:\n", i+1);
compress(&t1);
decompress(&t1);
compare(&t0, &t1);
}
return 0;
}
With the following data (generated from integers in the range 18456..18855 divided by 100 and randomly perturbed by a small amount, about 0.3%, to keep the values close enough together), I got the same data over and over again, for the full 10 cycles of compression and decompression.
184.60 184.80 184.25 184.62 184.49 184.94 184.95 184.39 184.50 184.96
184.54 184.72 184.84 185.02 184.83 185.01 184.43 185.00 184.74 184.88
185.04 184.79 184.55 184.94 185.07 184.60 184.55 184.57 184.95 185.07
184.61 184.57 184.57 184.98 185.24 185.11 184.89 184.72 184.77 185.29
184.98 184.91 184.76 184.89 185.26 184.94 185.09 184.68 184.69 185.04
185.39 185.05 185.41 185.41 184.74 184.77 185.16 184.84 185.31 184.90
185.18 185.15 185.03 185.41 185.18 185.25 185.01 185.31 185.36 185.29
185.62 185.48 185.40 185.15 185.29 185.19 185.32 185.60 185.39 185.22
185.66 185.48 185.53 185.59 185.27 185.69 185.29 185.70 185.77 185.40
185.41 185.23 185.84 185.30 185.70 185.18 185.68 185.43 185.45 185.71
185.60 185.82 185.92 185.40 185.85 185.65 185.92 185.80 185.60 185.57
185.64 185.39 185.48 185.36 185.69 185.76 185.45 185.72 185.47 186.04
185.81 185.80 185.94 185.64 186.09 185.95 186.03 185.55 185.65 185.75
186.03 186.02 186.24 186.19 185.62 186.13 185.98 185.84 185.83 186.19
186.17 185.80 186.15 186.10 186.32 186.25 186.09 186.20 186.06 185.80
186.02 186.40 186.26 186.15 186.35 185.90 185.98 186.19 186.15 185.84
186.34 186.20 186.41 185.93 185.97 186.46 185.92 186.19 186.15 186.32
186.06 186.25 186.47 186.56 186.47 186.33 186.55 185.98 186.36 186.35
186.65 186.60 186.52 186.13 186.39 186.55 186.50 186.45 186.29 186.24
186.81 186.61 186.80 186.60 186.75 186.83 186.86 186.35 186.34 186.53
186.60 186.69 186.32 186.23 186.39 186.71 186.65 186.37 186.37 186.54
186.81 186.84 186.78 186.50 186.47 186.44 186.36 186.59 186.87 186.70
186.90 186.47 186.50 186.74 186.80 186.86 186.72 186.63 186.78 186.52
187.22 186.71 186.56 186.90 186.95 186.67 186.79 186.99 186.85 187.03
187.04 186.89 187.19 187.33 187.09 186.92 187.35 187.29 187.04 187.00
186.79 187.32 186.94 187.07 186.92 187.06 187.39 187.20 187.35 186.78
187.47 187.54 187.33 187.07 187.39 186.97 187.48 187.10 187.52 187.55
187.06 187.24 187.28 186.92 187.60 187.05 186.95 187.26 187.08 187.35
187.24 187.66 187.57 187.75 187.15 187.08 187.55 187.30 187.17 187.17
187.13 187.14 187.40 187.71 187.64 187.32 187.42 187.19 187.40 187.66
187.93 187.27 187.44 187.35 187.34 187.54 187.70 187.62 187.99 187.97
187.51 187.36 187.82 187.75 187.56 187.53 187.38 187.91 187.63 187.51
187.39 187.54 187.69 187.84 188.16 187.61 188.03 188.06 187.53 187.51
187.93 188.04 187.77 187.69 188.03 187.81 188.04 187.82 188.14 187.96
188.05 187.63 188.35 187.65 188.00 188.27 188.20 188.21 187.81 188.04
187.87 187.96 188.18 187.98 188.46 187.89 187.77 188.18 187.83 188.03
188.48 188.09 187.82 187.90 188.40 188.32 188.33 188.29 188.58 188.53
187.88 188.32 188.57 188.14 188.02 188.25 188.62 188.43 188.19 188.54
188.20 188.06 188.31 188.19 188.48 188.44 188.69 188.63 188.34 188.76
188.32 188.82 188.45 188.34 188.44 188.25 188.39 188.83 188.49 188.18
Until I put the rounding in, the values would rapidly drift apart.
If you don't have round() — which was added to Standard C in the C99 standard — then you can use these lines in place of round():
int delta;
if (t1->nums[i] > lastValue)
delta = (int) (((t1->nums[i] - lastValue) * 100.0) + 0.5);
else
delta = (int) (((t1->nums[i] - lastValue) * 100.0) - 0.5);
This rounds correctly for positive and negative values. You could also factor that into a function; in C99, you could make it an inline function, but if that worked, you would have the round() function in the library, too. I used this code at first before switching to the round() function.
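For instance, a small helper along those lines (just a sketch; in C99 it could be declared inline):
/* Round to the nearest int, halfway cases away from zero,
   for use where C99's round() is not available. */
static int round_to_int(double x)
{
    return (x >= 0.0) ? (int)(x + 0.5) : (int)(x - 0.5);
}
so the delta computation becomes int delta = round_to_int((t1->nums[i] - lastValue) * 100.0);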
I am trying to implement a linear least squares fit on 2 arrays of data: time vs amplitude. The only technique I know so far is to test all of the possible m and b values in y = m*x + b and then find out which combination fits my data best, i.e. has the least error. However, iterating over so many combinations seems wasteful because it tests everything. Are there any techniques to speed up the process that I don't know about? Thanks.
Try this code. It fits y = mx + b to your (x,y) data.
The arguments to linreg are
linreg(int n, REAL x[], REAL y[], REAL* m, REAL* b, REAL* r)
n = number of data points
x,y = arrays of data
*m = output slope
*b = output intercept
*r = output correlation coefficient (can be NULL if you don't want it)
The return value is 0 on success, !=0 on failure.
Here's the code
#include "linreg.h"
#include <stdlib.h>
#include <math.h> /* math functions */
//#define REAL float
#define REAL double
inline static REAL sqr(REAL x) {
return x*x;
}
int linreg(int n, const REAL x[], const REAL y[], REAL* m, REAL* b, REAL* r){
REAL sumx = 0.0; /* sum of x */
REAL sumx2 = 0.0; /* sum of x**2 */
REAL sumxy = 0.0; /* sum of x * y */
REAL sumy = 0.0; /* sum of y */
REAL sumy2 = 0.0; /* sum of y**2 */
for (int i=0;i<n;i++){
sumx += x[i];
sumx2 += sqr(x[i]);
sumxy += x[i] * y[i];
sumy += y[i];
sumy2 += sqr(y[i]);
}
REAL denom = (n * sumx2 - sqr(sumx));
if (denom == 0) {
// singular matrix. can't solve the problem.
*m = 0;
*b = 0;
if (r) *r = 0;
return 1;
}
*m = (n * sumxy - sumx * sumy) / denom;
*b = (sumy * sumx2 - sumx * sumxy) / denom;
if (r!=NULL) {
*r = (sumxy - sumx * sumy / n) / /* compute correlation coeff */
sqrt((sumx2 - sqr(sumx)/n) *
(sumy2 - sqr(sumy)/n));
}
return 0;
}
Example
You can run this example online.
int main()
{
int n = 6;
REAL x[6]= {1, 2, 4, 5, 10, 20};
REAL y[6]= {4, 6, 12, 15, 34, 68};
REAL m,b,r;
linreg(n,x,y,&m,&b,&r);
printf("m=%g b=%g r=%g\n",m,b,r);
return 0;
}
Here is the output
m=3.43651 b=-0.888889 r=0.999192
Here is the Excel plot and linear fit (for verification).
All values agree exactly with the C code above (note C code returns r while Excel returns R**2).
There are efficient algorithms for least-squares fitting; see Wikipedia for details. There are also libraries that implement the algorithms for you, likely more efficiently than a naive implementation would do; the GNU Scientific Library is one example, but there are others under more lenient licenses as well.
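For example, with the GNU Scientific Library the whole fit is one call to gsl_fit_linear; a sketch using the same sample data as the answer above (compile with -lgsl -lgslcblas):
#include <stdio.h>
#include <gsl/gsl_fit.h>

int main(void)
{
    double x[6] = {1, 2, 4, 5, 10, 20};
    double y[6] = {4, 6, 12, 15, 34, 68};
    double c0, c1, cov00, cov01, cov11, sumsq;

    /* Fits y = c0 + c1*x; the strides are 1 because the arrays are contiguous. */
    gsl_fit_linear(x, 1, y, 1, 6, &c0, &c1, &cov00, &cov01, &cov11, &sumsq);
    printf("b=%g m=%g\n", c0, c1);
    return 0;
}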
From Numerical Recipes: The Art of Scientific Computing, section 15.2, Fitting Data to a Straight Line:
Linear Regression:
Consider the problem of fitting a set of N data points (x_i, y_i) to the straight-line model
y(x) = a + b*x
Assume that the uncertainty sigma_i associated with each y_i is known, and that the x_i's (values of the independent variable) are known exactly. To measure how well the model agrees with the data, we use the chi-square function, which in this case is
chi2(a, b) = sum over i of ((y_i - a - b*x_i) / sigma_i)^2
The above equation is minimized to determine a and b. This is done by taking the derivatives of the above equation with respect to a and b, equating them to zero, and solving for a and b. Then we estimate the probable uncertainties in the estimates of a and b, since the measurement errors in the data must introduce some uncertainty in the determination of those parameters. Additionally, we must estimate the goodness-of-fit of the data to the model. Absent this estimate, we have not the slightest indication that the parameters a and b in the model have any meaning at all.
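Carrying that out gives the standard weighted least-squares solution; in Numerical Recipes' notation, with
S = sum(1/sigma_i^2), Sx = sum(x_i/sigma_i^2), Sy = sum(y_i/sigma_i^2), Sxx = sum(x_i^2/sigma_i^2), Sxy = sum(x_i*y_i/sigma_i^2),
Delta = S*Sxx - Sx^2
a = (Sxx*Sy - Sx*Sxy) / Delta, with sigma_a^2 = Sxx / Delta
b = (S*Sxy - Sx*Sy) / Delta, with sigma_b^2 = S / Delta
(the struct below evaluates these in a rearranged, numerically better-behaved form).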
The below struct performs the mentioned calculations:
struct Fitab {
// Object for fitting a straight line y = a + b*x to a set of
// points (xi, yi), with or without available
// errors sigma i . Call one of the two constructors to calculate the fit.
// The answers are then available as the variables:
// a, b, siga, sigb, chi2, and either q or sigdat.
int ndata;
double a, b, siga, sigb, chi2, q, sigdat; // Answers.
vector<double> &x, &y, &sig;
// Constructor.
Fitab(vector<double> &xx, vector<double> &yy, vector<double> &ssig)
: ndata(xx.size()), x(xx), y(yy), sig(ssig), chi2(0.), q(1.), sigdat(0.)
{
// Given a set of data points x[0..ndata-1], y[0..ndata-1]
// with individual standard deviations sig[0..ndata-1],
// sets a,b and their respective probable uncertainties
// siga and sigb, the chi-square: chi2, and the goodness-of-fit
// probability: q
Gamma gam;
int i;
double ss=0., sx=0., sy=0., st2=0., t, wt, sxoss; b=0.0;
for (i=0;i < ndata; i++) { // Accumulate sums ...
wt = 1.0 / SQR(sig[i]); //...with weights
ss += wt;
sx += x[i]*wt;
sy += y[i]*wt;
}
sxoss = sx/ss;
for (i=0; i < ndata; i++) {
t = (x[i]-sxoss) / sig[i];
st2 += t*t;
b += t*y[i]/sig[i];
}
b /= st2; // Solve for a, b, sigma-a, and simga-b.
a = (sy-sx*b) / ss;
siga = sqrt((1.0+sx*sx/(ss*st2))/ss);
sigb = sqrt(1.0/st2); // Calculate chi2.
for (i=0;i<ndata;i++) chi2 += SQR((y[i]-a-b*x[i])/sig[i]);
if (ndata>2) q=gam.gammq(0.5*(ndata-2),0.5*chi2); // goodness of fit
}
// Constructor.
Fitab(vector<double> &xx, vector<double> &yy)
: ndata(xx.size()), x(xx), y(yy), sig(xx), chi2(0.), q(1.), sigdat(0.)
{
// As above, but without known errors (sig is not used).
// The uncertainties siga and sigb are estimated by assuming
// equal errors for all points, and that a straight line is
// a good fit. q is returned as 1.0, the normalization of chi2
// is to unit standard deviation on all points, and sigdat
// is set to the estimated error of each point.
int i;
double ss,sx=0.,sy=0.,st2=0.,t,sxoss;
b=0.0; // Accumulate sums ...
for (i=0; i < ndata; i++) {
sx += x[i]; // ...without weights.
sy += y[i];
}
ss = ndata;
sxoss = sx/ss;
for (i=0;i < ndata; i++) {
t = x[i]-sxoss;
st2 += t*t;
b += t*y[i];
}
b /= st2; // Solve for a, b, sigma-a, and sigma-b.
a = (sy-sx*b)/ss;
siga=sqrt((1.0+sx*sx/(ss*st2))/ss);
sigb=sqrt(1.0/st2); // Calculate chi2.
for (i=0;i<ndata;i++) chi2 += SQR(y[i]-a-b*x[i]);
if (ndata > 2) sigdat=sqrt(chi2/(ndata-2));
// For unweighted data evaluate typical
// sig using chi2, and adjust
// the standard deviations.
siga *= sigdat;
sigb *= sigdat;
}
};
where struct Gamma:
struct Gamma : Gauleg18 {
// Object for incomplete gamma function.
// Gauleg18 provides coefficients for Gauss-Legendre quadrature.
static const Int ASWITCH=100; // When to switch to quadrature method.
static const double EPS; // See end of struct for initializations.
static const double FPMIN;
double gln;
double gammp(const double a, const double x) {
// Returns the incomplete gamma function P(a,x)
if (x < 0.0 || a <= 0.0) throw("bad args in gammp");
if (x == 0.0) return 0.0;
else if ((Int)a >= ASWITCH) return gammpapprox(a,x,1); // Quadrature.
else if (x < a+1.0) return gser(a,x); // Use the series representation.
else return 1.0-gcf(a,x); // Use the continued fraction representation.
}
double gammq(const double a, const double x) {
// Returns the incomplete gamma function Q(a,x) = 1 - P(a,x)
if (x < 0.0 || a <= 0.0) throw("bad args in gammq");
if (x == 0.0) return 1.0;
else if ((Int)a >= ASWITCH) return gammpapprox(a,x,0); // Quadrature.
else if (x < a+1.0) return 1.0-gser(a,x); // Use the series representation.
else return gcf(a,x); // Use the continued fraction representation.
}
double gser(const Doub a, const Doub x) {
// Returns the incomplete gamma function P(a,x) evaluated by its series representation.
// Also sets ln (gamma) as gln. User should not call directly.
double sum,del,ap;
gln=gammln(a);
ap=a;
del=sum=1.0/a;
for (;;) {
++ap;
del *= x/ap;
sum += del;
if (fabs(del) < fabs(sum)*EPS) {
return sum*exp(-x+a*log(x)-gln);
}
}
}
double gcf(const Doub a, const Doub x) {
// Returns the incomplete gamma function Q(a, x) evaluated
// by its continued fraction representation.
// Also sets ln (gamma) as gln. User should not call directly.
int i;
double an,b,c,d,del,h;
gln=gammln(a);
b=x+1.0-a; // Set up for evaluating continued fraction
// by modified Lentz’s method with b0 = 0.
c=1.0/FPMIN;
d=1.0/b;
h=d;
for (i=1;;i++) {
// Iterate to convergence.
an = -i*(i-a);
b += 2.0;
d=an*d+b;
if (fabs(d) < FPMIN) d=FPMIN;
c=b+an/c;
if (fabs(c) < FPMIN) c=FPMIN;
d=1.0/d;
del=d*c;
h *= del;
if (fabs(del-1.0) <= EPS) break;
}
return exp(-x+a*log(x)-gln)*h; // Put factors in front.
}
double gammpapprox(double a, double x, int psig) {
// Incomplete gamma by quadrature. Returns P(a,x) or Q(a, x),
// when psig is 1 or 0, respectively. User should not call directly.
int j;
double xu,t,sum,ans;
double a1 = a-1.0, lna1 = log(a1), sqrta1 = sqrt(a1);
gln = gammln(a);
// Set how far to integrate into the tail:
if (x > a1) xu = MAX(a1 + 11.5*sqrta1, x + 6.0*sqrta1);
else xu = MAX(0.,MIN(a1 - 7.5*sqrta1, x - 5.0*sqrta1));
sum = 0;
for (j=0;j<ngau;j++) { // Gauss-Legendre.
t = x + (xu-x)*y[j];
sum += w[j]*exp(-(t-a1)+a1*(log(t)-lna1));
}
ans = sum*(xu-x)*exp(a1*(lna1-1.)-gln);
return (psig?(ans>0.0? 1.0-ans:-ans):(ans>=0.0? ans:1.0+ans));
}
double invgammp(Doub p, Doub a);
// Inverse function on x of P(a,x) .
};
const Doub Gamma::EPS = numeric_limits<Doub>::epsilon();
const Doub Gamma::FPMIN = numeric_limits<Doub>::min()/EPS;
and struct Gauleg18:
struct Gauleg18 {
// Abscissas and weights for Gauss-Legendre quadrature.
static const Int ngau = 18;
static const Doub y[18];
static const Doub w[18];
};
const Doub Gauleg18::y[18] = {0.0021695375159141994,
0.011413521097787704,0.027972308950302116,0.051727015600492421,
0.082502225484340941, 0.12007019910960293,0.16415283300752470,
0.21442376986779355, 0.27051082840644336, 0.33199876341447887,
0.39843234186401943, 0.46931971407375483, 0.54413605556657973,
0.62232745288031077, 0.70331500465597174, 0.78649910768313447,
0.87126389619061517, 0.95698180152629142};
const Doub Gauleg18::w[18] = {0.0055657196642445571,
0.012915947284065419,0.020181515297735382,0.027298621498568734,
0.034213810770299537,0.040875750923643261,0.047235083490265582,
0.053244713977759692,0.058860144245324798,0.064039797355015485,
0.068745323835736408,0.072941885005653087,0.076598410645870640,
0.079687828912071670,0.082187266704339706,0.084078218979661945,
0.085346685739338721,0.085983275670394821};
and, finally, function Gamma::invgammp():
double Gamma::invgammp(double p, double a) {
// Returns x such that P(a,x) = p for an argument p between 0 and 1.
int j;
double x,err,t,u,pp,lna1,afac,a1=a-1;
const double EPS=1.e-8; // Accuracy is the square of EPS.
gln=gammln(a);
if (a <= 0.) throw("a must be pos in invgammap");
if (p >= 1.) return MAX(100.,a + 100.*sqrt(a));
if (p <= 0.) return 0.0;
if (a > 1.) {
lna1=log(a1);
afac = exp(a1*(lna1-1.)-gln);
pp = (p < 0.5)? p : 1. - p;
t = sqrt(-2.*log(pp));
x = (2.30753+t*0.27061)/(1.+t*(0.99229+t*0.04481)) - t;
if (p < 0.5) x = -x;
x = MAX(1.e-3,a*pow(1.-1./(9.*a)-x/(3.*sqrt(a)),3));
} else {
t = 1.0 - a*(0.253+a*0.12); // Initial guess based on equations (6.2.8) and (6.2.9).
if (p < t) x = pow(p/t,1./a);
else x = 1.-log(1.-(p-t)/(1.-t));
}
for (j=0;j<12;j++) {
if (x <= 0.0) return 0.0; // x too small to compute accurately.
err = gammp(a,x) - p;
if (a > 1.) t = afac*exp(-(x-a1)+a1*(log(x)-lna1));
else t = exp(-x+a1*log(x)-gln);
u = err/t;
// Halley’s method.
x -= (t = u/(1.-0.5*MIN(1.,u*((a-1.)/x - 1))));
// Halve old value if x tries to go negative.
if (x <= 0.) x = 0.5*(x + t);
if (fabs(t) < EPS*x ) break;
}
return x;
}
Here is my version of a C/C++ function that does simple linear regression. The calculations follow the wikipedia article on simple linear regression. This is published as a single-header public-domain (MIT) library on github: simple_linear_regression. The library (.h file) is tested to work on Linux and Windows, and from C and C++ using -Wall -Werror and all -std versions supported by clang/gcc.
#define SIMPLE_LINEAR_REGRESSION_ERROR_INPUT_VALUE -2
#define SIMPLE_LINEAR_REGRESSION_ERROR_NUMERIC -3
int simple_linear_regression(const double * x, const double * y, const int n, double * slope_out, double * intercept_out, double * r2_out) {
double sum_x = 0.0;
double sum_xx = 0.0;
double sum_xy = 0.0;
double sum_y = 0.0;
double sum_yy = 0.0;
double n_real = (double)(n);
int i = 0;
double slope = 0.0;
double denominator = 0.0;
if (x == NULL || y == NULL || n < 2) {
return SIMPLE_LINEAR_REGRESSION_ERROR_INPUT_VALUE;
}
for (i = 0; i < n; ++i) {
sum_x += x[i];
sum_xx += x[i] * x[i];
sum_xy += x[i] * y[i];
sum_y += y[i];
sum_yy += y[i] * y[i];
}
denominator = n_real * sum_xx - sum_x * sum_x;
if (denominator == 0.0) {
return SIMPLE_LINEAR_REGRESSION_ERROR_NUMERIC;
}
slope = (n_real * sum_xy - sum_x * sum_y) / denominator;
if (slope_out != NULL) {
*slope_out = slope;
}
if (intercept_out != NULL) {
*intercept_out = (sum_y - slope * sum_x) / n_real;
}
if (r2_out != NULL) {
denominator = ((n_real * sum_xx) - (sum_x * sum_x)) * ((n_real * sum_yy) - (sum_y * sum_y));
if (denominator == 0.0) {
return SIMPLE_LINEAR_REGRESSION_ERROR_NUMERIC;
}
*r2_out = ((n_real * sum_xy) - (sum_x * sum_y)) * ((n_real * sum_xy) - (sum_x * sum_y)) / denominator;
}
return 0;
}
Usage example:
#define SIMPLE_LINEAR_REGRESSION_IMPLEMENTATION
#include "simple_linear_regression.h"
#include <stdio.h>
/* Some data that we want to find the slope, intercept and r2 for */
static const double x[] = { 1.47, 1.50, 1.52, 1.55, 1.57, 1.60, 1.63, 1.65, 1.68, 1.70, 1.73, 1.75, 1.78, 1.80, 1.83 };
static const double y[] = { 52.21, 53.12, 54.48, 55.84, 57.20, 58.57, 59.93, 61.29, 63.11, 64.47, 66.28, 68.10, 69.92, 72.19, 74.46 };
int main() {
double slope = 0.0;
double intercept = 0.0;
double r2 = 0.0;
int res = 0;
res = simple_linear_regression(x, y, sizeof(x) / sizeof(x[0]), &slope, &intercept, &r2);
if (res < 0) {
printf("Error: %s\n", simple_linear_regression_error_string(res));
return res;
}
printf("slope: %f\n", slope);
printf("intercept: %f\n", intercept);
printf("r2: %f\n", r2);
return 0;
}
The original example above worked well for me for the slope and intercept, but I had a hard time with the correlation coefficient. Maybe my parentheses don't work the same as the assumed precedence? Anyway, with some help from other web pages I finally got values that match the linear trend-line in Excel. I thought I would share my code using Mark Lakata's variable names. Hope this helps.
double slope = ((n * sumxy) - (sumx * sumy )) / denom;
double intercept = ((sumy * sumx2) - (sumx * sumxy)) / denom;
double term1 = ((n * sumxy) - (sumx * sumy));
double term2 = ((n * sumx2) - (sumx * sumx));
double term3 = ((n * sumy2) - (sumy * sumy));
double term23 = (term2 * term3);
double r2 = 1.0;
if (fabs(term23) > MIN_DOUBLE) // Define MIN_DOUBLE somewhere as 1e-9 or similar
r2 = (term1 * term1) / term23;
As an assignment I had to code a simple linear regression in C using the RMSE loss function. The program is interactive: you can enter your own values and choose your own loss function, which for now is limited to Root Mean Square Error. But first, here are the formulas I used (as implemented in the code below):
teta_1 (slope) = sum((x_i - mean_x)*(y_i - mean_y)) / sum((x_i - mean_x)^2)
teta_0 (intercept) = mean_y - teta_1*mean_x
RMSE = sqrt( sum((predict_i - y_i)^2) / n )
R^2 = 1 - sum((y_i - predict_i)^2) / sum((y_i - mean_y)^2)
Now the code. You need gnuplot to display the chart: sudo apt install gnuplot
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <sys/types.h>
#define BUFFSIZE 64
#define MAXSIZE 100
static double vector_x[MAXSIZE] = {0};
static double vector_y[MAXSIZE] = {0};
static double vector_predict[MAXSIZE] = {0};
static double max_x;
static double max_y;
static double mean_x;
static double mean_y;
static double teta_0_intercept;
static double teta_1_grad;
static double RMSE;
static double r_square;
static double prediction;
static char intercept[BUFFSIZE];
static char grad[BUFFSIZE];
static char xrange[BUFFSIZE];
static char yrange[BUFFSIZE];
static char lossname_RMSE[BUFFSIZE] = "Simple Linear Regression using RMSE'";
static char cmd_gnu_0[BUFFSIZE] = "set title '";
static char cmd_gnu_1[BUFFSIZE] = "intercept = ";
static char cmd_gnu_2[BUFFSIZE] = "grad = ";
static char cmd_gnu_3[BUFFSIZE] = "set xrange [0:";
static char cmd_gnu_4[BUFFSIZE] = "set yrange [0:";
static char cmd_gnu_5[BUFFSIZE] = "f(x) = (grad * x) + intercept";
static char cmd_gnu_6[BUFFSIZE] = "plot f(x), 'data.temp' with points pointtype 7";
static char const *commands_gnuplot[] = {
cmd_gnu_0,
cmd_gnu_1,
cmd_gnu_2,
cmd_gnu_3,
cmd_gnu_4,
cmd_gnu_5,
cmd_gnu_6,
};
static size_t size;
static void user_input()
{
printf("Enter x,y vector size, MAX = 100\n");
scanf("%lu", &size);
if (size > MAXSIZE) {
printf("Wrong input size is too big\n");
user_input();
}
printf("vector's size is %lu\n", size);
size_t i;
for (i = 0; i < size; i++) {
printf("Enter vector_x[%ld] values\n", i);
scanf("%lf", &vector_x[i]);
}
for (i = 0; i < size; i++) {
printf("Enter vector_y[%ld] values\n", i);
scanf("%lf", &vector_y[i]);
}
}
static void display_vector()
{
size_t i;
for (i = 0; i < size; i++){
printf("vector_x[%lu] = %lf\t", i, vector_x[i]);
printf("vector_y[%lu] = %lf\n", i, vector_y[i]);
}
}
static void concatenate(char p[], char q[]) {
int c;
int d;
c = 0;
while (p[c] != '\0') {
c++;
}
d = 0;
while (q[d] != '\0') {
p[c] = q[d];
d++;
c++;
}
p[c] = '\0';
}
static void compute_mean_x_y()
{
size_t i;
double tmp_x = 0.0;
double tmp_y = 0.0;
for (i = 0; i < size; i++) {
tmp_x += vector_x[i];
tmp_y += vector_y[i];
}
mean_x = tmp_x / size;
mean_y = tmp_y / size;
printf("mean_x = %lf\n", mean_x);
printf("mean_y = %lf\n", mean_y);
}
static void compute_teta_1_grad()
{
double numerator = 0.0;
double denominator = 0.0;
double tmp1 = 0.0;
double tmp2 = 0.0;
size_t i;
for (i = 0; i < size; i++) {
numerator += (vector_x[i] - mean_x) * (vector_y[i] - mean_y);
}
for (i = 0; i < size; i++) {
tmp1 = vector_x[i] - mean_x;
tmp2 = tmp1 * tmp1;
denominator += tmp2;
}
teta_1_grad = numerator / denominator;
printf("teta_1_grad = %lf\n", teta_1_grad);
}
static void compute_teta_0_intercept()
{
teta_0_intercept = mean_y - (teta_1_grad * mean_x);
printf("teta_0_intercept = %lf\n", teta_0_intercept);
}
static void compute_prediction()
{
size_t i;
for (i = 0; i < size; i++) {
vector_predict[i] = teta_0_intercept + (teta_1_grad * vector_x[i]);
printf("y^[%ld] = %lf\n", i, vector_predict[i]);
}
printf("\n");
}
static void compute_RMSE()
{
compute_prediction();
double error = 0;
size_t i;
for (i = 0; i < size; i++) {
error = (vector_predict[i] - vector_y[i]) * (vector_predict[i] - vector_y[i]);
printf("error y^[%ld] = %lf\n", i, error);
RMSE += error;
}
/* mean */
RMSE = RMSE / size;
/* square root mean */
RMSE = sqrt(RMSE);
printf("\nRMSE = %lf\n", RMSE);
}
static void compute_loss_function()
{
int input = 0;
printf("Which loss function do you want to use?\n");
printf(" 1 - RMSE\n");
scanf("%d", &input);
switch(input) {
case 1:
concatenate(cmd_gnu_0, lossname_RMSE);
compute_RMSE();
printf("\n");
break;
default:
printf("Wrong input try again\n");
compute_loss_function();
}
}
static void compute_r_square(size_t size)
{
double num_err = 0.0;
double den_err = 0.0;
size_t i;
for (i = 0; i < size; i++) {
num_err += (vector_y[i] - vector_predict[i]) * (vector_y[i] - vector_predict[i]);
den_err += (vector_y[i] - mean_y) * (vector_y[i] - mean_y);
}
r_square = 1 - (num_err/den_err);
printf("R_square = %lf\n", r_square);
}
static void compute_predict_for_x()
{
double x = 0.0;
printf("Please enter x value\n");
scanf("%lf", &x);
prediction = teta_0_intercept + (teta_1_grad * x);
printf("y^ if x = %lf -> %lf\n",x, prediction);
}
static void compute_max_x_y()
{
size_t i;
double tmp1= 0.0;
double tmp2= 0.0;
for (i = 0; i < size; i++) {
if (vector_x[i] > tmp1) {
tmp1 = vector_x[i];
max_x = vector_x[i];
}
if (vector_y[i] > tmp2) {
tmp2 = vector_y[i];
max_y = vector_y[i];
}
}
printf("vector_x max value %lf\n", max_x);
printf("vector_y max value %lf\n", max_y);
}
static void display_model_line()
{
sprintf(intercept, "%0.7lf", teta_0_intercept);
sprintf(grad, "%0.7lf", teta_1_grad);
sprintf(xrange, "%0.7lf", max_x + 1);
sprintf(yrange, "%0.7lf", max_y + 1);
concatenate(cmd_gnu_1, intercept);
concatenate(cmd_gnu_2, grad);
concatenate(cmd_gnu_3, xrange);
concatenate(cmd_gnu_3, "]");
concatenate(cmd_gnu_4, yrange);
concatenate(cmd_gnu_4, "]");
printf("grad = %s\n", grad);
printf("intercept = %s\n", intercept);
printf("xrange = %s\n", xrange);
printf("yrange = %s\n", yrange);
printf("cmd_gnu_0: %s\n", cmd_gnu_0);
printf("cmd_gnu_1: %s\n", cmd_gnu_1);
printf("cmd_gnu_2: %s\n", cmd_gnu_2);
printf("cmd_gnu_3: %s\n", cmd_gnu_3);
printf("cmd_gnu_4: %s\n", cmd_gnu_4);
printf("cmd_gnu_5: %s\n", cmd_gnu_5);
printf("cmd_gnu_6: %s\n", cmd_gnu_6);
/* print plot */
FILE *gnuplot_pipe = popen("gnuplot -persistent", "w");
FILE *temp = fopen("data.temp", "w");
/* create data.temp */
size_t i;
for (i = 0; i < size; i++)
{
fprintf(temp, "%f %f \n", vector_x[i], vector_y[i]);
}
/* close data.temp so gnuplot reads a complete, flushed file */
fclose(temp);
/* display gnuplot */
for (i = 0; i < 7; i++)
{
fprintf(gnuplot_pipe, "%s \n", commands_gnuplot[i]);
}
pclose(gnuplot_pipe);
}
int main(void)
{
printf("===========================================\n");
printf("INPUT DATA\n");
printf("===========================================\n");
user_input();
display_vector();
printf("\n");
printf("===========================================\n");
printf("COMPUTE MEAN X:Y, TETA_1 TETA_0\n");
printf("===========================================\n");
compute_mean_x_y();
compute_max_x_y();
compute_teta_1_grad();
compute_teta_0_intercept();
printf("\n");
printf("===========================================\n");
printf("COMPUTE LOSS FUNCTION\n");
printf("===========================================\n");
compute_loss_function();
printf("===========================================\n");
printf("COMPUTE R_square\n");
printf("===========================================\n");
compute_r_square(size);
printf("\n");
printf("===========================================\n");
printf("COMPUTE y^ according to x\n");
printf("===========================================\n");
compute_predict_for_x();
printf("\n");
printf("===========================================\n");
printf("DISPLAY LINEAR REGRESSION\n");
printf("===========================================\n");
display_model_line();
printf("\n");
return 0;
}
Look at Section 1 of this paper. This section expresses a 2D linear regression as a matrix multiplication exercise. As long as your data is well-behaved, this technique should permit you to develop a quick least squares fit.
Depending on the size of your data, it might be worthwhile to algebraically reduce the matrix multiplication to a simple set of equations, thereby avoiding the need to write a matmult() function. (Be forewarned, this is completely impractical for more than 4 or 5 data points!)
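For the straight-line case, that algebraic reduction gives the familiar closed-form normal equations; a sketch (fit_line is a hypothetical name, and the arithmetic is essentially the same as linreg() above):
/* Solve the 2x2 normal equations (X^T X) beta = X^T y for y = b0 + b1*x,
   where X is the n x 2 design matrix whose rows are (1, x_i). */
static int fit_line(const double *x, const double *y, int n,
                    double *b0, double *b1)
{
    double s1 = n, sx = 0, sxx = 0, sy = 0, sxy = 0;
    for (int i = 0; i < n; i++) {
        sx  += x[i];
        sxx += x[i] * x[i];
        sy  += y[i];
        sxy += x[i] * y[i];
    }
    /* X^T X = [ s1  sx  ]        X^T y = [ sy  ]
               [ sx  sxx ]                [ sxy ]  */
    double det = s1 * sxx - sx * sx;       /* determinant of X^T X */
    if (det == 0.0)
        return -1;                         /* singular: all x values equal */
    *b0 = (sxx * sy - sx * sxy) / det;     /* intercept (Cramer's rule) */
    *b1 = (s1 * sxy - sx * sy) / det;      /* slope */
    return 0;
}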
The fastest, most efficient way to solve least squares, as far as I am aware, is to subtract (the gradient) / (the 2nd-order gradient) from your parameter vector (by 2nd-order gradient I mean the diagonal of the Hessian).
Here is the intuition:
Let's say you want to optimize least squares over a single parameter. This is equivalent to finding the vertex of a parabola. Then, for any random initial parameter x0, the vertex of the loss function is located at x0 - f'(x0) / f''(x0). That's because adding -f'(x0) / f''(x0) to x0 always zeroes out the derivative f'.
Side note: implementing this in TensorFlow, the solution appeared at w0 - f'(w0) / f''(w0) / (number of weights), but I'm not sure whether that is due to TensorFlow or to something else.
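To illustrate, here is a small sketch of that update for y = m*x + b, using the same sample data as the linreg() example above. Only the diagonal of the Hessian is used, so with two parameters the step is repeated until the values settle rather than jumping straight to the minimum; newton_step is a hypothetical helper name, not part of any answer's code.
#include <stdio.h>

/* One diagonal-Newton update of (m, b) for the loss L = sum_i (m*x[i] + b - y[i])^2:
   each parameter moves by -(dL/dparam) / (d2L/dparam2). */
static void newton_step(const double *x, const double *y, int n,
                        double *m, double *b)
{
    double g_m = 0, g_b = 0, h_m = 0, h_b = 0;
    for (int i = 0; i < n; i++) {
        double r = *m * x[i] + *b - y[i];   /* residual */
        g_m += 2.0 * r * x[i];              /* dL/dm   */
        g_b += 2.0 * r;                     /* dL/db   */
        h_m += 2.0 * x[i] * x[i];           /* d2L/dm2 */
        h_b += 2.0;                         /* d2L/db2 */
    }
    *m -= g_m / h_m;
    *b -= g_b / h_b;
}

int main(void)
{
    double x[6] = {1, 2, 4, 5, 10, 20};
    double y[6] = {4, 6, 12, 15, 34, 68};
    double m = 0.0, b = 0.0;
    /* The true Hessian has a nonzero m-b cross term, so a single step
       does not land exactly on the minimum; iterate until it settles. */
    for (int k = 0; k < 200; k++)
        newton_step(x, y, 6, &m, &b);
    printf("m=%g b=%g\n", m, b);   /* approaches m=3.43651, b=-0.888889 */
    return 0;
}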