OpenMP gives "Segmentation fault (core dumped)" | Laplace method in OpenMP - C

I am trying to write code for parallel calculation of the determinant of a matrix using the Laplace method in OpenMP, but I ran into an error: "Segmentation fault (core dumped)". Where did I make a mistake?
This is the code...
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <omp.h>
long double **nowaMacierz(int stopien);
void usunMacierz(long double **macierz);
long double metodaLaplace(long double **macierz, int stopien, int count);
int main() {
long double **macierz;
int stopien;
int count = 1;
int nr_wiersza, nr_kolumny;
printf("Proszę podać stopień macierzy n=");
fscanf(stdin, "%d", &stopien);
macierz = nowaMacierz(stopien);
for (nr_wiersza = 0; nr_wiersza < stopien; nr_wiersza++)
for (nr_kolumny = 0; nr_kolumny < stopien; nr_kolumny++) {
printf("A[%d,%d]=", nr_wiersza, nr_kolumny);
fscanf(stdin, "%Lf", &macierz[nr_wiersza][nr_kolumny]);
}
printf("det(A) = %Lf\n", metodaLaplace(macierz, stopien, count));
usunMacierz(macierz);
getchar();
return 0;
}
long double **nowaMacierz(int stopien) {
long double **macierz;
int nr_wiersza;
macierz = (long double **) calloc(stopien, sizeof(long double *));
*macierz = (long double *) calloc(stopien * stopien, sizeof(long double));
// The allocated memory addresses are arranged so that they form a two-dimensional array
for (nr_wiersza = 1; nr_wiersza < stopien; nr_wiersza++)
*(macierz + nr_wiersza) = *(macierz + nr_wiersza - 1) + stopien;
return macierz;
}
void usunMacierz(long double **macierz) {
free(*macierz), free(macierz);
return;
}
long double metodaLaplace(long double **macierz, int stopien, int count) {
long double **dopelnienie;
int nr_wiersza, nr_kolumny;
int nr_kolumny_dop, nr_kolumny_mac;
long double det = 0.00;
printf("test");
if (count == 0) {
printf("sekwenc");
if (stopien <= 2)
return macierz[0][0] * (stopien > 1 ? macierz[1][1] : 1) -
(stopien > 1 ? macierz[0][1] * macierz[1][0] : 0);
dopelnienie = nowaMacierz(stopien - 1);
for (nr_kolumny = 0; nr_kolumny < stopien; nr_kolumny++) {
for (nr_kolumny_dop = 0, nr_kolumny_mac = 0;
nr_kolumny_dop < stopien - 1; nr_kolumny_dop++, nr_kolumny_mac++) {
nr_kolumny_mac += (nr_kolumny_mac == nr_kolumny ? 1
: 0);
for (nr_wiersza = 0; nr_wiersza < stopien - 1; nr_wiersza++)
dopelnienie[nr_wiersza][nr_kolumny_dop] = macierz[nr_wiersza + 1][nr_kolumny_mac];
}
// det = Σ(a_ij * (-1)^(i+j) * A_ij)
det += (macierz[0][nr_kolumny] * (long double) pow(-1.0, 1.0 + nr_kolumny + 1.0) *
metodaLaplace(dopelnienie, stopien - 1, count - 1));
}
} else {
printf("parallel %d", omp_get_num_threads());
// A determinant of order at most 2 is computed directly from the definition
if (stopien <= 2)
return macierz[0][0] * (stopien > 1 ? macierz[1][1] : 1) -
(stopien > 1 ? macierz[0][1] * macierz[1][0] : 0);
#pragma omp parallel private(nr_wiersza, nr_kolumny, nr_kolumny_dop, nr_kolumny_mac, dopelnienie,stopien, macierz)
dopelnienie = nowaMacierz(stopien - 1);
#pragma omp parallel for
for (nr_kolumny = 0; nr_kolumny < stopien; nr_kolumny++) {
for (nr_kolumny_dop = 0, nr_kolumny_mac = 0;
nr_kolumny_dop < stopien - 1; nr_kolumny_dop++, nr_kolumny_mac++) {
nr_kolumny_mac += (nr_kolumny_mac == nr_kolumny ? 1
: 0);
for (nr_wiersza = 0; nr_wiersza < stopien - 1; nr_wiersza++)
dopelnienie[nr_wiersza][nr_kolumny_dop] = macierz[nr_wiersza + 1][nr_kolumny_mac];
}
// det = Σ(a_ij * (-1)^(i+j) * A_ij)
det += (macierz[0][nr_kolumny] * (long double) pow(-1.0, 1.0 + nr_kolumny + 1.0) *
metodaLaplace(dopelnienie, stopien - 1, count - 1));
}
}
usunMacierz(dopelnienie);
return det;
}
The problem appeared when I added the variable that receives the value from the recursive call to the "#pragma omp parallel private" clause.
Thank you for your help!
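A likely cause: variables listed in a #pragma omp parallel private clause are uninitialized inside the region and their values are discarded when it ends, so nowaMacierz is called with a garbage stopien, and the shared dopelnienie that the following #pragma omp parallel for dereferences is never assigned, hence the crash. The det += line is also a data race. A minimal sketch of one way to restructure the parallel branch (assuming the rest of the function stays as posted; the unconditional usunMacierz(dopelnienie) at the end would then have to be limited to the sequential branch, and dopelnienie_lok is an illustrative local name):
#pragma omp parallel for reduction(+:det)
for (nr_kolumny = 0; nr_kolumny < stopien; nr_kolumny++) {
    // variables declared inside the loop body are automatically private to each thread
    long double **dopelnienie_lok = nowaMacierz(stopien - 1);   // per-iteration minor
    int w, kol_dop, kol_mac;
    for (kol_dop = 0, kol_mac = 0; kol_dop < stopien - 1; kol_dop++, kol_mac++) {
        kol_mac += (kol_mac == nr_kolumny ? 1 : 0);   // skip the expanded column
        for (w = 0; w < stopien - 1; w++)
            dopelnienie_lok[w][kol_dop] = macierz[w + 1][kol_mac];
    }
    det += macierz[0][nr_kolumny] * (long double) pow(-1.0, 1.0 + nr_kolumny + 1.0) *
           metodaLaplace(dopelnienie_lok, stopien - 1, count - 1);
    usunMacierz(dopelnienie_lok);   // free the per-iteration minor
}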

Related

Implementing IFFT

Trying to implement an IFFT. If I set the input to this:
{0, 1, 0, 0, 0, 0, 0, 1}
it should transform it to a cosine with the lowest wave number. Instead I get this weird pattern:
bin 0: 2.000000 I 0.000000
bin 1: 0.000000 I 1.414214
bin 2: 0.000000 I 0.000000
bin 3: 0.000000 I 1.414214
bin 4: -2.000000 I 0.000000
bin 5: 0.000000 I -1.414214
bin 6: 0.000000 I 0.000000
bin 7: -0.000000 I -1.414214
I just took the FFT code and conjugated the twiddle factors. I'm told this should work fine (besides the scaling factor). I don't understand what is wrong.
main.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "utils.h"
COMPLEX_FLOAT * coefficients;
void calculate_coefficients(int N)
{
COMPLEX_FLOAT * co = (COMPLEX_FLOAT *)malloc(N/2 * sizeof(COMPLEX_FLOAT));
coefficients = co;
for(int i = 0; i < N/2; i++)
{
co[i] = cf_exp(-2.0f*PI*((float)i)/((float)N));
}
}
COMPLEX_FLOAT* ifftr(COMPLEX_FLOAT* input, int N, int multiplier, int offset)
{
COMPLEX_FLOAT *output;
COMPLEX_FLOAT *E, *O;
output = (COMPLEX_FLOAT *)malloc(N * sizeof(COMPLEX_FLOAT));
if(N == 1)
{
output[0] = input[offset];
}
else
{
E = fftr(input, N/2, multiplier*2, offset);
O = fftr(input, N/2, multiplier*2, multiplier + offset);
for(int i = 0; i < N/2; i++)
{
int index = i * multiplier;
COMPLEX_FLOAT tmp = cmul(conjugate(coefficients[index]),O[i]);
output[i] = cadd(E[i], tmp);
output[i + N/2] = csub(E[i], tmp);
}
free(E);
free(O);
}
return output;
}
void ifft(COMPLEX_FLOAT* input, COMPLEX_FLOAT* output, int N)
{
COMPLEX_FLOAT * out;
calculate_coefficients(N);
out = ifftr(input,N,1,0);
for(int i = 0; i < N; i++)
{
output[i] = out[i];
}
free(out);
free(coefficients);
return;
}
int main()
{
int size = 8;
COMPLEX_FLOAT dummy[size];
COMPLEX_FLOAT frq[size];
for(int i = 0; i < size; i++)
{
frq[i].imag = 0;
if(i == 2)
{
frq[i].real = 1;
}
else if(size - i == 1)
{
frq[i].real = 1;
}
else
{
frq[i].real = 0;
}
}
ifft(frq, dummy, size);
for(int i = 0; i < size; i++)
{
printf("bin %d: %f\t I %f\n", i, dummy[i].real, dummy[i].imag);
}
return 0;
}
utils.c
#include "utils.h"
#include "math.h"
COMPLEX_FLOAT cadd(COMPLEX_FLOAT a, COMPLEX_FLOAT b)
{
COMPLEX_FLOAT out;
out.real = a.real + b.real;
out.imag = a.imag + b.imag;
return out;
}
COMPLEX_FLOAT csub(COMPLEX_FLOAT a, COMPLEX_FLOAT b)
{
COMPLEX_FLOAT out;
out.real = a.real - b.real;
out.imag = a.imag - b.imag;
return out;
}
COMPLEX_FLOAT cmul(COMPLEX_FLOAT a, COMPLEX_FLOAT b)
{
COMPLEX_FLOAT out;
out.real = a.real * b.real - a.imag * b.imag;
out.imag = a.real * b.imag + b.real * a.imag;
return out;
}
COMPLEX_FLOAT cf_exp(float a)
{
COMPLEX_FLOAT out;
out.real = (float)cos(a);
out.imag = (float)sin(a);
return out;
}
COMPLEX_FLOAT conjugate(COMPLEX_FLOAT a)
{
COMPLEX_FLOAT out;
out.real = a.real;
out.imag = -a.imag;
return out;
}
COMPLEX_FLOAT real_num(float n)
{
COMPLEX_FLOAT out;
out.real = n;
out.imag = 0;
return out;
}
I was calling the wrong recursive function: fftr instead of ifftr.
COMPLEX_FLOAT* ifftr(COMPLEX_FLOAT* input, int N, int multiplier, int offset)
{
COMPLEX_FLOAT *output;
COMPLEX_FLOAT *E, *O;
output = (COMPLEX_FLOAT *)malloc(N * sizeof(COMPLEX_FLOAT));
if(N == 1)
{
output[0] = input[offset];
}
else
{
E = ifftr(input, N/2, multiplier*2, offset);
O = ifftr(input, N/2, multiplier*2, multiplier + offset);
for(int i = 0; i < N/2; i++)
{
int index = i * multiplier;
COMPLEX_FLOAT tmp = cmul(conjugate(coefficients[index]),O[i]);
output[i] = cadd(E[i], tmp);
output[i + N/2] = csub(E[i], tmp);
}
free(E);
free(O);
}
return output;
}
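As noted in the question, conjugating the twiddle factors still leaves out the 1/N factor of the inverse transform. A minimal sketch of applying it in the ifft wrapper, assuming COMPLEX_FLOAT exposes the .real and .imag fields used in main.c and utils.c:
void ifft(COMPLEX_FLOAT* input, COMPLEX_FLOAT* output, int N)
{
    COMPLEX_FLOAT *out;
    calculate_coefficients(N);
    out = ifftr(input, N, 1, 0);
    for (int i = 0; i < N; i++)
    {
        /* scale by 1/N so that ifft(fft(x)) reproduces x */
        output[i].real = out[i].real / (float)N;
        output[i].imag = out[i].imag / (float)N;
    }
    free(out);
    free(coefficients);
}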

Need help finding the mistake in the C programme

I am trying to calculate the value of sin(x) in C, but I am getting a black screen in Code::Blocks after execution; it is taking a long time to compile and execute.
#include<stdio.h>
float mult(float x, int m, int i) {
float a = x;
if (i == m) {
return x;
} else {
i++;
a = a * mult(x, m, i);
return a;
}
}
int fact(int m) {
printf("%d! ", m); fflush(stdout);
int b;
if (m == 1) {
return 1;
} else {
b = m * fact(m - 1);
return b;
}
}
float term(float x, int m) {
float a = 0, b = 0, c = 0;
int i = 0;
a = mult(x, m, i);
b = fact(m);
c = a / (1.0 * b);
return c;
}
float sinof(float x, int m, int n) {
float b = 0;
if (m >= 10) {
return (0);
} else {
printf("......%d ", m); fflush(stdout);
b = term(x, m);
m = m + 2;
n = -n;
b = b + (n * sinof(x, m, n));
return b;
}
}
int main() {
float x = 0, sin = 0;
int m = 1, n = 1;
printf("Enter the angle in radians:");
scanf("%f", &x);
sin = sinof(x, m, n);
printf("%f", sin);
}
I hope the logic is correct.
m is odd. The test below never becomes true, so it fails to stop the recursion.
if(m==10){ // Never true
return(0);
} else{
b=term(x,m);
m=m+2;n=-n; // ***********
b=b+(n*sinof(x,m,n));
return b;
}
I recommend the OP get their own code working first. There are various other issues.
For a simplified recursive sine(), see the code below.
static double my_sin_helper(double xx, double term, unsigned n) {
if (term + 1.0 == 1.0) {
return term;
}
return term - my_sin_helper(xx, xx *term / ((n + 1) * (n + 2)), n + 2);
}
// valid for [-pi/2, +pi/2]
double my_sin_primary(double x) {
return x * my_sin_helper(x * x, 1.0, 1);
}
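A short usage sketch for the helper above (the main below and the comparison against the library sin() are illustrative additions, assuming both functions sit in the same file):
#include <stdio.h>
#include <math.h>

int main(void) {
    double x = 0.5;  /* within the valid range [-pi/2, +pi/2] */
    printf("my_sin_primary(%.2f) = %.9f\n", x, my_sin_primary(x));
    printf("sin(%.2f)            = %.9f\n", x, sin(x));
    return 0;
}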

C - error: expected ';', ',' or ')' before '=' token

I need help. It's displaying "error: expected ';', ',' or ')' before '=' token"
at line 5 (at the getRandom function) when I try to build and run it. I've gone through it several times over and I can't seem to figure out what the problem could be.
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
double getRandom (double min = -1, double max = 1) {
return min + (rand() * (max - min) / RAND_MAX);
}
int totalPoints = 0;
int pointsInCircle = 0;
void *countPoints (void *X) {
for (int i = 0; i < totalPoints; i++) {
double X = getRandom();
double Y = getRandom();
if (X*X + Y*Y <= 1)pointsInCircle++;
}
return NULL;
}
int main()
{
srand(time(NULL));
pthread_t thread;
printf ("Enter total points for experiment : ");
scanf ("%d" , totalPoints);
pthread_create(&thread, NULL, &countPoints, NULL);
pthread_join(thread, NULL);
double PI = (4.0 * pointsInCircle) / totalPoints;
printf ("Approximate value for PI for total points %d is: %d " , totalPoints, PI);
return 0;
}
double getRandom (double min = -1, double max = 1) {
return min + (rand() * (max - min) / RAND_MAX);
}
This is not C. Default argument values are a C++ feature; in C, every parameter must be supplied explicitly.
double getRandom (double min, double max) {
return min + (rand() * (max - min) / RAND_MAX);
}
When you call it with no actual arguments in your code
double X = getRandom();
double Y = getRandom();
you should replace these calls with
double X = getRandom(-1, 1);
double Y = getRandom(-1, 1);
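If a call with no arguments is still wanted, the usual C workaround for the missing default arguments is a thin wrapper with the defaults baked in (getRandomUnit is a hypothetical helper, not part of the original code):
/* hypothetical wrapper standing in for the C++ default arguments */
double getRandomUnit(void) {
    return getRandom(-1, 1);
}

double X = getRandomUnit();
double Y = getRandomUnit();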

Segmentation fault in a C program

I am trying to implement the velocity Verlet method in C.
I thought I had it right, but 'Segmentation fault (core dumped)' pops up whenever I increase the size of the arrays x and y.
For sizes n of 1e3 and below it's fine, but at n = 1e4 the program gets the error.
Please, can anybody help me with this?
Thank you.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
double verlet(double t, double x)
{
double E = 0.252;
double B = 0.052;
double a = M_PI/2;
return -sin(x) + E*cos(t) + B*cos(2*t+a);
}
double pverlet(double(*f)(double, double), double dt, double t, double x, double y)
{
return x + dt*( y + (dt/2)*f(t, x));
}
double vverlet(double(*g)(double, double), double dt, double t, double x, double y)
{
return y + (dt/2) * g(t, x);
}
int main(void)
{
int i;
double t;
int n = 1e4;
double ti = 0, tf = 1e5, dt = (tf-ti)/n;
double *x = (double *) malloc(sizeof(double)*n);
double *y = (double *) malloc(sizeof(double)*2*n);
if (x == NULL)
{
printf("error allocating memory!\n");
return 1;
}
if (y == NULL)
{
printf("error allocating memory!\n");
return 1;
}
for (y[0] = 0, i = 1; i <2*n; i++)
{
y[i] = vverlet(verlet, dt, ti + dt*(i-1), x[i-1], y[i-1]);
}
for (x[0] = 0, i = 1; i < n; i++)
{
x[i] = pverlet(verlet, dt, ti + dt*(i-1), x[i-1], y[2*(i-1)]);
}
for (i = 0; i < n; i++)
{
t = ti + dt * i;
printf("%e %e %e\n", t, x[i], y[2*i]);
}
return 0;
free(x);
free(y);
}
for (y[0] = 0, i = 1; i <2*n; i++)
{
y[i] = vverlet(verlet, dt, ti + dt*(i-1), x[i-1], y[i-1]);
}
x is defined from 0 to n-1, but this loop runs i up to 2*n - 1 and reads x[i-1], so it indexes past the end of x. That out-of-bounds access is what causes the segmentation fault.
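A minimal sketch of one way to keep every index in range: advance position and velocity together in a single loop of length n, holding the half-step velocity in a local variable. This follows the standard velocity Verlet recurrence rather than the 2*n layout of the original, and v is a hypothetical array of length n replacing y:
for (x[0] = 0, v[0] = 0, i = 1; i < n; i++)
{
    double t0 = ti + dt * (i - 1);
    double vhalf = v[i-1] + (dt/2) * verlet(t0, x[i-1]); /* v(t + dt/2) */
    x[i] = x[i-1] + dt * vhalf;                          /* x(t + dt)   */
    v[i] = vhalf + (dt/2) * verlet(t0 + dt, x[i]);       /* v(t + dt)   */
}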

Perceptron learning algorithm not converging to 0

Here is my perceptron implementation in ANSI C:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
float randomFloat()
{
srand(time(NULL));
float r = (float)rand() / (float)RAND_MAX;
return r;
}
int calculateOutput(float weights[], float x, float y)
{
float sum = x * weights[0] + y * weights[1];
return (sum >= 0) ? 1 : -1;
}
int main(int argc, char *argv[])
{
// X, Y coordinates of the training set.
float x[208], y[208];
// Training set outputs.
int outputs[208];
int i = 0; // iterator
FILE *fp;
if ((fp = fopen("test1.txt", "r")) == NULL)
{
printf("Cannot open file.\n");
}
else
{
while (fscanf(fp, "%f %f %d", &x[i], &y[i], &outputs[i]) != EOF)
{
if (outputs[i] == 0)
{
outputs[i] = -1;
}
printf("%f %f %d\n", x[i], y[i], outputs[i]);
i++;
}
}
system("PAUSE");
int patternCount = sizeof(x) / sizeof(int);
float weights[2];
weights[0] = randomFloat();
weights[1] = randomFloat();
float learningRate = 0.1;
int iteration = 0;
float globalError;
do {
globalError = 0;
int p = 0; // iterator
for (p = 0; p < patternCount; p++)
{
// Calculate output.
int output = calculateOutput(weights, x[p], y[p]);
// Calculate error.
float localError = outputs[p] - output;
if (localError != 0)
{
// Update weights.
for (i = 0; i < 2; i++)
{
float add = learningRate * localError;
if (i == 0)
{
add *= x[p];
}
else if (i == 1)
{
add *= y[p];
}
weights[i] += add;
}
}
// Convert error to absolute value.
globalError += fabs(localError);
printf("Iteration %d Error %.2f %.2f\n", iteration, globalError, localError);
iteration++;
}
system("PAUSE");
} while (globalError != 0);
system("PAUSE");
return 0;
}
The training set I'm using: Data Set
I have removed all irrelevant code. Basically, what it does now is read the test1.txt file and load the values from it into three arrays: x, y, outputs.
Then there is a perceptron learning algorithm which, for some reason, is not converging to 0 (globalError should converge to 0), and therefore I get an infinite do-while loop.
When I use a smaller training set (like 5 points), it works pretty well. Any ideas where the problem could be?
I wrote this algorithm to be very similar to this C# perceptron algorithm.
EDIT:
Here is an example with a smaller training set:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
float randomFloat()
{
float r = (float)rand() / (float)RAND_MAX;
return r;
}
int calculateOutput(float weights[], float x, float y)
{
float sum = x * weights[0] + y * weights[1];
return (sum >= 0) ? 1 : -1;
}
int main(int argc, char *argv[])
{
srand(time(NULL));
// X coordinates of the training set.
float x[] = { -3.2, 1.1, 2.7, -1 };
// Y coordinates of the training set.
float y[] = { 1.5, 3.3, 5.12, 2.1 };
// The training set outputs.
int outputs[] = { 1, -1, -1, 1 };
int i = 0; // iterator
FILE *fp;
system("PAUSE");
int patternCount = sizeof(x) / sizeof(int);
float weights[2];
weights[0] = randomFloat();
weights[1] = randomFloat();
float learningRate = 0.1;
int iteration = 0;
float globalError;
do {
globalError = 0;
int p = 0; // iterator
for (p = 0; p < patternCount; p++)
{
// Calculate output.
int output = calculateOutput(weights, x[p], y[p]);
// Calculate error.
float localError = outputs[p] - output;
if (localError != 0)
{
// Update weights.
for (i = 0; i < 2; i++)
{
float add = learningRate * localError;
if (i == 0)
{
add *= x[p];
}
else if (i == 1)
{
add *= y[p];
}
weights[i] += add;
}
}
// Convert error to absolute value.
globalError += fabs(localError);
printf("Iteration %d Error %.2f\n", iteration, globalError);
}
iteration++;
} while (globalError != 0);
// Display network generalisation.
printf("X Y Output\n");
float j, k;
for (j = -1; j <= 1; j += .5)
{
for (j = -1; j <= 1; j += .5)
{
// Calculate output.
int output = calculateOutput(weights, j, k);
printf("%.2f %.2f %s\n", j, k, (output == 1) ? "Blue" : "Red");
}
}
// Display modified weights.
printf("Modified weights: %.2f %.2f\n", weights[0], weights[1]);
system("PAUSE");
return 0;
}
In your current code, the perceptron successfully learns the direction of the decision boundary BUT is unable to translate it.
  y                               y
  ^                               ^
  |  -     +  \\ +                |  -  \\  +   +
  |  -   + \\ +  +                |  -   \\ + + +
  |  - -      \\  +               |  - -  \\     +
  |  - -  +    \\ +               |  - -   \\  + +
  ---------------------> x        --------------------> x
     stuck like this                 need to get like this
The problem lies in the fact that your perceptron has no bias term, i.e. a third weight component connected to an input of value 1.
      w0   -----
 x ------>|     |
          |  f  |----> output (+1/-1)
 y ------>|     |
      w1   -----
              ^ w2
   1 (bias) --|
The following is how I corrected the problem:
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define LEARNING_RATE 0.1
#define MAX_ITERATION 100
float randomFloat()
{
return (float)rand() / (float)RAND_MAX;
}
int calculateOutput(float weights[], float x, float y)
{
float sum = x * weights[0] + y * weights[1] + weights[2];
return (sum >= 0) ? 1 : -1;
}
int main(int argc, char *argv[])
{
srand(time(NULL));
float x[208], y[208], weights[3], localError, globalError;
int outputs[208], patternCount, i, p, iteration, output;
FILE *fp;
if ((fp = fopen("test1.txt", "r")) == NULL) {
printf("Cannot open file.\n");
exit(1);
}
i = 0;
while (fscanf(fp, "%f %f %d", &x[i], &y[i], &outputs[i]) != EOF) {
if (outputs[i] == 0) {
outputs[i] = -1;
}
i++;
}
patternCount = i;
weights[0] = randomFloat();
weights[1] = randomFloat();
weights[2] = randomFloat();
iteration = 0;
do {
iteration++;
globalError = 0;
for (p = 0; p < patternCount; p++) {
output = calculateOutput(weights, x[p], y[p]);
localError = outputs[p] - output;
weights[0] += LEARNING_RATE * localError * x[p];
weights[1] += LEARNING_RATE * localError * y[p];
weights[2] += LEARNING_RATE * localError;
globalError += (localError*localError);
}
/* Root Mean Squared Error */
printf("Iteration %d : RMSE = %.4f\n",
iteration, sqrt(globalError/patternCount));
} while (globalError > 0 && iteration <= MAX_ITERATION);
printf("\nDecision boundary (line) equation: %.2f*x + %.2f*y + %.2f = 0\n",
weights[0], weights[1], weights[2]);
return 0;
}
... with the following output:
Iteration 1 : RMSE = 0.7206
Iteration 2 : RMSE = 0.5189
Iteration 3 : RMSE = 0.4804
Iteration 4 : RMSE = 0.4804
Iteration 5 : RMSE = 0.3101
Iteration 6 : RMSE = 0.4160
Iteration 7 : RMSE = 0.4599
Iteration 8 : RMSE = 0.3922
Iteration 9 : RMSE = 0.0000
Decision boundary (line) equation: -2.37*x + -2.51*y + -7.55 = 0
And here's a short animation of the code above using MATLAB, showing the decision boundary at each iteration.
It might help if you put the seeding of the random generator at the start of your main instead of reseeding on every call to randomFloat, i.e.
float randomFloat()
{
float r = (float)rand() / (float)RAND_MAX;
return r;
}
// ...
int main(int argc, char *argv[])
{
srand(time(NULL));
// X, Y coordinates of the training set.
float x[208], y[208];
Some small errors I spotted in your source code:
int patternCount = sizeof(x) / sizeof(int);
Better change this to
int patternCount = i;
so you don't have to rely on your x array having the right size.
You increment iteration inside the p loop, whereas the original C# code does this outside the p loop. Better to move the printf and the iteration++ outside the p loop, before the PAUSE statement. I'd also remove the PAUSE statement, or change it to
if ((iteration % 25) == 0) system("PAUSE");
Even after all those changes, your program still doesn't terminate on your data set, but the output is more consistent, giving an error oscillating somewhere between 56 and 60.
The last thing you could try is to test the original C# program on this dataset; if it also doesn't terminate, there's something wrong with the algorithm (your dataset looks correct, see my visualization comment).
globalError will not become exactly zero; it will converge towards zero as you said, i.e. it will become very small.
Change your loop like so:
int maxIterations = 1000000; //stop after one million iterations regardless
float maxError = 0.001; //one in thousand points in wrong class
do {
//loop stuff here
//convert to fractional error
globalError = globalError/((float)patternCount);
} while ((globalError > maxError) && (iteration < maxIterations));
Give maxIterations and maxError values applicable to your problem.
