Segmentation Fault in C (array related) - c

I wrote a program in C to solve Project Euler Problem 45 (https://projecteuler.net/problem=45), and I keep getting a segmentation fault (error 139). I am fairly sure it is not caused by accessing memory I don't have permission for.
My guess is that the problem is related to the sizes of my arrays. I looked up the answer and it is a ten-digit number; to reach it, the size of the array "triangle" has to be somewhere between one and two million elements. But when I make the array that big, I get the error. I don't get the error with the code below, since that array's size is 500,000 (which of course is not enough).
I use Ubuntu 16.04 and Geany.
If you need more information, please ask. Thanks in advance.
#include <stdio.h>
#include <stdlib.h>

unsigned long pentagonalgenerator(int n);
unsigned long trianglegenerator(int n);
unsigned long hexagonalgenerator(int n);
_Bool search_function(unsigned int to_be_looked_for, unsigned long array[], int sizeofarray);

int main(void)
{
    unsigned long pentagon[28000] = {0};
    int sizeofpentagon = 28000;
    unsigned long hexagon[100000] = {0};
    int sizeofhexagon = 100000;
    unsigned long triangle[500000] = {0};
    int sizeoftriangle = 500000;
    int counter;
    for(counter = 0; counter < sizeofpentagon; counter++)
    {
        pentagon[counter] = pentagonalgenerator(counter + 2);
    }
    for(counter = 0; counter < sizeofhexagon; counter++)
    {
        hexagon[counter] = hexagonalgenerator(counter + 2);
    }
    for(counter = 0; counter < sizeoftriangle; counter++)
    {
        triangle[counter] = trianglegenerator(counter + 2);
    }
    printf("%lu \n%lu \n%lu \n", hexagon[sizeofhexagon - 1], pentagon[sizeofpentagon - 1], triangle[sizeoftriangle - 1]);
    for(counter = 0; counter < sizeofhexagon; counter++)
    {
        if(search_function(hexagon[counter], pentagon, sizeofpentagon))
        {
            if(search_function(hexagon[counter], triangle, sizeoftriangle) && hexagon[counter] != 40755)
            {
                printf("%lu", hexagon[counter]);
                return 0;
            }
        }
    }
    return 1;
}

_Bool search_function(unsigned int to_be_looked_for, unsigned long array[], int sizeofarray)
{
    int left = 0, right = sizeofarray - 1, middle = 0;
    while(left <= right)
    {
        middle = (left + right) / 2;
        if(to_be_looked_for == array[middle]) return 1;
        else if(to_be_looked_for < array[middle]) right = middle - 1;
        else if(to_be_looked_for > array[middle]) left = middle + 1;
    }
    return 0;
}

unsigned long pentagonalgenerator(int n)
{
    unsigned int return_value = 0;
    return_value = (n*(3*n - 1)) / 2;
    return return_value;
}

unsigned long hexagonalgenerator(int n)
{
    unsigned int return_value = 0;
    return_value = n*(2*n - 1);
    return return_value;
}

unsigned long trianglegenerator(int n)
{
    unsigned int return_value = 0;
    return_value = (n*(n + 1)) / 2;
    return return_value;
}

That's a lot of memory for the stack. Instead of this
unsigned long pentagon[28000] = {0};
int sizeofpentagon = 28000;
unsigned long hexagon[100000] = {0};
int sizeofhexagon = 100000;
unsigned long triangle[500000] = {0};
int sizeoftriangle = 500000;
Try this:
unsigned long *pentagon = calloc(28000, sizeof(unsigned long));
int sizeofpentagon = 28000;
unsigned long *hexagon = calloc(100000, sizeof(unsigned long));
int sizeofhexagon = 100000;
unsigned long *triangle = calloc(500000, sizeof(unsigned long));
int sizeoftriangle = 500000;
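As a minimal sketch (keeping the names from the question; the NULL checks and the free calls are my addition, not something the original code had), the allocated version would look roughly like this:

#include <stdio.h>
#include <stdlib.h>

/* pentagonalgenerator, hexagonalgenerator, trianglegenerator and search_function as in the question */

int main(void)
{
    int sizeofpentagon = 28000;
    int sizeofhexagon = 100000;
    int sizeoftriangle = 500000;

    /* calloc(count, size) zero-fills the memory, which matches the old "= {0}" initializers */
    unsigned long *pentagon = calloc(sizeofpentagon, sizeof *pentagon);
    unsigned long *hexagon = calloc(sizeofhexagon, sizeof *hexagon);
    unsigned long *triangle = calloc(sizeoftriangle, sizeof *triangle);
    if (pentagon == NULL || hexagon == NULL || triangle == NULL)
    {
        fprintf(stderr, "allocation failed\n");
        free(pentagon); free(hexagon); free(triangle); /* free(NULL) is a no-op */
        return 1;
    }

    /* ... fill the arrays and run the search exactly as before ... */

    free(pentagon);
    free(hexagon);
    free(triangle);
    return 0;
}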

You have very large arrays defined as local variables on the stack, and that is what gives you the stack overflow: the arrays pentagon, hexagon and triangle are simply too big for it.
They need to be moved to file scope (global) or allocated dynamically. For your use case, moving the arrays to globals is the easier option.
unsigned long pentagon[28000] = {0};
unsigned long hexagon[100000] = {0};
unsigned long triangle[500000] = {0};
int main(void)
{
int sizeofpentagon = 28000;
int sizeofhexagon = 100000;
int sizeoftriangle = 500000;
....

The maximum size for automatic variables is an implementation-dependent detail, BUT the major implementations have options to set it.
For example, with gcc or clang, automatic variables live on the stack. On Windows/MinGW targets the stack size can be set at link time with the option --stack <size>, and the default is about 2 MB; on Linux the limit is normally controlled at run time with ulimit -s instead. Your arrays hold 628,000 unsigned long values, so you need at least 5 MB.
Provided the rest of the code has more modest stack requirements, I would try an 8 MB stack:
cc myprog.c -Wl,--stack -Wl,0x800000 -o myprog
(-Wl, is used to pass the argument on to the linker phase of the build.)
This avoids having to rework your code (for example, switching to dynamically allocated arrays) just to get past a runtime limit.
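If you want to see which stack limit your process actually runs under on Linux, a minimal sketch using the POSIX getrlimit call (my addition for illustration):

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    struct rlimit rl;
    if (getrlimit(RLIMIT_STACK, &rl) == 0)
    {
        /* rlim_cur is the soft limit the process is actually subject to */
        if (rl.rlim_cur == RLIM_INFINITY)
            printf("stack soft limit: unlimited\n");
        else
            printf("stack soft limit: %llu bytes\n", (unsigned long long)rl.rlim_cur);
    }
    return 0;
}

On a default Ubuntu install this typically prints 8388608 bytes (8 MB), which is why the arrays in the posted code (roughly 5 MB) just fit, while a one- to two-million element triangle array does not.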

Related

Why are only the first 2 outputs correct in my binary to decimal converter program?

I have to write a converter that takes the strings from numbers[] and outputs them as decimals.
I loop over each string and, for every digit, add the digit times 2 raised to the power of its position, then sum it all up. Like: 101 = 1*2^2 + 0*2^1 + 1*2^0
So I am currently stuck with this:
#include <stdio.h>
#include <math.h> // Compile with -lm : gcc -Wall -std=c11 dateiname.c -lm

int main() {
    char* numbers[] = {
        "01001001",
        "00101010",
        "010100111001",
        "011111110100101010010111",
        "0001010110011010101111101111010101110110",
        "01011100110000001101"};
    // Add here..
    int strlen(char *str){
        int len=0;
        for(;str[len]!='\0';len++){}
        return len;
    }
    int sum = 0;
    int length = sizeof(numbers) / sizeof(numbers[0]);
    for( int i = 0; i < length; i++ ){
        int size = strlen(numbers[i]);
        for (int j = 0; j < size; j++) {
            if(numbers[i][j] == '1'){
                sum += 1 * pow(2,j-1);
            }else{
                sum += 0 * pow(2,j-1);
            }
        }
        printf("%s to the base of 2 \nequals %d to the base of 10 \n\n",numbers[i], sum);
        sum = 0;
    }
    return 0;
}
The output of the first two loops is correct: 01001001 = 73 and 00101010 = 42. But as soon as the strings get longer, my output is completely wrong; e.g. 010100111001 = 1253 instead of 1337, and 011111110100101010010111 = 7645567 instead of 8342167.
There are a number of issues with your code. First and foremost, as pointed out in the comments, you are processing your binary digits from left-to-right, whereas you should be doing that right-to-left.
Second, declaring a function inside another one (as you have done for your strlen) is not Standard C (though some compilers may allow it). If you really can't use the standard strlen function (provided in <string.h>), then move your definition to outside (and before) the body of main.
Third, you shouldn't be using the pow function (which takes and returns double values) for integer arithmetic. Just use a running int variable and multiply that by two each time the inner for loop runs.
Fourth, your "0001010110011010101111101111010101110110" value will overflow the int type on most machines (assuming that is 32 bits), so try using long long int (most likely 64 bits) where necessary.
Finally, there's no point in adding 0 * x to anything, whatever x is, so you can do away with the else block.
Here's a working version (using the standard strlen):
#include <stdio.h>
#include <string.h> // For "strlen" - we don't need math.h if we don't use "pow".

int main(void) // For strict compliance, you should add the "void" argument list
{
    char* numbers[] = {
        "01001001",
        "00101010",
        "010100111001",
        "011111110100101010010111",
        "0001010110011010101111101111010101110110",
        "01011100110000001101" };
    long long int sum = 0; // So we can use more than 32 bits!
    size_t length = sizeof(numbers) / sizeof(numbers[0]);
    for (size_t i = 0; i < length; i++) {
        int size = (int)strlen(numbers[i]); // strlen gives a "size_t" type
        long long int p = 1;
        for (int j = size - 1; j >= 0; j--) { // Start at the END of the string and work backwards!
            if (numbers[i][j] == '1') {
                sum += p;
            }
            // No point in adding zero times anything!
            p *= 2; // Times by two each time through the loop
        }
        printf("%s to the base of 2 \nequals %lld to the base of 10 \n\n", numbers[i], sum);
        sum = 0;
    }
    return 0;
}
sizeof gives you the size of a data type (in bytes), not the length of a string.
You have to use the string function instead:
length = strlen(numbers[0]);
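As a small illustration of the difference (my own example, not code from the question):

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *s = "010100111001";
    printf("sizeof(s) = %zu\n", sizeof(s)); /* size of the pointer, e.g. 8 on a 64-bit machine */
    printf("strlen(s) = %zu\n", strlen(s)); /* number of characters before the '\0', here 12 */
    return 0;
}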
Your function is overly complicated and uses pow. You do not need to know the length of the string at all.
It can be done much more simply:
unsigned long long bstrtoint(const char *str)
{
    unsigned long long result = 0;
    while(*str)
    {
        result *= 2;
        result += *str++ == '1';
    }
    return result;
}
or, for any base (up to the number of digits in the table):
// bad digits are treated as zeroes; needs <string.h> for strchr and <ctype.h> for toupper
static const char digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";

unsigned long long strtoint(const char *str, unsigned base)
{
    unsigned long long result = 0;
    char *ppos;
    while(*str)
    {
        result *= base;
        result += (ppos = strchr(digits, toupper(*str++))) ? (ppos - digits < base) ? ppos - digits : 0 : 0;
    }
    return result;
}
Examples:
printf("%llu\n", bstrtoint("1111000011110000"));
printf("%llu\n", strtoint("0001010110011010101111101111010101110110", 2));
printf("%llu\n", strtoint("1dr45Xvy4", 36)); // base 36 number
https://godbolt.org/z/bsG5rfTsb
If you want to use your program layout and do it correctly:
int main(void) // For strict compliance, you should add the "void" argument list
{
    char* numbers[] = {
        "01001001",
        "00101010",
        "010100111001",
        "011111110100101010010111",
        "0001010110011010101111101111010101110110",
        "01011100110000001101" };
    unsigned long long sum = 0;
    size_t length = sizeof(numbers) / sizeof(numbers[0]);
    for (size_t i = 0; i < length; i++)
    {
        size_t size = strlen(numbers[i]); // strlen gives a "size_t" type
        sum = 0;
        for (size_t j = 0; j < size; j++)
        {
            sum *= 2;
            if (numbers[i][j] == '1')
            {
                sum += 1;
            }
        }
        printf("%s to the base of 2 \nequals %llu to the base of 10 \n\n", numbers[i], sum);
    }
    return 0;
}
but you do not have to iterate over your string twice - strlen is not needed at all:
int main(void) // For strict compliance, you should add the "void" argument list
{
    char* numbers[] = {
        "01001001",
        "00101010",
        "010100111001",
        "011111110100101010010111",
        "0001010110011010101111101111010101110110",
        "01011100110000001101" };
    unsigned long long sum = 0;
    size_t length = sizeof(numbers) / sizeof(numbers[0]);
    for (size_t i = 0; i < length; i++)
    {
        sum = 0;
        for (size_t j = 0; numbers[i][j] != 0; j++)
        {
            sum *= 2;
            if (numbers[i][j] == '1')
            {
                sum += 1;
            }
        }
        printf("%s to the base of 2 \nequals %llu to the base of 10 \n\n", numbers[i], sum);
    }
    return 0;
}

Split int into char array in C

I have an int, and I need to split it into a char array, so 2 chars in each array position. After that, I need to do the opposite process. This is the best I could come up with, but I still couldn't make it work. Any suggestions?
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int length = 10968;
    int bytesNeeded = sizeof(length) / 2;
    char *controlPacket = (char*)malloc(sizeof(char*)*bytesNeeded);
    for (int i = 0; i < bytesNeeded; i++)
    {
        controlPacket[i] = (length >> (8* i));
    }
    int newSize = 0;
    for (int i = 0; i < bytesNeeded; i++)
    {
        newSize += (controlPacket[i] << (8 * i));
    }
    printf("Newsize is: %d\n", newSize);
}
Change the variables that you're performing bitwise operations on to unsigned, and also mask the result of shifting before assigning to the array. Otherwise, you get overflow, which causes incorrect results (maybe undefined behavior, I'm not sure).
You also shouldn't divide sizeof(length) by 2. It will work for values that only use the low-order half of the number, but not for larger values; e.g. with length = 1096800; the result will be 48224 (1096800 modulo 65536).
#include <stdio.h>
#include <stdlib.h>

int main()
{
    unsigned int length = 10968;
    int bytesNeeded = sizeof(length);
    unsigned char *controlPacket = malloc(sizeof(unsigned char)*bytesNeeded);
    for (int i = 0; i < bytesNeeded; i++)
    {
        controlPacket[i] = (length >> (8* i) & 0xff);
    }
    unsigned int newSize = 0;
    for (int i = 0; i < bytesNeeded; i++)
    {
        newSize += (controlPacket[i] << (8 * i));
    }
    printf("Newsize is: %d\n", newSize);
    free(controlPacket);
}
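One further nuance (my addition, not part of the answer above): in the reassembly loop, controlPacket[i] is promoted to a signed int before the shift, so a byte of 0x80 or more shifted left by 24 can overflow int. Casting to unsigned int before shifting keeps the whole round trip in unsigned arithmetic; a minimal sketch:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned int length = 0xDEADBEEF;            /* a value with the top byte set, to exercise the problem case */
    int bytesNeeded = sizeof(length);
    unsigned char *controlPacket = malloc(bytesNeeded);
    if (controlPacket == NULL) return 1;

    for (int i = 0; i < bytesNeeded; i++)
        controlPacket[i] = (length >> (8 * i)) & 0xff;          /* split, least significant byte first */

    unsigned int newSize = 0;
    for (int i = 0; i < bytesNeeded; i++)
        newSize += (unsigned int)controlPacket[i] << (8 * i);   /* cast first, then shift */

    printf("Round trip: %u -> %u\n", length, newSize);
    free(controlPacket);
    return 0;
}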

Unable to sequentially process a file > 250kb using Windows file mapping with n-sized chunks

I am trying to write a program that takes a txt file and XORs every 4 bytes with a pre-defined number.
I am doing this by mapping the file into memory and opening chunks of the file of size n with MapViewOfFile.
The code I'm attaching works well for txt files smaller than 250 kB, but for files > 250 kB it only XORs some parts of the file, and I cannot understand why or how to fix it.
Can someone help me?
#include "stdafx.h"
#include "Windows.h"
#include <stdio.h>
#include <stdint.h>
#include <iso646.h>
#include <math.h>
unsigned int strToUl(char *s)
{
int size = 4;
unsigned int ul = 0;
memcpy(&ul, (unsigned int *)s, size);
return ul;
}
char *ulToStr(unsigned int *ul)
{
int size = 4;
char *tch = (char *)calloc(size, sizeof(char *));
memcpy(tch, (char *)ul, size);
return tch;
}
unsigned int uixor(unsigned int n, unsigned int seed)
{
srand(seed);
unsigned int mask = rand();
char ch[5] = { 0 };
strcpy_s(ch, 5, ulToStr(&n));
for (int j = 0; j < 5; j++)
{
ch[j] = ch[j] ^ mask;
}
return strToUl(ch);
}
BOOL mapWriteChunk(PHANDLE phFile, DWORD dwFileSize, int start, int buffsize, uint32_t xork)
{
DWORD offset = start;// / 4;// / sizeof(DWORD);
SYSTEM_INFO SysInfo;
GetSystemInfo(&SysInfo);
DWORD dwSysGran = SysInfo.dwAllocationGranularity;
DWORD dwFileMapStart = (offset/dwSysGran) * dwSysGran;
DWORD dwMapViewSize = (offset % dwSysGran) + buffsize;
DWORD dwFileMapSize = offset + buffsize;
unsigned int *ulMVBuffer = (unsigned int *)MapViewOfFile(*phFile, FILE_MAP_ALL_ACCESS, 0, dwFileMapStart, 0);
if (ulMVBuffer == NULL)
{
printf("ulMVBuffer = NULL\n");
}
int iViewDelta = offset - dwFileMapStart;
for (int i = 0; i < buffsize; i++)
{
unsigned int *u = (unsigned int *)ulMVBuffer + (iViewDelta + i);
unsigned int u1 = *u;
unsigned int u2 = uixor(u1, xork);
*u = u2;
printf("write on %d -> ", iViewDelta);
}
UnmapViewOfFile(ulMVBuffer);
return TRUE;
}
int main()
{
char name[] = "test.txt";
OFSTRUCT tOfStrIn;
tOfStrIn.cBytes = sizeof tOfStrIn;
HANDLE hFile = (HANDLE)OpenFile(name, &tOfStrIn, OF_READWRITE);
DWORD dwFileSize = GetFileSize(hFile, NULL);
HANDLE hFileMap = CreateFileMapping(hFile, NULL, PAGE_READWRITE, 0, dwFileSize, NULL);
if (hFileMap == NULL)
{
printf("hFileMap = NULL\n");
}
int pos = 0;
int chunk = 4;
int bSize = dwFileSize / sizeof(DWORD);
int rseed = 10;
for (pos = 0; pos < bSize; pos+=chunk)
{
mapWriteChunk(&hFileMap, dwFileSize, pos, chunk, rseed);
}
CloseHandle(hFile);
CloseHandle(hFileMap);
system("PAUSE");
return 0;
}
OK, I figured out the problem, and I'm writing it up here so anyone who has the same problem knows what's wrong.
Talk is cheap, so here is the code (and then I'll explain):
char *ulMVBuffer = (char *)MapViewOfFile(phFile, FILE_MAP_ALL_ACCESS, 0, dwFileMapStart, 0);
if (ulMVBuffer == NULL)
{
    printf("ulMVBuffer = NULL\n");
}
int iViewDelta = offset - dwFileMapStart;
unsigned int mask = myrand(xork);
for (int i = 0; i < buffsize; i++)
{
    unsigned int c = ulMVBuffer[iViewDelta + i] ^ mask;
    ulMVBuffer[iViewDelta + i] = c;
}
So the fix is to map the memory through a char pointer, and the reason is the pointer arithmetic: iViewDelta is a byte offset, but with an unsigned int pointer the expression ulMVBuffer + (iViewDelta + i) advances in units of 4 bytes, so the writes land on the wrong positions and walk past the intended chunk as the offsets grow. With a char pointer the same index addresses exactly the byte you mean.
Note that a statement like
unsigned int c = ulMVBuffer[iViewDelta + i] ^ mask;
still XORs one byte at a time: the char is promoted, XORed with the low bits of mask, and truncated back to a single byte on the store. The difference is that the loop now covers buffsize consecutive bytes of the view instead of touching only 1 byte out of every 4 as before.
If anyone has a better understanding of this or wants to add more to this solution, I will be more than happy to read it!
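If the goal really is to XOR the file in 4-byte units, as the question describes, a safe pattern is to copy each group of 4 bytes into a uint32_t, XOR it, and copy it back. A minimal sketch of that inner loop (my own illustration, assuming the buffer length is a multiple of 4):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* XOR a buffer in 4-byte units; buf must hold at least len bytes and len should be a multiple of 4 */
static void xor_u32_buffer(unsigned char *buf, size_t len, uint32_t mask)
{
    for (size_t i = 0; i + 4 <= len; i += 4)
    {
        uint32_t word;
        memcpy(&word, buf + i, 4);   /* avoids the alignment/aliasing issues of casting the pointer */
        word ^= mask;
        memcpy(buf + i, &word, 4);
    }
}

int main(void)
{
    unsigned char data[8] = "ABCDEFG";              /* 7 characters plus the terminating '\0' */
    xor_u32_buffer(data, sizeof data, 0x55AA55AA);
    xor_u32_buffer(data, sizeof data, 0x55AA55AA);  /* XORing twice restores the original */
    printf("%s\n", (char *)data);                   /* prints ABCDEFG again */
    return 0;
}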

What is the fastest way to list the elements of the unit group of a given size?

There are several fast algorithms to calculate the prime numbers up to a given number n. But what is the fastest implementation to list all the numbers r relatively prime to some number n in C? That is, find all the elements of the multiplicative group of integers modulo n as efficiently as possible in C. In particular, I am interested in the case where n is a primorial.
The n primorial is like the factorial, except that only prime numbers are multiplied together and all other numbers are ignored. So, for example, 12 primorial would be 12# = 11*7*5*3*2.
My current implementation is very naive. I hard-code the first 3 groups as arrays and use those to create the larger ones. Is there something faster?
#include "stdafx.h"
#include <stdio.h> /* printf, fgets */
#include <stdlib.h> /* atoi */
#include <math.h>
int IsPrime(unsigned int number)
{
if (number <= 1) return 0; // zero and one are not prime
unsigned int i;
unsigned int max=sqrt(number)+.5;
for (i = 2; i<= max; i++)
{
if (number % i == 0) return 0;
}
return 1;
}
unsigned long long primorial( int Primes[], int size)
{
unsigned long long answer = 1;
for (int k = 0;k < size;k++)
{
answer *= Primes[k];
}
return answer;
}
unsigned long long EulerPhi(int Primes[], int size)
{
unsigned long long answer = 1;
for (int k = 0;k < size;k++)
{
answer *= Primes[k]-1;
}
return answer;
}
int gcd( unsigned long long a, unsigned long long b)
{
while (b != 0)
{
a %= b;
a ^= b;
b ^= a;
a ^= b;
}
//Return whether a is relatively prime to b
if (a > 1)
{
return false;
}
return true;
}
void gen( unsigned long long *Gx, unsigned int primor, int *G3)
{
//Get the magic numbers
register int Blocks = 30; //5 primorial=30.
unsigned long long indexTracker = 0;
//Find elements using G3
for (unsigned long long offset = 0; offset < primor; offset+=Blocks)
{
for (int j = 0; j < 8;j++) //The 8 comes from EulerPhi(2*3*5=30)
{
if (gcd(offset + G3[j], primor))
{
Gx[indexTracker] = offset + G3[j];
indexTracker++;
}
}
}
}
int main(int argc, char **argv)
{
//Hardcoded values
int G1[] = {1};
int G2[] = {1,5};
int G3[] = {1,7,11,13,17,19,23,29};
//Lazy input checking. The world might come to an end
//when unexpected parameters are given. It's okay, we will live.
if (argc <= 1) {
printf("Nothing done.");
return 0;
}
//Convert argument to integer
unsigned int N = atoi(argv[1]);
//Known values
if (N <= 2 )
{
printf("{1}");
return 0;
}
else if (N<=4)
{
printf("{1,5}");
return 0;
}
else if (N <=6)
{
printf("{1,7,11,13,17,19,23,29}");
return 0;
}
//Hardcoded for simplicity, also this primorial is ginormous as it is.
int Primes[50] = {0};
int counter = 0;
//Find all primes less than N.
for (int a = 2; a <= N; a++)
{
if (IsPrime(a))
{
Primes[counter] = a;
printf("\n Prime: : %i \n", a);
counter++;
}
}
//Get the group size
unsigned long long MAXELEMENT = primorial(Primes, counter);
unsigned long long Gsize = EulerPhi(Primes, counter);
printf("\n GSize: %llu \n", Gsize);
printf("\n GSize: %llu \n", Gsize);
//Create the list to hold the values
unsigned long long *GROUP = (unsigned long long *) calloc(Gsize, sizeof(unsigned long long));
//Populate the list
gen( GROUP, MAXELEMENT, G3);
//Print values
printf("{");
for (unsigned long long k = 0; k < Gsize;k++)
{
printf("%llu,", GROUP[k]);
}
printf("}");
return 0;
}
If you are looking for a faster prime number check, here is one that is reasonably fast and eliminates all calls to computationally intensive functions (e.g. sqrt, etc.):

int isprime (int v)
{
    int i;

    if (v < 0) v = -v;                          /* ensure v is non-negative */
    if (v < 2 || (v > 2 && !((unsigned)v & 1))) /* 0, 1, and even numbers > 2 are not prime */
        return 0;

    for (i = 2; i * i <= v; i++)
        if (v % i == 0)
            return 0;

    return 1;
}

(note: You can adjust the type as required if you are looking for numbers above the standard int range.)
Give it a try and let me know how it compares to the one you are currently using.
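If generating all the primes up to N is the bottleneck, rather than testing individual numbers, a sieve of Eratosthenes is the usual next step. A minimal sketch (my addition for illustration; it assumes the limit n is small enough that an array of n+1 flags fits in memory):

#include <stdio.h>
#include <stdlib.h>

/* Returns a calloc'd array of n+1 flags where flags[i] != 0 means "i is not prime". */
static unsigned char *sieve(unsigned int n)
{
    unsigned char *composite = calloc((size_t)n + 1, 1);
    if (composite == NULL)
        return NULL;
    composite[0] = 1;
    if (n >= 1) composite[1] = 1;
    for (unsigned int i = 2; (unsigned long long)i * i <= n; i++)
        if (!composite[i])
            for (unsigned long long j = (unsigned long long)i * i; j <= n; j += i)
                composite[j] = 1;
    return composite;
}

int main(void)
{
    unsigned int n = 30;
    unsigned char *composite = sieve(n);
    if (composite == NULL)
        return 1;
    for (unsigned int i = 2; i <= n; i++)
        if (!composite[i])
            printf("%u ", i);   /* prints 2 3 5 7 11 13 17 19 23 29 */
    printf("\n");
    free(composite);
    return 0;
}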

Computer freezes when more memory is malloced

I am trying to run a C program which mallocs memory according to the input given by the user.
Whenever I input something as big as 1000000000, instead of malloc returning NULL, my Ubuntu 14.04 machine freezes completely! I am damn sure that malloc is the culprit...
But I am surprised to see Ubuntu freeze!
Does anyone have any idea why this may be happening?
I have a laptop with 12 GB RAM, an i5 processor, a 500 GB hard disk, and Ubuntu 14.04.
Here is the code:
#include<stdio.h>
#include<stdlib.h>
#define LEFT(x) (2*(x)+1)
#define RIGHT(x) (2*(x)+2)
long long int *err, *sorted, *size, *id;
short int *repeat;
void max_heapify(long long int *arr, long long int length, long long int index)
{
long long int largest, left, right, temp, flag = 1;
while (flag)
{
left = LEFT(index);
right = RIGHT(index);
if (left < length && arr[left] > arr[index])
largest = left;
else
largest = index;
if (right < length && arr[right] > arr[largest])
largest = right;
if (largest != index)
{
temp = arr[index];
arr[index] = arr[largest];
arr[largest] = temp;
index = largest;
}
else
flag = 0;
}
}
void build_max_heap(long long int *arr, long long int length)
{
long long int i, j;
j = (length / 2) - 1;
for (i = j; i >= 0; i--)
max_heapify(arr, length, i);
}
void heapsort(long long int *arr, long long int length)
{
long long int i, temp, templength;
build_max_heap(arr, length);
templength = length;
for (i = 0; i < templength; i++)
{
temp = arr[0]; // maximum number
arr[0] = arr[length - 1];
arr[length - 1] = temp;
length--;
max_heapify(arr, length, 0);
}
}
int main()
{
long long int n, k, p, i, j;
scanf("%lld%lld%lld",&n, &k, &p);
err = (long long int*)malloc((n + 1) * sizeof(long long int));
//repeat = (short int*)calloc(1000000001 , sizeof(short int));
sorted = (long long int*)malloc((n + 1) * sizeof(long long int));
j = 0;
for(i = 0; i < n; i++)
{
scanf("%lld",&err[i]);
sorted[j++] = err[i];
}
heapsort(sorted, j);
for(i = 0; i < j; i++)
printf("%lld, ",sorted[i]);
//These malloc statements cause the problem!!
id = (long long int*)malloc((sorted[j - 1] + 1) * sizeof(long long int));
size = (long long int*)malloc((sorted[j - 1] + 1) * sizeof(long long int));
for(i = 0; i <= sorted[j - 1]; i++)
{
id[i] = i;
size[i] = 1;
}
return 0;
}
Basically I am trying to sort the numbers and then allocate arrays whose size is the maximum element. This program works for smaller inputs, but when I enter this:
5 5 5
1000000000 999999999 999999997 999999995 999999994
it freezes Ubuntu. I even added a check for whether id or size is NULL, but that didn't help! If the system is unable to allocate that much memory then malloc should return NULL, but instead the system freezes! And this code works fine on a Mac!
Thanks!
