I have 16 GB of RAM in my computer at school, running Ubuntu 13.10. I wrote a simple C program to get the total RAM, but it shows imprecise results: instead of 16 it prints 15. How can I make it precise, even for smaller RAM sizes? At home I have a computer with 768 MB of RAM, and my program shows wrong results there too :(
#include <sys/sysctl.h>
#include <sys/types.h>
#include <stdlib.h>
#include <stdio.h>
void getMemoryInfo()
{
    FILE *meminfo = fopen("/proc/meminfo", "r");
    int totalMemory = 0;
    if(meminfo == NULL)
    {
        exit(-1);
    }
    char buff[256];
    while(fgets(buff, sizeof(buff), meminfo))
    {
        int ramKB;
        if(sscanf(buff, "MemTotal: %d kB", &ramKB) == 1)
        {
            totalMemory = ramKB/1024.0;
            totalMemory = totalMemory/1024.0;
        }
    }
    if(fclose(meminfo) != 0)
    {
        exit(-1);
    }
    printf("%d\n", totalMemory);
}

int main()
{
    getMemoryInfo();
    return 0;
}
I once used this code... it works for most OSes:
#if defined(_WIN32)
#include <Windows.h>
#elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__))
#include <unistd.h>
#include <sys/types.h>
#include <sys/param.h>
#if defined(BSD)
#include <sys/sysctl.h>
#endif
#else
#error "Unable to define getMemorySize( ) for an unknown OS."
#endif
/**
* Returns the size of physical memory (RAM) in bytes.
*/
size_t getMemorySize( )
{
#if defined(_WIN32) && (defined(__CYGWIN__) || defined(__CYGWIN32__))
    /* Cygwin under Windows. ------------------------------------ */
    /* New 64-bit MEMORYSTATUSEX isn't available. Use old 32-bit. */
    MEMORYSTATUS status;
    status.dwLength = sizeof(status);
    GlobalMemoryStatus( &status );
    return (size_t)status.dwTotalPhys;

#elif defined(_WIN32)
    /* Windows. ------------------------------------------------- */
    /* Use new 64-bit MEMORYSTATUSEX, not old 32-bit MEMORYSTATUS. */
    MEMORYSTATUSEX status;
    status.dwLength = sizeof(status);
    GlobalMemoryStatusEx( &status );
    return (size_t)status.ullTotalPhys;

#elif defined(__unix__) || defined(__unix) || defined(unix) || (defined(__APPLE__) && defined(__MACH__))
    /* UNIX variants. ------------------------------------------- */
    /* Prefer sysctl() over sysconf() except sysctl() HW_REALMEM and HW_PHYSMEM. */

#if defined(CTL_HW) && (defined(HW_MEMSIZE) || defined(HW_PHYSMEM64))
    int mib[2];
    mib[0] = CTL_HW;
#if defined(HW_MEMSIZE)
    mib[1] = HW_MEMSIZE;            /* OSX. --------------------- */
#elif defined(HW_PHYSMEM64)
    mib[1] = HW_PHYSMEM64;          /* NetBSD, OpenBSD. --------- */
#endif
    int64_t size = 0;               /* 64-bit */
    size_t len = sizeof( size );
    if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 )
        return (size_t)size;
    return 0L;                      /* Failed? */

#elif defined(_SC_AIX_REALMEM)
    /* AIX. ----------------------------------------------------- */
    return (size_t)sysconf( _SC_AIX_REALMEM ) * (size_t)1024L;

#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
    /* FreeBSD, Linux, OpenBSD, and Solaris. -------------------- */
    return (size_t)sysconf( _SC_PHYS_PAGES ) *
           (size_t)sysconf( _SC_PAGESIZE );

#elif defined(_SC_PHYS_PAGES) && defined(_SC_PAGE_SIZE)
    /* Legacy. -------------------------------------------------- */
    return (size_t)sysconf( _SC_PHYS_PAGES ) *
           (size_t)sysconf( _SC_PAGE_SIZE );

#elif defined(CTL_HW) && (defined(HW_PHYSMEM) || defined(HW_REALMEM))
    /* DragonFly BSD, FreeBSD, NetBSD, OpenBSD, and OSX. -------- */
    int mib[2];
    mib[0] = CTL_HW;
#if defined(HW_REALMEM)
    mib[1] = HW_REALMEM;            /* FreeBSD. ----------------- */
#elif defined(HW_PHYSMEM)
    mib[1] = HW_PHYSMEM;            /* Others. ------------------ */
#endif
    unsigned int size = 0;          /* 32-bit */
    size_t len = sizeof( size );
    if ( sysctl( mib, 2, &size, &len, NULL, 0 ) == 0 )
        return (size_t)size;
    return 0L;                      /* Failed? */
#endif /* sysctl and sysconf variants */

#else
    return 0L;                      /* Unknown OS. */
#endif
}
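For the question's case, call it and convert in integer space; a minimal usage sketch (assuming the function above is compiled in):

#include <stdio.h>
#include <stddef.h>

size_t getMemorySize( );   /* from the listing above */

int main(void)
{
    size_t bytes = getMemorySize();
    /* Integer division: no floating-point truncation surprises. */
    printf("%zu MB\n", bytes / (1024 * 1024));
    return 0;
}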
What you're seeing is caused by floating-point inaccuracy. You must be careful when converting between floating-point values and integers, and when printing floating-point values.
Since floating-point types cannot represent every number exactly, the result is usually the closest representable value. What is likely happening is that instead of 16, totalMemory is something like 15.999999999987, and when that is converted to an int it gets truncated to 15.
There are two ways you can fix this. If you know totalMemory is divisible by 1024*1024, just use integer arithmetic; since you're storing the result in an integer anyway, you might as well. (This does not work for non-integer gigabyte counts: 768 MB cannot be expressed as an integral number of GB.)
The other option is to add an epsilon to compensate. In other words, instead of using totalMemory, use something like totalMemory + 1e-7. The epsilon is too small to make a meaningful difference, but it can push something like 15.999... up to 16.
By the way, floating-point trouble is only part of your problem. If you're going to use an integer, you probably want to report MB instead of GB. How would an integer represent something like 4.5 GB (rare as fractional-gigabyte machines are these days)?
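As a sketch, both options applied to the question's parsing loop (variable names follow the question; the round-to-nearest line is an extra variant beyond the two options above):

int ramKB;
if (sscanf(buff, "MemTotal: %d kB", &ramKB) == 1)
{
    /* Option 1: stay in integers and report MB. */
    int totalMB = ramKB / 1024;

    /* Option 2: add an epsilon before truncating to int. */
    double gb = ramKB / 1024.0 / 1024.0;
    int totalGB = (int)(gb + 1e-7);

    /* Variant: round to nearest, which also absorbs the memory the
       kernel reserves (MemTotal is a bit below the nominal size). */
    int nearestGB = (int)(gb + 0.5);

    printf("%d MB, %d GB (rounded: %d GB)\n", totalMB, totalGB, nearestGB);
}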
Related
Linux has an API to get the load-average metric: the getloadavg() call.
It obtains the 1-, 5-, and 15-minute load averages, but it seems this is the combined load across all CPUs (all CPU cores). How can I, from a regular C program, obtain the load average of a single (specific) CPU core on an SMP system?
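For reference, here is a minimal sketch of the whole-system getloadavg() call I mean; it only reports the combined load:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    double avg[3];
    /* Fills avg with the 1-, 5- and 15-minute system-wide load averages. */
    if (getloadavg(avg, 3) == -1) {
        fprintf(stderr, "getloadavg failed\n");
        return 1;
    }
    printf("1m: %.2f  5m: %.2f  15m: %.2f\n", avg[0], avg[1], avg[2]);
    return 0;
}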
To evaluate the average core load of an Intel CPU, you can read the IA32_TIME_STAMP_COUNTER and IA32_MPERF MSRs.
Pseudocode:
for id in core_size
    read_msr(IA32_TIME_STAMP_COUNTER, TSC_1[id], id)
    read_msr(IA32_MPERF, MPERF_1[id], id)

do_work()

for id in core_size
    read_msr(IA32_TIME_STAMP_COUNTER, TSC_2[id], id)
    read_msr(IA32_MPERF, MPERF_2[id], id)
    tsc_d = TSC_2[id] - TSC_1[id]
    mperf_d = MPERF_2[id] - MPERF_1[id]
    if tsc_d < mperf_d
        print "core" id "load: 100.0"
    else
        print "core" id "load:" 100.0 - ((tsc_d - mperf_d) * 100.0 / tsc_d)
Edit - added C code (just for core 0):
// gcc avg_load.c -std=c11 -o avg
// gcc avg_load.c -std=c11 -lmsr -L$LIBMSR_PATH/lib/ -I$LIBMSR_PATH/include/ -DHAVE_MSR_CORE_H -o avg
#ifdef HAVE_MSR_CORE_H
#include <msr_core.h>
#else
#include <sys/types.h>
#include <fcntl.h>
#include <stdint.h>
#endif
#include <stdio.h>
#include <unistd.h>
#ifndef IA32_MPERF
#define IA32_MPERF 0xE7
#endif
#ifndef IA32_TIME_STAMP_COUNTER
#define IA32_TIME_STAMP_COUNTER 0x10
#endif
static int read_msr(int cpuid, off_t MSR_REGISTER_address, uint64_t *MSR_REGISTER_bits)
{
#ifdef HAVE_MSR_CORE_H
    return read_msr_by_idx(cpuid, MSR_REGISTER_address, MSR_REGISTER_bits);
#else
    char msr_file_name[64];
    sprintf(msr_file_name, "/dev/cpu/%d/msr_safe", cpuid);
    int fd = open(msr_file_name, O_RDONLY);
    if (fd < 0)
    {
        fprintf(stderr, "open msr error [%d]\n", cpuid);
        return -1;
    }
    /* Read the 8-byte register. Note sizeof *MSR_REGISTER_bits (the
       pointed-to uint64_t), not sizeof of the pointer itself. */
    if (pread(fd, MSR_REGISTER_bits, sizeof *MSR_REGISTER_bits, MSR_REGISTER_address) != sizeof *MSR_REGISTER_bits)
    {
        fprintf(stderr, "read msr error [%d]\n", cpuid);
        close(fd);
        return -1;
    }
    close(fd);
    return 0;
#endif
}
int main()
{
#ifdef HAVE_MSR_CORE_H
    init_msr();
#endif
    uint64_t mperf_start, mperf_stop;
    uint64_t tsc_start, tsc_stop;

    read_msr(0, IA32_MPERF, &mperf_start);
    read_msr(0, IA32_TIME_STAMP_COUNTER, &tsc_start);
    sleep(1);
    read_msr(0, IA32_MPERF, &mperf_stop);
    read_msr(0, IA32_TIME_STAMP_COUNTER, &tsc_stop);

    uint64_t tsc_d = tsc_stop - tsc_start;
    uint64_t mperf_d = mperf_stop - mperf_start;

    if (tsc_d < mperf_d)
        printf("core 0 load: 100.0\n");
    else
        printf("core 0 load: %f %%\n", 100.0 - ((tsc_d - mperf_d) * 100.0 / tsc_d));

#ifdef HAVE_MSR_CORE_H
    finalize_msr();
#endif
    return 0;
}
The code expects msr_safe with the two MSRs used here in its allowlist. Otherwise, rewrite it to read from /dev/cpu/N/msr, which requires root privileges.
It works without other dependencies, or preferably with libmsr, which requires compiling with the -DHAVE_MSR_CORE_H flag.
I am using low-level I/O functions to fetch the size of a file in bytes and write it to stdout. I am on Windows 7 64-bit, using Visual Studio 2017 in x64 debug mode. The functions _filelength and _filelengthi64 are exclusive to Windows, yet when I use them they both return 0 for any file I open. Here is the full code, but the issue should only lie with _sopen_s() or _filelengthi64():
Header
#pragma once
// Headers
#include <io.h>
#include <string.h>
#include <sys\stat.h>
#include <share.h>
#include <fcntl.h>
#include <errno.h>
// Constants
#define stdout 1
#define stderr 2
// Macros
#define werror_exit { werror(); return 1; }
#define werr_exit(s) { _write(stderr, (s), (unsigned int)strlen((s))); return 1; }
// Declarations
extern void werror();
extern void wnum(__int64 num);
Source
#include "readbinaryfile.h"
int main(int argc, char **argv)
{
int fhandle;
__int64 fsize;
// open binary file as read only. deny sharing write permissions. allow write permissions if new file
if (_sopen_s(&fhandle, argv[1], _O_RDONLY | _O_BINARY, _SH_DENYWR, _S_IWRITE) == -1)
werror_exit
else if (fhandle == -1)
werr_exit("\nERROR: file does not exist...\n")
if (fsize = _filelengthi64(fhandle) == -1)
{
if (_close(fhandle) == -1)
werror_exit
werror_exit
}
if (_close(fhandle) == -1)
werror_exit
// write the file size to stdout
wnum(fsize);
return 0;
}
// fetch the string representation of the errno global variable and write it to stderr
void werror()
{
char bufstr[95];
size_t buflen = 95; // MSDN suggested number for errno string length
strerror_s(bufstr, buflen, errno);
_write(stderr, bufstr, (unsigned int)buflen);
_set_errno(0);
}
// recursively write the ascii value of each digit in a number to stdout
void wnum(__int64 num)
{
if (num / 10 == 0)
{
_write(stdout, &(num += 48), 1);
return;
}
wnum(num / 10);
_write(stdout, &((num %= 10) += 48), 1);
}
I have tried passing many different file paths to argv[1], yet they all still show an fsize of 0. In every case, fhandle was assigned a value of 3 by _sopen_s(), which indicates the file opened without error. I have verified the operation of wnum() and werror(). I appreciate the help!
_filelengthi64(fhandle) doesn't return 0. The expression _filelengthi64(fhandle) == -1, however, will (assuming a successful call), and that result is what gets assigned to fsize. You are overlooking C operator precedence: == binds more tightly than =. You will have to use parentheses to get the assignment you intended:
if ((fsize = _filelengthi64(fhandle)) == -1)
{
...
If you want to reduce the amount of mental energy required to write (and especially read) code, it is generally a good idea to isolate normal code logic from error handling, e.g.:
// Normal code flow
fsize = _filelengthi64(fhandle);
// Error handling code
if (fsize == -1)
{
...
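Applied to the question's main(), the separated version could look like this (a sketch, reusing the question's header and macros):

#include "readbinaryfile.h"

int main(int argc, char **argv)
{
    int fhandle;
    __int64 fsize;

    if (_sopen_s(&fhandle, argv[1], _O_RDONLY | _O_BINARY, _SH_DENYWR, _S_IWRITE) == -1)
        werror_exit
    else if (fhandle == -1)
        werr_exit("\nERROR: file does not exist...\n")

    // Normal code flow: fetch the length first...
    fsize = _filelengthi64(fhandle);

    // ...then handle errors separately.
    if (fsize == -1)
    {
        if (_close(fhandle) == -1)
            werror_exit
        werror_exit
    }
    if (_close(fhandle) == -1)
        werror_exit

    wnum(fsize);
    return 0;
}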
I am writing a real-time data-processing application (a spectrum analyzer using the fftw3 library) in C on Linux. My incoming data is currently taken from the hardware audio line input, and I use the PortAudio library to talk to the hardware (I do not currently use PortAudio's callbacks). I chose PortAudio because many audio-recording examples exist for it. RtAudio may offer lower latencies, but it is unfortunately written in C++, not C, so I would have multiple portability troubles. (Should I try another wrapper? Is there a direct way to capture the sound buffer, with examples?)
My setup works fine unless the DFT calculations take longer than it takes to fill the audio buffer with new data. Then data accumulates somewhere in the system, and the delay between audio input and picture appears and keeps growing. In spectrum analysis it is not possible to simply throw away a piece of data, so all I can do is warn the user about insufficient CPU horsepower. But here I have a problem.
There is a Pa_GetStreamReadAvailable function that reports how much unread data is available, but it is not working for me at all. I prepared a simple example, mostly based on the file www.kfr.co.il/files/speed_photo/complete.c:
#include <sys/ioctl.h>
#include <linux/parport.h>
#include <linux/ppdev.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <portaudio.h>
/* #define SAMPLE_RATE (17932) // Test failure to open with this value. */
#define SAMPLE_RATE (44100)
#define FRAMES_PER_BUFFER (1024)
#define NUM_SECONDS (5)
#define NUM_CHANNELS (2)
/* #define DITHER_FLAG (paDitherOff) */
#define DITHER_FLAG (0) /**/
/* Select sample format. */
#if 1
#define PA_SAMPLE_TYPE paFloat32
typedef float SAMPLE;
#define SAMPLE_SILENCE (0.0f)
#define PRINTF_S_FORMAT "%.8f"
#elif 1
#define PA_SAMPLE_TYPE paInt16
typedef short SAMPLE;
#define SAMPLE_SILENCE (0)
#define PRINTF_S_FORMAT "%d"
#elif 0
#define PA_SAMPLE_TYPE paInt8
typedef char SAMPLE;
#define SAMPLE_SILENCE (0)
#define PRINTF_S_FORMAT "%d"
#else
#define PA_SAMPLE_TYPE paUInt8
typedef unsigned char SAMPLE;
#define SAMPLE_SILENCE (128)
#define PRINTF_S_FORMAT "%d"
#endif
int running = 1;

void signalHandler(int sig)
{
    running = 0;
}

/*******************************************************************/
int main(void);
int main(void)
{
    printf("Initializing PortAudio...\n");
    PaStreamParameters inputParameters, outputParameters;
    PaStream *stream;
    PaError err;
    SAMPLE *recordedSamples;
    int i;
    int maxFrames;
    int numSamples;
    int numBytes;
    SAMPLE max, average, val;

    // Set ctrl-c handler
    signal(SIGINT, signalHandler);

    //totalFrames = NUM_SECONDS * SAMPLE_RATE; /* Record for a few seconds. */
    maxFrames = SAMPLE_RATE*1;
    numSamples = maxFrames * NUM_CHANNELS;
    numBytes = numSamples * sizeof(SAMPLE);
    recordedSamples = (SAMPLE *) malloc( numBytes );
    if( recordedSamples == NULL )
    {
        printf("Could not allocate record array.\n");
        exit(1);
    }
    for( i=0; i<numSamples; i++ ) recordedSamples[i] = 0;

    err = Pa_Initialize();
    if( err != paNoError ) goto error;

    inputParameters.device = Pa_GetDefaultInputDevice(); /* default input device */
    if (inputParameters.device == paNoDevice) {
        fprintf(stderr,"Error: No default input device.\n");
        goto error;
    }
    inputParameters.channelCount = NUM_CHANNELS;
    inputParameters.sampleFormat = PA_SAMPLE_TYPE;
    inputParameters.suggestedLatency = Pa_GetDeviceInfo( inputParameters.device )->defaultLowInputLatency;
    inputParameters.hostApiSpecificStreamInfo = NULL;

    /* Record some audio. -------------------------------------------- */
    err = Pa_OpenStream(
              &stream,
              &inputParameters,
              NULL,          /* &outputParameters, */
              SAMPLE_RATE,
              FRAMES_PER_BUFFER,
              paClipOff,     /* we won't output out of range samples so don't bother clipping them */
              NULL,          /* no callback, use blocking API */
              NULL );        /* no callback, so no callback userData */
    if( err != paNoError ) goto error;

    printf("Starting!\n\n");
    printf("Numbers should be increasing:\n");
    err = Pa_StartStream( stream );
    if( err != paNoError ) goto error;

    Pa_ReadStream(stream, recordedSamples, maxFrames);
    i = 1;
    while (i<8)
    {
        long toRead = Pa_GetStreamReadAvailable(stream);
        printf("%ld %d\n", toRead, maxFrames);
        if (toRead > maxFrames)
            toRead = maxFrames;
        err = Pa_ReadStream(stream, recordedSamples, toRead);
        if( err != paNoError ) goto error;

        // Here is the place for the heavy calculations,
        // which can take longer than filling one buffer.
        // (So data awaiting processing should be, and really is,
        // accumulated somewhere in a system/OS buffer.)
        // Emulate big delays:
        usleep(i*1000000);
        i++;
    }

    printf("Stopping PortAudio...\n");
    err = Pa_CloseStream( stream );
    if( err != paNoError ) goto error;
    free( recordedSamples );
    Pa_Terminate();
    return 0;

error:
    Pa_Terminate();
    fprintf( stderr, "An error occurred while using the portaudio stream\n" );
    fprintf( stderr, "Error number: %d\n", err );
    fprintf( stderr, "Error message: %s\n", Pa_GetErrorText( err ) );
    return -1;
}
I expect increasing numbers in the printout, but my results are clearly wrong:
598 44100
3071 44100
3071 44100
3071 44100
3071 44100
3071 44100
3071 44100
Using Pa_OpenDefaultStream in place of Pa_OpenStream gives different, equally wrong numbers (8191).
Where am I wrong?
Or is it a bug in PA? To be sure, I prefer to ask here first before filing a bug report.
Thanks.
P.S. Downgrading the PA libs to a previous version (for testing) is not possible: I can't compile this example against it on modern Ubuntu.
It is not clear to me that there is a bug here (apart from your FFT taking too long on your box).
Usually the audio subsystem has a small number of buffers (looks like 3 in your case: 3071 is just shy of 3 * 1024, where 1024 is your FRAMES_PER_BUFFER; 2 is another common value), and if you fail to keep up, it simply discards data from the least recently filled buffer. There is no ever-growing backlog of audio.
It is your responsibility to copy the data out of those buffers in time, and to buffer it in RAM or on disk if that is what your application needs.
I am somewhat surprised that a modern machine has any trouble with a 1024-point FFT at audio rate.
Regards, Dan.
I used your code as a starting point, but my stream needs to stay open. I was able to get this working, but you would need to move your FFT processing to a different thread, possibly using a ring buffer or another mechanism, because if you increase the sleep in this thread it will throw an input-underrun error.
while( ( err = Pa_IsStreamActive( stream ) ) == 1 )
{
long toRead = Pa_GetStreamReadAvailable(stream);
if (toRead > FRAMES_PER_BUFFER) toRead = FRAMES_PER_BUFFER;
if (toRead == FRAMES_PER_BUFFER) {
printf("available: %ld frames_per_buffer: %d\n", toRead, FRAMES_PER_BUFFER);
// You may get underruns or overruns if the output is not primed by PortAudio.
err = Pa_ReadStream( stream, recordedSamples, toRead );
if(err) goto xrun;
}
Pa_Sleep(10);
}
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
available: 1024 frames_per_buffer: 1024
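Building on that, here is a minimal single-producer/single-consumer ring-buffer sketch in plain C with pthreads. RING_SLOTS, ring_push and ring_pop are names invented for this sketch, and the memset in the capture thread stands in for the real Pa_ReadStream call:

#include <pthread.h>
#include <string.h>

#define RING_SLOTS   8        /* buffered blocks; sizing is illustrative */
#define BLOCK_FRAMES 1024     /* one block = FRAMES_PER_BUFFER frames */

static float ring[RING_SLOTS][BLOCK_FRAMES];
static int head = 0, tail = 0, count = 0;
static pthread_mutex_t lock      = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  not_empty = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  not_full  = PTHREAD_COND_INITIALIZER;

/* Capture thread: blocks if the FFT side has fallen RING_SLOTS behind. */
static void ring_push(const float *block)
{
    pthread_mutex_lock(&lock);
    while (count == RING_SLOTS)
        pthread_cond_wait(&not_full, &lock);
    memcpy(ring[head], block, sizeof ring[head]);
    head = (head + 1) % RING_SLOTS;
    count++;
    pthread_cond_signal(&not_empty);
    pthread_mutex_unlock(&lock);
}

/* FFT thread: consumes blocks at its own pace. */
static void ring_pop(float *block)
{
    pthread_mutex_lock(&lock);
    while (count == 0)
        pthread_cond_wait(&not_empty, &lock);
    memcpy(block, ring[tail], sizeof ring[tail]);
    tail = (tail + 1) % RING_SLOTS;
    count--;
    pthread_cond_signal(&not_full);
    pthread_mutex_unlock(&lock);
}

static void *capture_thread(void *arg)
{
    float block[BLOCK_FRAMES];
    for (int n = 0; n < 32; n++) {
        memset(block, 0, sizeof block);  /* stand-in for Pa_ReadStream(stream, block, BLOCK_FRAMES) */
        ring_push(block);
    }
    return NULL;
}

int main(void)
{
    pthread_t t;
    float block[BLOCK_FRAMES];
    pthread_create(&t, NULL, capture_thread, NULL);
    for (int n = 0; n < 32; n++) {
        ring_pop(block);
        /* run the FFT on block here */
    }
    pthread_join(t, NULL);
    return 0;
}

If the FFT side lags persistently, count climbs toward RING_SLOTS and the capture thread eventually blocks, so watching count is a natural place to hook the "not enough CPU" warning the question asks about.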
As an exercise, I wanted to translate some C code that uses many syscalls into Go. I found this nice code example on Unix & Linux Stack Exchange:
/*
 * List directories using getdents() because ls, find and Python libraries
 * use readdir(), which is slower (but uses getdents() underneath).
 *
 * Compile with
 * $ gcc getdents.c -o getdents
 */
#define _GNU_SOURCE
#include <dirent.h>     /* Defines DT_* constants */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/syscall.h>

#define handle_error(msg) \
    do { perror(msg); exit(EXIT_FAILURE); } while (0)

struct linux_dirent {
    long           d_ino;
    off_t          d_off;
    unsigned short d_reclen;
    char           d_name[];
};

#define BUF_SIZE 1024*1024*5

int
main(int argc, char *argv[])
{
    int fd, nread;
    char buf[BUF_SIZE];
    struct linux_dirent *d;
    int bpos;
    char d_type;

    fd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_DIRECTORY);
    if (fd == -1)
        handle_error("open");

    for ( ; ; ) {
        nread = syscall(SYS_getdents, fd, buf, BUF_SIZE);
        if (nread == -1)
            handle_error("getdents");
        if (nread == 0)
            break;

        for (bpos = 0; bpos < nread;) {
            d = (struct linux_dirent *) (buf + bpos);
            d_type = *(buf + bpos + d->d_reclen - 1);
            if( d->d_ino != 0 && d_type == DT_REG ) {
                printf("%s\n", (char *)d->d_name );
            }
            bpos += d->d_reclen;
        }
    }
    exit(EXIT_SUCCESS);
}
I have only been able to code this using ioutil.ReadDir, which defeats the purpose. Does anyone have an idea of how to translate this?
I would avoid using this code. As written, it's wrong: on 32-bit systems and maybe even some 64-bit ones, SYS_getdents is the legacy syscall that doesn't provide d_type and lacks support for 64-bit inode numbers, which means you get gratuitous errors on modern filesystems. Even if you fix it, it's no better than inlining readdir, which does basically exactly the same thing internally.
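If you do keep the raw-syscall approach, the 64-bit variant carries d_type in the record header instead of at the end of the record. A minimal sketch of the loop above reworked for SYS_getdents64, with the struct layout as documented in getdents(2):

#define _GNU_SOURCE
#include <dirent.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/syscall.h>

/* Layout of the records returned by getdents64(2). */
struct linux_dirent64 {
    uint64_t       d_ino;    /* 64-bit inode number */
    int64_t        d_off;    /* offset to next structure */
    unsigned short d_reclen; /* size of this record */
    unsigned char  d_type;   /* file type, filled in by the kernel */
    char           d_name[]; /* filename (null-terminated) */
};

int main(int argc, char *argv[])
{
    char buf[64 * 1024];
    int fd = open(argc > 1 ? argv[1] : ".", O_RDONLY | O_DIRECTORY);
    if (fd == -1) { perror("open"); exit(EXIT_FAILURE); }

    for (;;) {
        long nread = syscall(SYS_getdents64, fd, buf, sizeof buf);
        if (nread == -1) { perror("getdents64"); exit(EXIT_FAILURE); }
        if (nread == 0) break;

        for (long bpos = 0; bpos < nread; ) {
            struct linux_dirent64 *d = (struct linux_dirent64 *)(buf + bpos);
            if (d->d_type == DT_REG)
                printf("%s\n", d->d_name);
            bpos += d->d_reclen;
        }
    }
    exit(EXIT_SUCCESS);
}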
My question is somewhat different from others that have asked about fault addresses. I'm trying to implement a horrible hack to determine, from a signal handler, whether the signal interrupted a syscall or ordinary user code by inspecting the code at the saved instruction pointer and comparing it against the possible syscall entry instructions for the host architecture it's running on. This is part of implementing correct POSIX thread cancellation that does not suffer from the race condition and resource leak described in my old question:
How are POSIX cancellation points supposed to behave?
If this approach is unreliable or otherwise wrong, I'd also like to hear reasons.
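To make the idea concrete, here is the kind of check I have in mind on x86-64 (a sketch only: it ignores other architectures, and when the kernel arranges a syscall restart it rewinds the saved RIP to point at the syscall instruction itself, so I test both byte offsets):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

/* x86-64 only: does the saved RIP sit just after (or, if rewound for
   restart, right on) a `syscall` instruction (opcode 0F 05)? */
static int interrupted_in_syscall(const ucontext_t *uc)
{
    const unsigned char *ip =
        (const unsigned char *)uc->uc_mcontext.gregs[REG_RIP];
    return (ip[-2] == 0x0f && ip[-1] == 0x05) ||
           (ip[0]  == 0x0f && ip[1]  == 0x05);
}

static void handler(int sig, siginfo_t *info, void *ctx)
{
    /* fprintf is not async-signal-safe; demo only. */
    fprintf(stderr, "interrupted a syscall: %s\n",
            interrupted_in_syscall(ctx) ? "yes" : "no");
}

int main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_sigaction = handler;
    sa.sa_flags = SA_SIGINFO;   /* no SA_RESTART: read() returns EINTR */
    sigaction(SIGALRM, &sa, NULL);

    int fds[2];
    char c;
    pipe(fds);
    alarm(1);
    read(fds[0], &c, 1);        /* blocks in a syscall until SIGALRM */
    return 0;
}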
/* sigsegv.c */
/**
* This source file is used to print out a stack-trace when your program
* segfaults. It is relatively reliable and spot-on accurate.
*
* This code is in the public domain. Use it as you see fit, some credit
* would be appreciated, but is not a prerequisite for usage. Feedback
* on its use would encourage further development and maintenance.
*
* Due to a bug in gcc-4.x.x you currently have to compile as C++ if you want
* demangling to work.
*
* Please note that it's been ported into my ULS library, thus the check for
* HAS_ULSLIB and the use of the sigsegv_outp macro based on that define.
*
* Author: Jaco Kroon <jaco#kroon.co.za>
*
* Copyright (C) 2005 - 2010 Jaco Kroon
*/
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
/* Bug in gcc prevents from using CPP_DEMANGLE in pure "C" */
#if !defined(__cplusplus) && !defined(NO_CPP_DEMANGLE)
#define NO_CPP_DEMANGLE
#endif
#include <memory.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <ucontext.h>
#include <dlfcn.h>
#ifndef NO_CPP_DEMANGLE
#include <cxxabi.h>
#ifdef __cplusplus
using __cxxabiv1::__cxa_demangle;
#endif
#endif
#ifdef HAS_ULSLIB
#include "uls/logger.h"
#define sigsegv_outp(x) sigsegv_outp(,gx)
#else
#define sigsegv_outp(x, ...) fprintf(stderr, x "\n", ##__VA_ARGS__)
#endif
#if defined(REG_RIP)
# define SIGSEGV_STACK_IA64
# define REGFORMAT "%016lx"
#elif defined(REG_EIP)
# define SIGSEGV_STACK_X86
# define REGFORMAT "%08x"
#else
# define SIGSEGV_STACK_GENERIC
# define REGFORMAT "%x"
# include <execinfo.h>  /* backtrace() for the generic fallback below */
#endif
static void signal_segv(int signum, siginfo_t* info, void*ptr) {
    static const char *si_codes[3] = {"", "SEGV_MAPERR", "SEGV_ACCERR"};

    int i, f = 0;
    ucontext_t *ucontext = (ucontext_t*)ptr;
    Dl_info dlinfo;
    void **bp = 0;
    void *ip = 0;

    sigsegv_outp("Segmentation Fault!");
    sigsegv_outp("info.si_signo = %d", signum);
    sigsegv_outp("info.si_errno = %d", info->si_errno);
    sigsegv_outp("info.si_code = %d (%s)", info->si_code, si_codes[info->si_code]);
    sigsegv_outp("info.si_addr = %p", info->si_addr);
    for(i = 0; i < NGREG; i++)
        sigsegv_outp("reg[%02d] = 0x" REGFORMAT, i, ucontext->uc_mcontext.gregs[i]);

#ifndef SIGSEGV_NOSTACK
#if defined(SIGSEGV_STACK_IA64) || defined(SIGSEGV_STACK_X86)
#if defined(SIGSEGV_STACK_IA64)
    ip = (void*)ucontext->uc_mcontext.gregs[REG_RIP];
    bp = (void**)ucontext->uc_mcontext.gregs[REG_RBP];
#elif defined(SIGSEGV_STACK_X86)
    ip = (void*)ucontext->uc_mcontext.gregs[REG_EIP];
    bp = (void**)ucontext->uc_mcontext.gregs[REG_EBP];
#endif
    sigsegv_outp("Stack trace:");
    while(bp && ip) {
        if(!dladdr(ip, &dlinfo))
            break;

        const char *symname = dlinfo.dli_sname;
#ifndef NO_CPP_DEMANGLE
        int status;
        char * tmp = __cxa_demangle(symname, NULL, 0, &status);
        if (status == 0 && tmp)
            symname = tmp;
#endif
        sigsegv_outp("% 2d: %p <%s+%lu> (%s)",
                     ++f,
                     ip,
                     symname,
                     (unsigned long)ip - (unsigned long)dlinfo.dli_saddr,
                     dlinfo.dli_fname);
#ifndef NO_CPP_DEMANGLE
        if (tmp)
            free(tmp);
#endif
        if(dlinfo.dli_sname && !strcmp(dlinfo.dli_sname, "main"))
            break;

        ip = bp[1];
        bp = (void**)bp[0];
    }
#else
    sigsegv_outp("Stack trace (non-dedicated):");
    /* bt, sz and strings were missing from the paste; declared here. */
    void *bt[20];
    int sz = backtrace(bt, 20);
    char **strings = backtrace_symbols(bt, sz);
    for(i = 0; i < sz; ++i)
        sigsegv_outp("%s", strings[i]);
#endif
    sigsegv_outp("End of stack trace.");
#else
    sigsegv_outp("Not printing stack trace.");
#endif
    _exit (-1);
}
static void __attribute__((constructor)) setup_sigsegv() {
    struct sigaction action;
    memset(&action, 0, sizeof(action));
    action.sa_sigaction = signal_segv;
    action.sa_flags = SA_SIGINFO;
    if(sigaction(SIGSEGV, &action, NULL) < 0)
        perror("sigaction");
}
$ g++ -fPIC -shared -o libsigsegv.so sigsegv.c -ldl
$ export LD_PRELOAD=/path/to/libsigsegv.so
I found this code on a LUG; I couldn't get to the page to link the URL here, so I pasted the whole thing. This code prints a small stack trace when SIGSEGV occurs. I'm not sure if there is another way that does not use ucontext_t.
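On that last point: glibc's backtrace facility avoids touching ucontext_t directly, at the cost of the faulting frame's register state. A minimal sketch; backtrace_symbols_fd writes straight to a file descriptor, avoiding malloc in the handler (though backtrace itself may allocate on its first use):

#define _GNU_SOURCE
#include <execinfo.h>
#include <signal.h>
#include <string.h>
#include <unistd.h>

static void segv_handler(int sig)
{
    void *frames[32];
    int n = backtrace(frames, 32);
    /* Writes "binary(+offset) [addr]" lines to stderr without malloc. */
    backtrace_symbols_fd(frames, n, STDERR_FILENO);
    _exit(1);
}

int main(void)
{
    struct sigaction sa;
    memset(&sa, 0, sizeof sa);
    sa.sa_handler = segv_handler;
    sigaction(SIGSEGV, &sa, NULL);

    *(volatile int *)0 = 42;   /* force a segfault for the demo */
    return 0;
}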