Why does my debouncer not work? - C

I'm trying to write a debouncer that will only return a valid argument (>0) if it has been debounced (-1 for bouncing).
I've come up with this so far, but it always returns -1. Why is that?
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define BOUNCETIME 500000000 //(500ms)

#define SetTime(x) clock_gettime(CLOCK_REALTIME, (x)); // set time

static struct timespec tset;
struct timespec tnow;

int DeBounce(unsigned int arg)
{
    static int val = -1;
    long long nsec = 0;

    if (val < 0) {
        val = arg;
        SetTime(&tset);
        return arg;
    } else {
        SetTime(&tnow);
        if (tnow.tv_nsec < tset.tv_nsec)
            nsec = tnow.tv_nsec + 1000000000;
        else
            nsec = tnow.tv_nsec;
        if (tnow.tv_nsec - tset.tv_nsec > BOUNCETIME) {
            printf("arg okay\n");
            val = -1;
            return arg;
        }
        else
            printf("bounce, ignore!\n");
        return -1;
    }
}

int main (void)
{
    printf("#1 %d\n", DeBounce(0));
    usleep(1);
    printf("#2 %d\n", DeBounce(1));
    usleep(200);
    printf("#3 %d\n", DeBounce(1));
    sleep(1);
    printf("#4 %d\n", DeBounce(1));
}
The output I get is:
$ ./debounce
#1 0
bounce, ignore!
#2 -1
bounce, ignore!
#3 -1
bounce, ignore!
#4 -1
$

The usleep() delays in your test are a few hundred microseconds at most, but your debounce period is 500 milliseconds, so calls #2 and #3 are genuinely inside the bounce window.
Furthermore, tnow.tv_nsec - tset.tv_nsec is not correct: tv_nsec is not the full time value, only the nanoseconds past the current second, so whole elapsed seconds are lost in the comparison (and the wraparound correction you compute in nsec is never even used). That is why call #4 still reports a bounce after sleeping a full second. The correct way to calculate elapsed time in nanoseconds is something like this:
(tnow.tv_sec * 1.0e9 + tnow.tv_nsec) - (tset.tv_sec * 1.0e9 + tset.tv_nsec)
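For illustration, here is one way the whole check could look. This is only a sketch, not a drop-in replacement: it keeps your BOUNCETIME and the -1-while-bouncing convention, but the ElapsedNs helper and the armed flag are names I made up.
#include <time.h>

#define BOUNCETIME 500000000 // (500 ms, in nanoseconds)

/* Full elapsed time in nanoseconds between two timespecs */
static long long ElapsedNs(const struct timespec *later, const struct timespec *earlier)
{
    return (later->tv_sec - earlier->tv_sec) * 1000000000LL
         + (later->tv_nsec - earlier->tv_nsec);
}

int DeBounce(unsigned int arg)
{
    static struct timespec tset;
    static int armed = 0;               /* 0 until the first accepted edge */
    struct timespec tnow;

    clock_gettime(CLOCK_REALTIME, &tnow);
    if (!armed || ElapsedNs(&tnow, &tset) > BOUNCETIME) {
        tset = tnow;                    /* accept this edge and restart the bounce window */
        armed = 1;
        return (int)arg;
    }
    return -1;                          /* still inside the bounce window */
}
Using CLOCK_MONOTONIC instead of CLOCK_REALTIME would additionally keep the window immune to wall-clock adjustments.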

Information available after the `wait` call

I wrote a program that verifies that the getrusage() RUSAGE_CHILDREN flag retrieves certain information only about children for whom a wait call was made.
How can I modify the program so that it handles errors better? And is there a danger of a 2038 bug occurring on this line: printf("before: user CPU seconds = %ld\n", (long)usg.ru_utime.tv_sec); ?
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "print_rlimit.h"

int
main (void)
{
    switch (fork ()) {
    case -1:
        perror ("fork()");
        return 1;
    case 0: { // child
        time_t start, now;

        alarm (10);
        start = time (NULL);
        while (1) {
            now = time (NULL);
            if ((now - start) > 5)
                break;
        }
        _exit (0);
    }
    default: { // parent
        int ret;
        struct rusage usg;
        pid_t pid;

        sleep (2);
        ret = getrusage (RUSAGE_CHILDREN, &usg);
        if (ret == -1) {
            perror ("getrusage()");
            return 1;
        }
        printf ("before: user CPU seconds = %ld\n", (long)usg.ru_utime.tv_sec);
        pid = wait (NULL);
        if (pid == (pid_t)-1) {
            perror ("wait()");
            return 1;
        }
        ret = getrusage (RUSAGE_CHILDREN, &usg);
        if (ret == -1) {
            perror ("getrusage()");
            return 1;
        }
        printf ("after: user CPU seconds = %ld\n", (long)usg.ru_utime.tv_sec);
        break;
    }
    }
    return 0;
}
header:
#ifndef _PRINT_RLIMITS
#define _PRINT_RLIMITS
void print_rlimit (int resource);
#endif
2038 bug
time_t is not specified as a long, so do not cast to long, which may be only 32 bits and would narrow the time value.
// printf("before: user CPU seconds = %ld\n", (long)usg.ru_utime.tv_sec);
time_t is not even certain to be an integer count of seconds. A reasonable alternative is to cast to the widest integer type; this also accommodates an implementation that uses a wider integer type for time_t to sidestep the 2038 bug.
#include <inttypes.h>
printf (" before: user CPU seconds = %jd\n", (intmax_t) usg.ru_utime.tv_sec);
time_t quanta assumed
Rather than assume time_t is an integer count of seconds, use standard double difftime(time_t time1, time_t time0) which returns a difference in seconds regardless of time_t encoding.
// (now - start) > 5
difftime(now, start) > 5.0
Burning CPU
The loop while (1) { now = time (NULL); ... } burns lots of CPU while waiting. A better idea is to sleep for some time before trying again:
while (1) {
    now = time (NULL);
    double diff = difftime(now, start);
    if (diff > 5) {
        break;
    }
    diff *= 1000000; // microseconds
    usleep(diff/2);  // Sleep for half of that
}
A more advanced approach would use other system alarm routines.
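As one illustration (a sketch only, not part of the original program; the helper name is mine): rather than polling time() at all, the child could sleep for the whole interval in one nanosleep() call and resume if a signal interrupts it.
#include <time.h>
#include <errno.h>

/* Sleep roughly five seconds, resuming the leftover time if a signal interrupts us. */
static void sleep_about_five_seconds(void)
{
    struct timespec remaining = { 5, 0 };   /* 5 s, 0 ns */

    while (nanosleep(&remaining, &remaining) == -1 && errno == EINTR)
        ;   /* nanosleep updated 'remaining' with the time still left */
}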
Missing error handling
The loop below becomes infinite should time() return (time_t)-1 because the calendar time is not available. To fix, test for -1.
start = time (NULL);
while (1) {
    now = time (NULL);
    if ((now - start) > 5)
        break;
}
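A minimal sketch of that check (the error message and the _exit status are my own choice; difftime() is used as suggested above):
start = time (NULL);
if (start == (time_t)-1) {
    fprintf (stderr, "time() failed\n");
    _exit (1);
}
while (1) {
    now = time (NULL);
    if (now == (time_t)-1) {
        fprintf (stderr, "time() failed\n");
        _exit (1);
    }
    if (difftime (now, start) > 5.0)
        break;
}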
Minor: cast needed?
POSIX requires pid_t to be a signed integer type, so the cast is superfluous.
// pid == (pid_t)-1
pid == -1

Why is C nanosleep() not sleeping here?

I'm trying to write to the terminal one line at a time, but it just prints the whole thing without sleeping. It works if I use sleep(1). Am I just not understanding how nanosleep is supposed to work?
void
display_all(int fdin, int fdout)
{
    struct timespec tm1, tm2;
    tm1.tv_sec = 0;
    tm1.tv_nsec = 1000000000L;

    while (display_line(fdin, fdout) == 80)
    {
        nanosleep(&tm1, &tm2);
    }
}
display_line() uses write() to write to STDOUT.
From the nanosleep man page:
The value of the nanoseconds field must be in the range 0 to 999999999
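With tv_nsec set to 1000000000L the call therefore fails immediately with EINVAL, which is why there is no delay at all. For a one-second pause the request has to be split into whole seconds and leftover nanoseconds; here is a sketch of the corrected loop (the EINTR retry is optional but cheap, and needs <errno.h>):
void
display_all(int fdin, int fdout)
{
    struct timespec req = { 1, 0 };   /* 1 second, 0 nanoseconds: both fields in range */

    while (display_line(fdin, fdout) == 80)
    {
        struct timespec remaining = req;
        /* restart the sleep if a signal interrupts it */
        while (nanosleep(&remaining, &remaining) == -1 && errno == EINTR)
            ;
    }
}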
#include <stdio.h>
#include <time.h>

#define MILISECONDS 300
#define NLOOPS 10

void nsleep(long miliseconds) {
    /* note: only valid for miliseconds < 1000, since tv_nsec must stay below 1e9 */
    struct timespec ts = {0, miliseconds * 1000000L};
    nanosleep(&ts, NULL);
}

int main() {
    short i;
    for (i = 0; i < NLOOPS; i++)
        fflush(stdout),
        nsleep((long) MILISECONDS),
        printf("%d miliseconds\n", MILISECONDS);
    return 0;
}
Or:
void nsleep(long miliseconds) {
    struct timespec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = miliseconds * 1000000L;
    nanosleep(&ts, NULL);
}

Poor performance when multiple processes write to one message queue on Linux

I wrote a test program as follows:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/time.h>   /* gettimeofday */
#include <unistd.h>     /* usleep */
#include <time.h>

#define PACKET_SIZE 500
#define LOOP_COUNT 30000

int g_sndsucc = 0;
int g_sndfail = 0;
const int C_IPC_KEY = 0x00231a95;
const int COUNT_SIZE = 10000;
unsigned long g_count = 0;
unsigned long g_t1 = 0;
struct timeval s1, s2, s3, s4;

int main(int argc, char* argv[])
{
    int ipckey = C_IPC_KEY;
    if (argc > 1)
    {
        ipckey = atoi(argv[1]);
        printf("ipckey is %d\n", ipckey);
    }

    int qid = msgget(ipckey, IPC_CREAT | 0666);
    if (qid <= 0)
    {
        printf("msgget err: %d \n", errno);
        return 0;
    }

    char data[PACKET_SIZE];
    memset(data, 'a', PACKET_SIZE-1);
    data[PACKET_SIZE-1] = '\0';
    *((long *)data) = 0;   /* the message type lives in the first long */

    int ret = 0;
    struct timeval start;
    gettimeofday (&start, NULL);

    while (1)
    {
        *((long *)data) += 1;
        gettimeofday (&s1, NULL);
        ret = msgsnd(qid, data, PACKET_SIZE, 0);
        gettimeofday (&s2, NULL);
        if (ret != 0)
        {
            g_sndfail++;
        }
        else
        {
            g_sndsucc++;
        }

        g_count++;
        g_t1 += (s2.tv_sec-s1.tv_sec)*1000000 + (s2.tv_usec-s1.tv_usec);
        if (g_count >= 10000)
        {
            printf("STAT1: t1 : %f\n", 10000000000.0 / g_t1);
            g_count = 0;
            g_t1 = 0;
        }
        usleep(1000);
    }
    return 0;
}
I start 100 identical processes that call msgsnd, and on SUSE each process's msgsnd TPS only reaches 50/s.
But on AIX 5 the msgsnd TPS can reach 10000/s.
Does anyone know why System V IPC performance on Linux is so poor with multiple processes?
And how can the performance on Linux be increased?
By the way, the kernel version of SUSE is Linux 3.0.13.
I checked the source code of msgget in Linux 3.8.
When a thread does not get the message queue lock, it does not release the CPU and sleep for a while.
Instead it calls ipc_lock_by_ptr(&msq->q_perm) repeatedly.
So the CPU usage becomes very high, and the collision rate grows rapidly as the number of threads increases.
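A sketch of one possible userspace mitigation (not a verified fix; the queue count and helper name are made up, and it assumes the reading side can drain several queues): spread the senders over multiple queues so that fewer processes contend for the same q_perm lock.
#include <sys/ipc.h>
#include <sys/msg.h>
#include <unistd.h>

#define NQUEUES 8   /* hypothetical: number of queues to spread senders over */

/* Each sender picks one of NQUEUES queues based on its pid, so on average only
 * 1/NQUEUES of the processes contend for the same kernel-side queue lock. */
static int open_queue_for_sender(int base_key)
{
    int slot = (int)(getpid() % NQUEUES);
    return msgget(base_key + slot, IPC_CREAT | 0666);
}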

POSIX timer runs at twice the expected frequency

In order to create a high accuracy timer, I have written a module that instantiates a POSIX timer using the timer_create() function. It uses CLOCK_REALTIME as its clock kind, SIGEV_SIGNAL as notification method and SIGRTMIN as the signal number. Its signal handler does nothing but a sem_post(). The timer is started using timer_settime(), with any number of milliseconds as the timer interval.
The user of the module can wait for a timer-tick; the wait functionality is essentially implemented by a sem_wait(). My single-threaded test application creates the timer and starts it with the desired interval of i milliseconds. Then it loops, waiting x times for the timer to trigger. It uses gettimeofday() to time all this.
The expectation is that the total time for the loop would be x*i milliseconds. Instead, it takes exactly 0.5*x*i milliseconds. I have tried several combinations of x and i, with the total execution time of the test ranging from a few seconds to tens of seconds. The result is consistently that the timer runs at twice the expected/desired frequency.
This runs on CentOS 5.5 Linux 2.6.18-194.el5 #1 SMP Fri Apr 2 14:58:14 EDT 2010 x86_64 x86_64 x86_64 GNU/Linux with gcc 4.1.2
I have uploaded a stripped down version of the code which includes a script to compile the code and a test to reproduce the issue.
The code of the timer class itself is as follows:
/* PosixTimer: simple class for high-accuracy timer functionality */

/* Interface */
#include "PosixTimer.h"

/* Implementation */
#include <pthread.h>
#include <time.h>
#include <signal.h>
#include <semaphore.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define TIMER_SIGNAL SIGRTMIN

#define ALLOCATE_AND_CLEAR(pVar) \
    pVar = malloc(sizeof(*pVar)); \
    memset(pVar, 0, sizeof(*pVar))

#define FREE_AND_NULL(pVar) \
    free(pVar); \
    pVar = NULL

struct PosixTimerImpl {
    timer_t timerId;
    struct itimerspec timeOut;
    sem_t semaphore;
};
static void
PosixTimer_sigHandler(
    int sig,
    siginfo_t *info,
    void *ptr)
{
    PosixTimer *self = (PosixTimer *)(info->si_value.sival_ptr);
    if (NULL != self) {
        sem_post(&self->semaphore);
    }
}

static void
PosixTimer_setTimeoutValue(
    PosixTimer *self,
    unsigned int msecInterval)
{
    if (NULL != self) {
        self->timeOut.it_value.tv_sec = msecInterval / 1000;
        self->timeOut.it_value.tv_nsec = (msecInterval % 1000) * 1000000;
        self->timeOut.it_interval.tv_sec = msecInterval / 1000;
        self->timeOut.it_interval.tv_nsec = (msecInterval % 1000) * 1000000;
    }
}
/* Public methods */

/**
 * Constructor for the PosixTimer class. Ticks happen every <interval> and are not queued
 */
PosixTimer *
PosixTimer_new(
    unsigned int msecInterval)
{
    PosixTimer *self = NULL;
    int clockId = CLOCK_REALTIME;
    struct sigevent evp;
    int status;

    /* Construction */
    ALLOCATE_AND_CLEAR(self);

    /* Initialization */
    PosixTimer_setTimeoutValue(self, msecInterval);
    evp.sigev_signo = TIMER_SIGNAL;
    evp.sigev_notify = SIGEV_SIGNAL;
    evp.sigev_value.sival_ptr = self;
    status = timer_create(clockId, &evp, &self->timerId);
    if (0 == status) {
        sem_init(&self->semaphore, 0, 0);
    } else {
        printf("Error creating timer, retVal = %d\n", status);
        FREE_AND_NULL(self);
    }

    return self;
}
/**
 * Destructor
 */
void
PosixTimer_delete(
    PosixTimer *self)
{
    int status;

    sem_post(&self->semaphore);
    status = sem_destroy(&self->semaphore);
    if (0 != status) {
        printf("sem_destroy failed\n");
    }
    status = timer_delete(self->timerId);
    if (0 != status) {
        printf("timer_delete failed\n");
    }
    FREE_AND_NULL(self);
}
/**
 * Kick off timer
 */
void
PosixTimer_start(
    PosixTimer *self)
{
#define FLAG_RELATIVE 0
    int status;
    struct sigaction sa;

    sigemptyset(&sa.sa_mask);
    sigaddset(&sa.sa_mask, TIMER_SIGNAL);
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = PosixTimer_sigHandler;
    status = sigaction(TIMER_SIGNAL, &sa, NULL);
    if (0 != status) {
        printf("sigaction failed\n");
    } else {
        status = timer_settime(self->timerId, FLAG_RELATIVE,
                               &self->timeOut, NULL);
        if (0 != status) {
            printf("timer_settime failed\n");
        }
    }
}

/**
 * Wait for next timer tick
 */
void
PosixTimer_wait(
    PosixTimer *self)
{
    /* Just wait for the semaphore */
    sem_wait(&self->semaphore);
}
The test used to show the issue:
/* Simple test app to test PosixTimer */
#include "PosixTimer.h"

#include <sys/time.h>
#include <stdio.h>

int main(
    int argc,
    const char ** argv)
{
#define USEC_PER_MSEC (1000)
#define NSEC_PER_MSEC (1000000)
#define MSEC_PER_SEC (1000)
    PosixTimer *timer1 = NULL;
    struct timeval before, after;
    double dElapsedMsecs;
    int elapsedMsecs;
    int iCount1;

    printf("Running PosixTimer tests\n");

#define DURATION_MSEC (10000)
#define INTERVAL_MSEC_TEST1 (5)
#define ACCURACY_MSEC_TEST1 (100)
    timer1 = PosixTimer_new(INTERVAL_MSEC_TEST1);
    iCount1 = DURATION_MSEC/INTERVAL_MSEC_TEST1;
    printf("Running test: %d milliseconds in %d cycles\n", DURATION_MSEC, iCount1);
    gettimeofday(&before, NULL);
    PosixTimer_start(timer1);
    while (0 < iCount1) {
        PosixTimer_wait(timer1);
        //printf(".");
        iCount1--;
    }
    gettimeofday(&after, NULL);
    //printf("\n");

    dElapsedMsecs = (after.tv_sec - before.tv_sec) * MSEC_PER_SEC;
    dElapsedMsecs += (after.tv_usec - before.tv_usec) / USEC_PER_MSEC;
    elapsedMsecs = dElapsedMsecs + 0.5;

    if ((ACCURACY_MSEC_TEST1 > (elapsedMsecs - DURATION_MSEC)) &&
        (ACCURACY_MSEC_TEST1 > (DURATION_MSEC - elapsedMsecs))) {
        printf("success");
    } else {
        printf("failure");
    }
    printf(" (expected result in range (%d -- %d), got %d)\n",
           DURATION_MSEC - ACCURACY_MSEC_TEST1,
           DURATION_MSEC + ACCURACY_MSEC_TEST1,
           elapsedMsecs);

    return 0;
}
The result is
-bash-3.2$ ./DesignBasedTest
Running PosixTimer tests
Running test: 10000 milliseconds in 2000 cycles
failure (expected result in range (9900 -- 10100), got 5000)
The root cause of this problem was that sem_wait() was woken up twice per timer tick: once because it was interrupted by the signal, and once because it really needed to wake up due to the semaphore being released by sem_post(). Checking the return value of sem_wait() together with errno == EINTR resolved the issue:
/**
 * Wait for next timer tick
 */
int
PosixTimer_wait(
    PosixTimer *self)
{
    int result;

    /* Just wait for the semaphore; restart if the wait was merely
       interrupted by the timer signal (requires <errno.h>) */
    do {
        result = (0 == sem_wait(&self->semaphore));
        if (!result) {
            result = errno;
        }
    } while (EINTR == result);

    return result;
}
Thanks to Basile Starynkevitch for suggesting the use of strace, which revealed the cause of the problem.

How to use nanosleep() in C? What are `tim.tv_sec` and `tim.tv_nsec`?

What is the use of tim.tv_sec and tim.tv_nsec in the following?
How can I sleep execution for 500000 microseconds?
#include <stdio.h>
#include <time.h>

int main()
{
    struct timespec tim, tim2;
    tim.tv_sec = 1;
    tim.tv_nsec = 500;

    if (nanosleep(&tim , &tim2) < 0)
    {
        printf("Nano sleep system call failed \n");
        return -1;
    }

    printf("Nano sleep successfull \n");
    return 0;
}
Half a second is 500,000,000 nanoseconds, so your code should read:
tim.tv_sec = 0;
tim.tv_nsec = 500000000L;
As things stand, your code is sleeping for 1.0000005 s (1 s + 500 ns).
tv_nsec is the sleep time in nanoseconds. 500000us = 500000000ns, so you want:
nanosleep((const struct timespec[]){{0, 500000000L}}, NULL);
500000 microseconds are 500000000 nanoseconds. You only wait for 500 ns = 0.5 µs.
This worked for me ....
#include <stdio.h>
#include <time.h>   /* Needed for struct timespec */

int mssleep(long miliseconds)
{
    struct timespec rem;
    struct timespec req = {
        (int)(miliseconds / 1000),     /* secs (Must be Non-Negative) */
        (miliseconds % 1000) * 1000000 /* nano (Must be in range of 0 to 999999999) */
    };

    return nanosleep(&req , &rem);
}

int main()
{
    int ret = mssleep(2500);
    printf("sleep result %d\n", ret);

    return 0;
}
I usually use some #define and constants to make the calculation easy:
#define NANO_SECOND_MULTIPLIER 1000000 // 1 millisecond = 1,000,000 Nanoseconds
const long INTERVAL_MS = 500 * NANO_SECOND_MULTIPLIER;
Hence my code would look like this:
struct timespec sleepValue = {0};
sleepValue.tv_nsec = INTERVAL_MS;
nanosleep(&sleepValue, NULL);
A more correct variant restarts the sleep when a signal interrupts it; nanosleep() writes the time remaining into its second argument, so the loop resumes where it left off:
{
    struct timespec delta = {5 /*secs*/, 135 /*nanosecs*/};
    while (nanosleep(&delta, &delta));
}
POSIX 7
First find the function: http://pubs.opengroup.org/onlinepubs/9699919799/functions/nanosleep.html
That contains a link to time.h, which, as the header, is where the struct should be defined:
The <time.h> header shall declare the timespec structure, which shall include at least the following members:
time_t tv_sec Seconds.
long tv_nsec Nanoseconds.
man 2 nanosleep
Pseudo-official glibc docs which you should always check for syscalls:
struct timespec {
    time_t tv_sec;  /* seconds */
    long   tv_nsec; /* nanoseconds */
};
