I wrote a program that verifies that the getrusage() RUSAGE_CHILDREN flag retrieves certain information only about children for which a wait call was made.
How can I modify the program so that its error handling is more robust? Is there also a danger of a 2038 bug occurring on this line: printf("before: user CPU seconds = %ld\n", (long) usg.ru_utime.tv_sec);?
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <sys/resource.h>
#include "print_rlimit.h"
int
main (void)
{
    switch (fork ()) {
    case -1:
        perror ("fork()");
        return 1;
    case 0: { // child
        time_t start, now;
        alarm (10);
        start = time (NULL);
        while (1) {
            now = time (NULL);
            if ((now - start) > 5)
                break;
        }
        _exit (0);
    }
    default: { // parent
        int ret;
        struct rusage usg;
        pid_t pid;
        sleep (2);
        ret = getrusage (RUSAGE_CHILDREN, &usg);
        if (ret == -1) {
            perror ("getrusage()");
            return 1;
        }
        printf ("before: user CPU seconds = %ld\n", (long)usg.ru_utime.tv_sec);
        pid = wait (NULL);
        if (pid == (pid_t)-1) {
            perror ("wait()");
            return 1;
        }
        ret = getrusage (RUSAGE_CHILDREN, &usg);
        if (ret == -1) {
            perror ("getrusage()");
            return 1;
        }
        printf ("after: user CPU seconds = %ld\n", (long)usg.ru_utime.tv_sec);
        break;
    }
    }
    return 0;
}
header:
#ifndef _PRINT_RLIMITS
#define _PRINT_RLIMITS
void print_rlimit (int resource);
#endif
2038 bug
time_t is not specified as a long, so do not cast to long, which may be only 32 bits and would narrow the time value:
// printf ("before: user CPU seconds = %ld\n", (long) usg.ru_utime.tv_sec);
time_t is not even certainly an integer count of seconds. A reasonable alternative is to cast to the widest integer type; this accommodates an implementation that uses a wider integer type for time_t to handle the 2038 bug.
#include <inttypes.h>
printf ("before: user CPU seconds = %jd\n", (intmax_t) usg.ru_utime.tv_sec);
time_t quanta assumed
Rather than assume time_t is an integer count of seconds, use the standard double difftime(time_t time1, time_t time0), which returns the difference in seconds regardless of the time_t encoding.
// (now - start) > 5
difftime(now, start) > 5.0
Burning CPU
while (1) { now = time (NULL); ... } burns lots of CPU ticks while waiting. A better approach sleeps for some time before checking again:
while (1) {
    now = time (NULL);
    double remaining = 5.0 - difftime (now, start);
    if (remaining <= 0.0) {
        break;
    }
    // Sleep for half of the remaining time, then re-check.
    usleep ((useconds_t)(remaining * 1000000 / 2));
}
A more advanced approach would use other system alarm routines.
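For instance (a sketch of mine, not from the answer above), nanosleep() can sleep out the whole interval directly, resuming if a signal interrupts it:
#include <errno.h>
#include <time.h>
/* Sleep for the full interval, resuming after signal interruptions. */
static void sleep_fully(time_t seconds)
{
    struct timespec req = { .tv_sec = seconds, .tv_nsec = 0 };
    struct timespec rem;
    while (nanosleep(&req, &rem) == -1 && errno == EINTR)
        req = rem;  /* continue with whatever time remains */
}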
Missing error handling
The loop below never terminates if time() returns (time_t)-1 because the calendar time is not available, since now - start then never advances. To fix, test each return value for -1.
start = time (NULL);
while (1) {
    now = time (NULL);
    if ((now - start) > 5)
        break;
}
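A fixed version might look like this (a sketch; the error reporting style is my own choice):
start = time (NULL);
if (start == (time_t)-1) {
    perror ("time()");
    _exit (1);
}
while (1) {
    now = time (NULL);
    if (now == (time_t)-1) {
        perror ("time()");
        _exit (1);
    }
    if (difftime (now, start) > 5)
        break;
}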
Minor: cast needed?
On many implementations pid_t is a signed integer type, so the cast is superfluous.
// pid == (pid_t)-1
pid == -1
Related
I want to implement a robust timer for an embedded Linux application. The goal is to control a function's execution time and, if it runs too long, generate an interruption to stop the function's loop.
I searched all over the internet, and the first proposition was to use the clock() function.
The solution with the clock() function could be:
#include <stdio.h>
#include <time.h>
/* timeout_ms was not shown in the original snippet; declared here so the
 * example compiles. */
static const long timeout_ms = 1000;
int func(void){
    //the starting time of the function
    clock_t initial_time;
    clock_t elapsed_time;
    initial_time = clock()*1000/CLOCKS_PER_SEC;
    do{
        //some stuff
        elapsed_time = clock()*1000/CLOCKS_PER_SEC - initial_time;
    }while(elapsed_time < timeout_ms);
    printf("time to get command : %ld\n", (long)elapsed_time);
    //send an error if a timeout was reached
    if(elapsed_time >= timeout_ms){
        return -1;
    }
    else{
        return 1;
    }
}
But this is not really robust, as clock() could overflow in between the function's calculations, so elapsed_time would go negative and the code would never get out of the loop. This was corrected in the edit section below.
The second solution was to use the Linux kernel timers, as follows:
#include <linux/module.h>  /* Needed by all modules */
#include <linux/kernel.h>  /* Needed for KERN_INFO */
#include <linux/init.h>    /* Needed for the macros */
#include <linux/timer.h>

int g_time_interval = 10000;
struct timer_list g_timer;

void timer_handler (unsigned long data)
{
    // do your timer stuff here
}

int init_timer(void)
{
    setup_timer(&g_timer, timer_handler, 0);
    mod_timer(&g_timer, jiffies + msecs_to_jiffies(g_time_interval));
    return 0;
}

void close_timer(void)
{
    del_timer(&g_timer);
}
This option seems OK, but I did some research and jiffies (the number of ticks since startup) could overflow too, and I don't know if this could affect my usage of this timer. This was corrected in the edit section below.
Finally, the last option I found was to use timer_create with a signal. As far as I know, this does not have the overflow issue if used with CLOCK_MONOTONIC:
#include <stdlib.h>
#include <unistd.h>
#include <stdio.h>
#include <signal.h>
#include <time.h>
#include <stdbool.h>
#define SIG SIGRTMIN

static timer_t timerid;
static struct sigaction sa;
static struct sigevent sev;
static sigset_t mask;

int init_timer(void (*handler)(int, siginfo_t *, void *)){
    // Establish handler for timer signal
    sa.sa_flags = SA_SIGINFO;
    sa.sa_sigaction = handler;
    sigemptyset(&sa.sa_mask);
    if (sigaction(SIG, &sa, NULL) == -1)
        printf("Error initializing timer\n");
    // Block timer signal temporarily
    printf("Blocking signal %d\n", SIG);
    sigemptyset(&mask);
    sigaddset(&mask, SIG);
    sigprocmask(SIG_BLOCK, &mask, NULL);
    // Create the timer
    sev.sigev_notify = SIGEV_SIGNAL;
    sev.sigev_signo = SIG;
    sev.sigev_value.sival_ptr = &timerid;
    if (timer_create(CLOCK_MONOTONIC, &sev, &timerid) == -1)
        printf("Error creating timer\n");
    return 0;
}
static void handler(int sig, siginfo_t *si, void *uc)
{
    //put a flag to 1 for example
    signal(sig, SIG_IGN);
}
//Much other stuff ...
But Google told me that we can only set one handler per signal, and I don't know whether the other processes on my Linux board use SIGRTMIN. As I do not want to break everything by redefining its handler, this is not a convenient solution.
Am I getting something wrong here?
Is there a way to define a timer in Linux without these issues?
Thank you very much to all :)
Edit
Overflow will not cause an issue, so options 1 and 2 are valid. Now, which one would be the most robust?
Here is the explanation of why I was wrong about overflow. Take the case where we want to calculate elapsed_time and the maximum clock value is MAX. We have, as above:
elapsed_time = clock()*1000/CLOCKS_PER_SEC - initial_time;
Let's rename clock()*1000/CLOCKS_PER_SEC as x. If there is overflow, then theoretically theoretic_x > MAX, but because of the overflow, x = theoretic_x - MAX. So:
elapsed_time = (theoretic_x - MAX) - initial_time;
which can be written as:
elapsed_time = (theoretic_x - initial_time) - MAX;
And this is equivalent to elapsed_time = theoretic_x - initial_time, because subtracting the maximum value wraps back to the same value (it works like modulo arithmetic). This holds while theoretic_x stays below initial_time + MAX; beyond that, the elapsed time resets.
I hope that was clear enough.
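To see the modulo effect concretely, here is a small demo of my own using unsigned char, whose arithmetic wraps at 256; note the argument strictly holds only for unsigned arithmetic:
#include <stdio.h>

int main(void)
{
    unsigned char initial = 250;
    unsigned char now = (unsigned char)(initial + 10);     /* wraps around to 4 */
    unsigned char elapsed = (unsigned char)(now - initial); /* wraparound cancels out */
    printf("%d\n", elapsed);  /* prints 10 */
    return 0;
}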
But Google told me that we can only set one handler per signal, and I don't know whether the other processes on my Linux board use SIGRTMIN.
No, it is one handler per signal per process.
That is, having a signal handler for SIGRTMIN in your own program will not interfere with SIGRTMIN handlers of any other processes. Similarly, creating a timer will not affect any other processes' timers either. All you need to worry about, is your own process.
(Technically, there are only a limited number of timers available, so you don't want to create hundreds of them in a single process.)
If you have only one thread in the process, consider the following timeout scheme:
// SPDX-License-Identifier: CC0-1.0
#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include <stdio.h>

#define TIMEOUT_SIGNAL (SIGRTMIN+0)
#define TIMEOUT_REPEAT_NS 1000000 /* Repeat every millisecond until canceled */

static volatile sig_atomic_t timeout_elapsed; /* Nonzero if timeout has elapsed */
static timer_t timeout_timer;

static void timeout_handler(int signum)
{
    (void)signum; /* Silences warning about unused parameter; generates no code. */
    timeout_elapsed = 1;
}

static int timeout_init(void)
{
    struct sigaction act;
    struct sigevent evt;
    memset(&act, 0, sizeof act);
    sigemptyset(&act.sa_mask);
    act.sa_handler = timeout_handler;
    act.sa_flags = 0;
    if (sigaction(TIMEOUT_SIGNAL, &act, NULL) == -1)
        return errno;
    memset(&evt, 0, sizeof evt);
    evt.sigev_notify = SIGEV_SIGNAL;
    evt.sigev_signo = TIMEOUT_SIGNAL;
    evt.sigev_value.sival_ptr = (void *)0;
    if (timer_create(CLOCK_BOOTTIME, &evt, &timeout_timer) == -1)
        return errno;
    timeout_elapsed = 0;
    return 0;
}

static void timeout_cancel(void)
{
    struct itimerspec zero;
    zero.it_value.tv_sec = 0;
    zero.it_value.tv_nsec = 0;
    zero.it_interval.tv_sec = 0;
    zero.it_interval.tv_nsec = 0;
    timer_settime(timeout_timer, 0, &zero, NULL);
}

static void timeout_set(double seconds)
{
    struct itimerspec when;
    sigset_t mask;
    /* Block the timeout signal for now. */
    sigemptyset(&mask);
    sigaddset(&mask, TIMEOUT_SIGNAL);
    sigprocmask(SIG_BLOCK, &mask, NULL);
    /* Make sure any previous timeouts have been canceled. */
    timeout_cancel();
    /* Calculate the next (relative) timeout. */
    if (seconds >= 0.000000001) {
        long sec = (long)seconds;
        long nsec = (long)(1000000000.0*(seconds - (double)sec));
        if (nsec < 0)
            nsec = 0;
        if (nsec > 999999999) {
            nsec = 0;
            sec++;
        }
        when.it_value.tv_sec = sec;
        when.it_value.tv_nsec = nsec;
    } else {
        when.it_value.tv_sec = 0;
        when.it_value.tv_nsec = 1;
    }
    /* Set it to repeat, so that it is not easily missed. */
    when.it_interval.tv_sec = 0;
    when.it_interval.tv_nsec = TIMEOUT_REPEAT_NS;
    /* Update the timer. */
    timer_settime(timeout_timer, 0, &when, NULL);
    /* Clear the flag, and unblock the signal. */
    timeout_elapsed = 0;
    sigprocmask(SIG_UNBLOCK, &mask, NULL);
}

int main(void)
{
    char *line_ptr = NULL;
    size_t line_max = 0;
    ssize_t line_len;
    if (timeout_init()) {
        fprintf(stderr, "Cannot set up timeouts: %s.\n", strerror(errno));
        return EXIT_FAILURE;
    }
    timeout_set(5.0);
    printf("Please type input lines. This will timeout in five seconds.\n");
    fflush(stdout);
    while (!timeout_elapsed) {
        line_len = getline(&line_ptr, &line_max, stdin);
        if (line_len > 0) {
            /* Remove trailing newlines */
            line_ptr[strcspn(line_ptr, "\r\n")] = '\0';
            printf("Read %zd bytes: \"%s\".\n", line_len, line_ptr);
            fflush(stdout);
        }
    }
    timeout_cancel();
    free(line_ptr);
    line_ptr = NULL;
    line_max = 0;
    printf("Done.\n");
    return EXIT_SUCCESS;
}
Compile using gcc -Wall -Wextra -O2 example1.c -lrt -o example1 and run ./example1.
For a multithreaded process, the signal must be delivered to a specific thread, almost always the thread that sets the timeout in the first place. Here, I recommend a different approach: use a helper thread and a list, array, or binary min-heap of CLOCK_REALTIME absolute times of the respective timeouts, waiting in pthread_cond_timedwait() either for the next soonest timeout or for a signal on the condition variable indicating that the timeout list/array/heap has been updated; a rough sketch follows.
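A minimal sketch of the single-deadline case (the name wait_until() is mine; a real version would keep a heap of deadlines and re-check a predicate to guard against spurious wakeups):
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;

/* Wait until either the condition variable is signaled (the set of
 * timeouts changed) or the absolute CLOCK_REALTIME deadline passes.
 * Returns nonzero on timeout. */
static int wait_until(const struct timespec *deadline)
{
    int timed_out = 0;
    pthread_mutex_lock(&lock);
    if (pthread_cond_timedwait(&cond, &lock, deadline) == ETIMEDOUT)
        timed_out = 1;
    pthread_mutex_unlock(&lock);
    return timed_out;
}

int main(void)
{
    struct timespec deadline;
    clock_gettime(CLOCK_REALTIME, &deadline);
    deadline.tv_sec += 2;  /* next timeout two seconds from now */
    if (wait_until(&deadline))
        puts("Timed out.");
    return 0;
}
Compile with -pthread.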
POSIX defines clock_gettime. Linux also has extensions for it.
The functions clock_gettime() and clock_settime() retrieve and set the time of the specified clock clockid.
You can simply do the following:
#include <time.h>
struct timespec start, end;
clock_gettime(CLOCK_MONOTONIC, &start);
// Your code here...
clock_gettime(CLOCK_MONOTONIC, &end);
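The elapsed time must combine both fields, for example (a sketch using the start and end values above):
long long elapsed_ns = (long long)(end.tv_sec - start.tv_sec) * 1000000000LL
                     + (end.tv_nsec - start.tv_nsec);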
Note that tv_nsec holds only the nanoseconds past the last full second, so subtracting just end.tv_nsec - start.tv_nsec is wrong whenever the seconds differ. The actual resolution is as specified by clock_getres; sometimes this is just microseconds or even mere milliseconds. Make sure to check the value and adjust accordingly.
struct timespec res;
clock_getres(CLOCK_MONOTONIC, &res);
switch (res.tv_nsec) {
case 1000:    // microsecond resolution
case 1000000: // millisecond resolution
    // cases ...
}
EDIT:
Rereading the original poster's question, I realize that this doesn't quite answer it. Still, I am leaving it here as it might be useful if applied to the problem. You are free to downvote this to allow actual answers to rise to the top.
I'm trying to write a debouncer that will only return a valid argument (>0) if it has been debounced (-1 for bouncing).
I've come up with this so far, but it always returns -1, and I'm wondering why:
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define BOUNCETIME 500000000 //(500ms)
#define SetTime(x) clock_gettime(CLOCK_REALTIME, (x)); // set time

static struct timespec tset;
struct timespec tnow;

int DeBounce(unsigned int arg)
{
    static int val = -1;
    long long nsec = 0;
    if (val < 0) {
        val = arg;
        SetTime(&tset);
        return arg;
    } else {
        SetTime(&tnow);
        if (tnow.tv_nsec < tset.tv_nsec)
            nsec = tnow.tv_nsec + 1000000000;
        else
            nsec = tnow.tv_nsec;
        if (tnow.tv_nsec - tset.tv_nsec > BOUNCETIME) {
            printf("arg okay\n");
            val = -1;
            return arg;
        }
        else
            printf("bounce, ignore!\n");
        return -1;
    }
}

int main (void)
{
    printf("#1 %d\n",DeBounce(0));
    usleep(1);
    printf("#2 %d\n",DeBounce(1));
    usleep(200);
    printf("#3 %d\n",DeBounce(1));
    sleep(1);
    printf("#4 %d\n",DeBounce(1));
}
The output I get is:
$ ./debounce
#1 0
bounce, ignore!
#2 -1
bounce, ignore!
#3 -1
bounce, ignore!
#4 -1
$
usleep(200) sleeps for 200 microseconds. But your debounce period is 500 milliseconds.
Furthermore, tnow.tv_nsec - tset.tv_nsec is not correct, as tv_nsec is not the full time value but only the number of nanoseconds past the second. The correct way to calculate the elapsed time in nanoseconds is something like this:
(tnow.tv_sec * 1.0e9 + tnow.tv_nsec) - (tset.tv_sec * 1.0e9 + tset.tv_nsec)
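Or, in integer arithmetic (my variant), avoiding floating-point rounding:
long long elapsed_ns = (long long)(tnow.tv_sec - tset.tv_sec) * 1000000000LL
                     + (tnow.tv_nsec - tset.tv_nsec);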
I am writing the following C code to get the time taken to perform a simple operation using getitimer and setitimer.
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#include <limits.h>
#include <string.h>
#define INTERVAL 1 /* number of milliseconds to go off */
int main() {
    double sum = 0;
    struct itimerval initial, updated;
    initial.it_value.tv_sec = INTERVAL;
    initial.it_value.tv_usec = 999999;
    initial.it_interval = initial.it_value;
    memcpy(&(initial.it_interval), &(initial.it_value), sizeof( initial.it_value ));
    printf("%ld\n", initial.it_value.tv_usec);
    if (setitimer(ITIMER_VIRTUAL, &initial, NULL) == -1) {
        perror("error calling setitimer()");
        exit(1);
    }
    for (unsigned int i; i < 100; i++)
        sum += 1./i;
    if (getitimer(ITIMER_REAL, &updated) == -1) {
        perror("error calling getitimer()");
        exit(1);
    }
    printf("Time started = %ld\n; Time ended = %ld\n: Time taken = %ld\n",
           initial.it_value.tv_usec, updated.it_value.tv_usec,
           initial.it_value.tv_usec - updated.it_value.tv_usec);
    return 0;
}
I have compiled with:
$ gcc -o timer -std=c99 -Wall -pedantic getitimer.c -lrt -O3
However, my answer is always 999999 (I have tried raising and lowering the 100):
./timer
999999
Time started = 999999
; Time ended = 0
: Time taken = 999999
What is my error? Also, I wanted to ask: what is the highest precision I can get using a program like this?
Thanks very much!
The main thing I see is that the division operations use integer division. So:
initial.it_value.tv_sec = INTERVAL/1000000;
places 0 in tv_sec, and
initial.it_value.tv_usec = (INTERVAL/1000000) * 1000000;
places 0 in tv_usec.
initial.it_interval = initial.it_value;
Direct assignment of a multi-field struct like this is well-defined in C, so the memcpy() on the next line just does the same thing again; either one alone suffices.
So the posted code sets the 'interval' to 0, and of course the resulting values are 0.
this is a key statement from the man page for setitimer() and getitimer()
"Timers decrement from it_value to zero, generate a signal, and reset to
it_interval. A timer which is set to zero (it_value is zero or the timer expires and it_interval is zero) stops."
Suggested edit:
initial.it_value.tv_sec = INTERVAL;
initial.it_value.tv_usec = 0;
memcpy( &(initial.it_interval), &(initial.it_value), sizeof( initial.it_value ) );
...
The main problem with the latest code is that the call to getitimer() references a different timer than the call to setitimer(). The following code fixes that and makes the behavior easy to see:
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
//#include <limits.h>
//#include <string.h>

// do not place comments on same line as #define statement
// always surround numeric values with parens to avoid 'text replacement' errors
// wrong comment: number of milliseconds to go off
// number of seconds in interval
#define INTERVAL (1)
// number of usec in interval
#define MICRO_INTERVAL (999999)

int main() {
    //double sum = 0;
    struct itimerval initial;
    struct itimerval updated;
    initial.it_value.tv_sec = INTERVAL;
    initial.it_value.tv_usec = MICRO_INTERVAL;
    initial.it_interval.tv_sec = INTERVAL;
    initial.it_interval.tv_usec = MICRO_INTERVAL;
    // remove this line: initial.it_interval = initial.it_value;
    // remove this line: memcpy(&(initial.it_interval), &(initial.it_value), sizeof( initial.it_value ));
    //printf("%ld\n", initial.it_value.tv_usec);
    printf( "Time value: %ld.%ld\n", initial.it_value.tv_sec, initial.it_value.tv_usec );
    printf( "Time interval: %ld.%ld\n", initial.it_interval.tv_sec, initial.it_interval.tv_usec );
    if (setitimer(ITIMER_VIRTUAL, &initial, &updated) == -1)
    {
        perror("error calling setitimer()");
        exit(1);
    }
    //for (unsigned int i=0; i < 10; i++) // must initialize the 'i' variable
    //    sum += 1./i;
    // the 'which' parameter should be 'ITIMER_VIRTUAL'
    // as that is what was started in the call to setitimer()
    //if (getitimer(ITIMER_REAL, &updated) == -1)
    //{
    //    perror("error calling getitimer()");
    //    exit(1);
    //}
    if (setitimer(ITIMER_VIRTUAL, &initial, &updated) == -1)
    {
        perror("error calling setitimer()");
        exit(1);
    }
    printf( "end interval counter: %ld.%ld\n", updated.it_interval.tv_sec, updated.it_interval.tv_usec );
    printf( "end value counter: %ld.%ld\n", updated.it_value.tv_sec, updated.it_value.tv_usec );
    //printf("Time started = %ld\n; Time ended = %ld\n: Time taken = %ld\n",
    //       initial.it_value.tv_usec, updated.it_value.tv_usec,
    //       initial.it_value.tv_usec - updated.it_value.tv_usec);
    return 0;
}
// accuracy is +/-1 microsecond, not millisecond
The resulting output, even with nothing being done between the two calls to setitimer() is:
Time value: 1.999999
Time interval: 1.999999
end interval counter: 1.999999
end value counter: 2.3999
setitimer and getitimer are not the right functions to use for profiling. They implement interval timers, which generate an alarm (more accurately, a signal) when the timer expires.
The main options for achieving what you want are the clock or clock_gettime APIs.
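For example, here is a minimal profiling sketch with clock_gettime(); choosing CLOCK_PROCESS_CPUTIME_ID (the per-process CPU-time clock) is my assumption, and CLOCK_MONOTONIC would measure wall-clock time instead:
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec t0, t1;
    double sum = 0.0;

    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t0);
    for (unsigned int i = 1; i <= 100; i++)   /* the work being measured */
        sum += 1.0 / i;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &t1);

    double elapsed = (double)(t1.tv_sec - t0.tv_sec)
                   + (double)(t1.tv_nsec - t0.tv_nsec) / 1e9;
    printf("sum = %f, CPU time = %.9f s\n", sum, elapsed);
    return 0;
}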
Could somebody please explain how to make a countdown timer using clock_gettime under Linux? I know you can use the clock() function to get CPU time and divide by CLOCKS_PER_SEC to get the actual time, but I'm told the clock() function is not well suited for this.
So far I have attempted this (a billion nanoseconds is one second):
#include <stdio.h>
#include <time.h>
#define BILLION 1000000000
int main()
{
    struct timespec rawtime;
    clock_gettime(CLOCK_MONOTONIC_RAW, &rawtime);
    /* Combine seconds and nanoseconds into a single nanosecond count. */
    unsigned long long current = rawtime.tv_sec * 1000000000ULL + rawtime.tv_nsec;
    unsigned long long end = current + BILLION;
    while ( current < end )
    {
        clock_gettime(CLOCK_MONOTONIC_RAW, &rawtime);
        current = rawtime.tv_sec * 1000000000ULL + rawtime.tv_nsec;
    }
    return 0;
}
I know this wouldn't be very useful on its own, but once I've found out how to time correctly I can use this in my projects. I know that sleep() can be used for this purpose, but I want to code the timer myself so that I can better integrate it in my projects - such as the possibility of it returning the time left, as opposed to pausing the whole program.
Please, do not do that. You're burning CPU power for nothing in a busy loop.
Why not use the nanosleep() function instead? It's perfectly suited to the use case you outlined. Or, if you want an easier interface, perhaps something like
#define _POSIX_C_SOURCE 200809L
#include <time.h>
#include <errno.h>

/* Sleep for the specified number of seconds,
 * and return the time left over.
*/
double dsleep(const double seconds)
{
    struct timespec req, rem;
    /* No sleep? */
    if (seconds <= 0.0)
        return 0.0;
    /* Convert to seconds and nanoseconds. */
    req.tv_sec = (time_t)seconds;
    req.tv_nsec = (long)((seconds - (double)req.tv_sec) * 1000000000.0);
    /* Take care of any rounding errors. */
    if (req.tv_nsec < 0L)
        req.tv_nsec = 0L;
    else
    if (req.tv_nsec > 999999999L)
        req.tv_nsec = 999999999L;
    /* Do the nanosleep. */
    if (nanosleep(&req, &rem) != -1)
        return 0.0;
    /* Error? */
    if (errno != EINTR)
        return 0.0;
    /* Return remainder. */
    return (double)rem.tv_sec + (double)rem.tv_nsec / 1000000000.0;
}
The difference is that using this one the CPU is free to do something else, rather than spin like a crazed squirrel on speed.
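For instance, a caller could use it like this (my usage example, assuming the dsleep() above):
#include <stdio.h>

int main(void)
{
    /* Sleep 1.5 seconds; if a signal interrupts the sleep,
     * report how much time was left. */
    double left = dsleep(1.5);
    if (left > 0.0)
        printf("Interrupted with %.9f seconds left.\n", left);
    return 0;
}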
This is not an answer, but an example of how to use signals and a POSIX timer to implement a timeout timer; intended as a response to the OP's followup question in a comment to the accepted answer.
#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <signal.h>
#include <time.h>
#include <errno.h>
#include <string.h>
#include <stdio.h>

/* Timeout timer.
*/
static timer_t timeout_timer;
static volatile sig_atomic_t timeout_state = 0;
static volatile sig_atomic_t timeout_armed = 2;
static const int timeout_signo = SIGALRM;

#define TIMEDOUT() (timeout_state != 0)

/* Timeout signal handler.
*/
static void timeout_handler(int signo, siginfo_t *info, void *context __attribute__((unused)))
{
    if (timeout_armed == 1)
        if (signo == timeout_signo && info && info->si_code == SI_TIMER)
            timeout_state = ~0;
}

/* Unset timeout.
 * Returns nonzero if timeout had expired, zero otherwise.
*/
static int timeout_unset(void)
{
    struct itimerspec t;
    const int retval = timeout_state;
    /* Not armed? */
    if (timeout_armed != 1)
        return retval;
    /* Disarm. */
    t.it_value.tv_sec = 0;
    t.it_value.tv_nsec = 0;
    t.it_interval.tv_sec = 0;
    t.it_interval.tv_nsec = 0;
    timer_settime(timeout_timer, 0, &t, NULL);
    return retval;
}

/* Set timeout (in wall clock seconds).
 * Cancels any pending timeouts.
*/
static int timeout_set(const double seconds)
{
    struct itimerspec t;
    /* Uninitialized yet? */
    if (timeout_armed == 2) {
        struct sigaction act;
        struct sigevent evt;
        /* Use timeout_handler() for timeout_signo signal. */
        sigemptyset(&act.sa_mask);
        act.sa_sigaction = timeout_handler;
        act.sa_flags = SA_SIGINFO;
        if (sigaction(timeout_signo, &act, NULL) == -1)
            return errno;
        /* Create a monotonic timer, delivering timeout_signo signal. */
        evt.sigev_value.sival_ptr = NULL;
        evt.sigev_signo = timeout_signo;
        evt.sigev_notify = SIGEV_SIGNAL;
        if (timer_create(CLOCK_MONOTONIC, &evt, &timeout_timer) == -1)
            return errno;
        /* Timeout is initialized but unarmed. */
        timeout_armed = 0;
    }
    /* Disarm timer, if armed. */
    if (timeout_armed == 1) {
        /* Set zero timeout, disarming the timer. */
        t.it_value.tv_sec = 0;
        t.it_value.tv_nsec = 0;
        t.it_interval.tv_sec = 0;
        t.it_interval.tv_nsec = 0;
        if (timer_settime(timeout_timer, 0, &t, NULL) == -1)
            return errno;
        timeout_armed = 0;
    }
    /* Clear timeout state. It should be safe (no pending signals). */
    timeout_state = 0;
    /* Invalid timeout? */
    if (seconds <= 0.0)
        return errno = EINVAL;
    /* Set new timeout. Check for underflow/overflow. */
    t.it_value.tv_sec = (time_t)seconds;
    t.it_value.tv_nsec = (long)((seconds - (double)t.it_value.tv_sec) * 1000000000.0);
    if (t.it_value.tv_nsec < 0L)
        t.it_value.tv_nsec = 0L;
    else
    if (t.it_value.tv_nsec > 999999999L)
        t.it_value.tv_nsec = 999999999L;
    /* Set it to repeat once every millisecond, just in case the initial
     * interrupt is missed. */
    t.it_interval.tv_sec = 0;
    t.it_interval.tv_nsec = 1000000L;
    if (timer_settime(timeout_timer, 0, &t, NULL) == -1)
        return errno;
    timeout_armed = 1;
    return 0;
}

int main(void)
{
    char *line = NULL;
    size_t size = 0;
    ssize_t len;
    fprintf(stderr, "Please supply input. The program will exit automatically if\n");
    fprintf(stderr, "it takes more than five seconds for the next line to arrive.\n");
    fflush(stderr);
    while (1) {
        if (timeout_set(5.0)) {
            const char *const errmsg = strerror(errno);
            fprintf(stderr, "Cannot set timeout: %s.\n", errmsg);
            return 1;
        }
        len = getline(&line, &size, stdin);
        if (len == (ssize_t)-1)
            break;
        if (len < (ssize_t)1) {
            /* This should never occur (except for -1, of course). */
            errno = EIO;
            break;
        }
        /* We do not want *output* to be interrupted,
         * so we cancel the timeout. */
        timeout_unset();
        if (fwrite(line, (size_t)len, 1, stdout) != 1) {
            fprintf(stderr, "Error writing to standard output.\n");
            fflush(stderr);
            return 1;
        }
        fflush(stdout);
        /* Next line. */
    }
    /* Remember to cancel the timeout. Also check it. */
    if (timeout_unset())
        fprintf(stderr, "Timed out.\n");
    else
    if (ferror(stdin) || !feof(stdin))
        fprintf(stderr, "Error reading standard input.\n");
    else
        fprintf(stderr, "End of input.\n");
    fflush(stderr);
    /* Free line buffer. */
    free(line);
    line = NULL;
    size = 0;
    /* Done. */
    return 0;
}
If you save the above as timer.c, you can compile it using e.g.
gcc -W -Wall -O3 -std=c99 -pedantic timer.c -lrt -o timer
and run it using ./timer.
If you read the code above carefully, you'll see that it is actually a periodic timer signal (at millisecond intervals), with a variable delay before the first signal. That is just a technique I like to use to make sure I don't miss the signal. (The signal repeats until the timeout is unset.)
Note that although you can do computation in a signal handler, you should only use functions that are async-signal-safe; see man 7 signal. Also, only the sig_atomic_t type is atomic wrt. normal single-threaded code and a signal handler. So, it is better to just use the signal as an indicator, and do the actual work in your own program.
If you wanted to e.g. update monster coordinates in a signal handler, it is possible but a bit tricky. I'd use three arrays containing the monster information, and use GCC __sync_bool_compare_and_swap() to update the array pointers, very much the same technique as triple-buffering in graphics; a rough sketch follows.
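Here is a hedged sketch of that idea (all names are mine, illustrative only): the handler owns 'writing', the main loop owns 'reading', and the two exchange buffers through 'ready' with compare-and-swap, so the handler never writes to the buffer the main loop is reading. A production version would need more care, e.g. memory ordering on weakly ordered CPUs.
#include <signal.h>

typedef struct { int x, y; } monster_t;

static monster_t bufs[3];
static monster_t *ready   = &bufs[0];   /* handoff slot */
static monster_t *writing = &bufs[1];   /* signal-handler private */
static monster_t *reading = &bufs[2];   /* main-loop private */
static volatile sig_atomic_t fresh = 0; /* new data in 'ready'? */

/* Called from the signal handler after filling *writing. */
static void publish(void)
{
    monster_t *old;
    do
        old = ready;
    while (!__sync_bool_compare_and_swap(&ready, old, writing));
    writing = old;  /* recycle the displaced buffer */
    fresh = 1;
}

/* Called from the main loop; returns the latest complete snapshot. */
static monster_t *acquire(void)
{
    if (fresh) {
        monster_t *old;
        fresh = 0;
        do
            old = ready;
        while (!__sync_bool_compare_and_swap(&ready, old, reading));
        reading = old;
    }
    return reading;
}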
If you need more than one concurrent timeout, you could use multiple timers (there is a number of them available), but the best option is to define timeout slots. (You can use generation counters to detect "forgotten" timeouts, and so on.) Whenever a new timeout is set or unset, you update the timeout to reflect the next timeout that expires. It's a bit more code, but really a straightforward extension of the above.
I want to create a timer in a C program so that it can print a variable every second.
Can anybody help me with this?
Don't use busy waiting, because you'll get 100% CPU utilization.
Instead, use a system function that puts the process to sleep, for example select():
#include <stdio.h>
#include <sys/select.h>

void your_callback()
{
    printf("%s\n", __FUNCTION__);
}

int main()
{
    struct timeval t;
    while (1) {
        t.tv_sec = 1;
        t.tv_usec = 0;
        select(0, NULL, NULL, NULL, &t);
        your_callback();
    }
    return 0;
}
If all you are interested in doing is printing the value of a variable at a one second interval, using time(2) or clock(3) as suggested in the other answers might suffice. In general, I would not recommend these busy-waiting techniques.
If your program is more complex, I suggest you investigate using the alarm(2) or setitimer(2) function to asynchronously deliver a signal to your application at a one second interval.
The following example uses select(2) to block indefinitely in order to minimize CPU usage associated with busy-waiting techniques. The blocking select() call is interrupted and returns when a signal is caught. In the case of the SIGALRM signal, the print_variable flag is set and the value of variable is printed.
Example 1: using alarm()
#include <signal.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

volatile unsigned int variable = 0;
volatile unsigned int print_variable = 0;

void alarm_handler(int signum)
{
    variable++;
    print_variable = 1;
    alarm(1);
}

int main()
{
    signal(SIGALRM, alarm_handler);
    alarm(1);
    for (;;)
    {
        select(0, NULL, NULL, NULL, NULL);
        if (print_variable)
        {
            printf("Variable = %u\n", variable);
        }
    }
}
Note: Error checking was omitted from the above code for simplicity.
A printf() function could have been called inside the SIGALRM handler, but calling non-reentrant functions in a signal handler is generally discouraged.
A timeout of one second can also be passed to select(), but if it were interrupted by any signal, additional logic is necessary to ensure that the remainder of the one second timeout is honored. Fortunately on Linux, select() modifies the timeout value to reflect the amount of time not slept. This allows interruption cases to be detected, followed by subsequent calls to select() to complete the timeout.
Example 2: using select()
#include <errno.h>
#include <stdio.h>
#include <sys/select.h>

volatile unsigned int variable = 0;

int main()
{
    struct timeval tv;
    int val;
    for (;;)
    {
        tv.tv_sec = 1;
        tv.tv_usec = 0;
        do
        {
            val = select(0, NULL, NULL, NULL, &tv);
        } while (val != 0 && errno == EINTR);
        printf("Variable = %u\n", ++variable);
    }
}
If you want only second precision, use time(0), which returns the current time when time.h is included.
update:
Adding a simple example which prints 10 every second for 20 seconds:
#include <time.h>
#include <stdio.h>

int main()
{
    int a = 10;
    int num = 20;
    time_t c = time(0);
    while (num--)
    {
        printf("%d\n", a);
        while (!(time(0) - c));  /* busy-wait until the next second */
        c = time(0);
    }
    return 0;
}
Use time(0); see this example:
/* timer.c */
#include <stdio.h>
#include <time.h>

void delay_sec( int seconds ){
    clock_t endwait;
    endwait = clock () + seconds * CLOCKS_PER_SEC;
    while (clock() < endwait) {}
}

int main (void){
    time_t rawtime, ini_time, now;
    struct tm *ptm;
    time ( &ini_time );
    for(;;){
        time ( &rawtime );
        //ptm = gmtime ( &rawtime );
        //printf ("%2d:%02d:%02d\n", ptm_2->tm_hour, ptm_2->tm_min, ptm_2->tm_sec);
        now = rawtime - ini_time;
        ptm = gmtime ( &now );
        printf ("%2d:%02d:%02d\n", ptm->tm_hour, ptm->tm_min, ptm->tm_sec);
        delay_sec(1);
    }
    return 0;
}
As you know, 1000 milliseconds equal 1 second.
#include <stdio.h>
#include <time.h>

#define mydelay 1000

void delay(int mseconds)
{
    /* clock() ticks CLOCKS_PER_SEC times per second, so convert
     * milliseconds to clock ticks before adding. */
    clock_t wait = (clock_t)mseconds * CLOCKS_PER_SEC / 1000 + clock();
    while (wait > clock());
}

int main()
{
    int i=100;
    while(1)
    {
        printf("%d\n",i);
        delay(mydelay);
    }
    return 0;
}
A simple example which prints the value of the variable a every second:
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int a = 10;
    while (a--)
    {
        printf("Value of a = %d\n", a);
        sleep(1);
    }
    return 0;
}
Output:
Value of a = 9
...
Value of a = 0