how to solve the linux timer error? - c

#include <features.h>
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <netinet/in.h>
#include <arpa/inet.h>
typedef unsigned int uint32;
#define million 1000000L
long duration2ms, duration10ms, duration100ms;
double Task2ms_Raster, Task10ms_Raster, Task100ms_Raster;
timer_t firstTimerID, secondTimerID, thirdTimerID;
void TASK1(Task2ms_Raster) {
struct timespec start, stop;
int a, b, c;
uint32 StartTime, StopTime;
a=1, b=2, c=3;
if((StartTime = clock_gettime(CLOCK_REALTIME, &start)) == -1) {
perror("clock gettime");
}
a= b+c;
b = c+a;
c= a+b;
b = c+a;
c= a+b; a= b+c;
b = c+a;
c= a+b; a= b+c;
b = c+a;
c= a+b; a= b+c;
b = c+a;
c= a+b; a= b+c;
b = c+a;
c= a+b; a= b+c;
b = c+a;
c= a+b; a= b+c;
b = c+a;
c= a+b;
// I did several times like this.
printf("ETAS\n");
printf("ETAS1\n");
if((StopTime = clock_gettime( CLOCK_REALTIME, &stop)) == -1) {
perror("clock gettime");
}
duration2ms = (stop.tv_sec - start.tv_sec) +
(double)(stop.tv_nsec - start.tv_nsec) /
(double)million;
printf("time difference is= %ld\n", duration2ms);
}
void TASK2(Task10ms_Raster) {
struct timespec start, stop;
if(clock_gettime( CLOCK_REALTIME, &start) == -1) {
perror("clock gettime");
}
printf("ETAS2\n");
printf("ETAS3\n");
if(clock_gettime(CLOCK_REALTIME, &stop) == -1) {
perror("clock gettime");
}
duration10ms = (stop.tv_sec - start.tv_sec) +
(double)( stop.tv_nsec - start.tv_nsec) /
(double)million;
printf("time difference is= %ld\n", duration10ms);
}
void TASK3(Task100ms_Raster) {
struct timespec start, stop;
if(clock_gettime( CLOCK_REALTIME, &start) == -1) {
perror("clock gettime");
}
printf("ETAS4\n");
printf("ETAS5\n");
if((clock_gettime(CLOCK_REALTIME, &stop)) == -1) {
perror("clock gettime");
}
duration100ms = (stop.tv_sec - start.tv_sec) +
(double)(stop.tv_nsec - start.tv_nsec) /
(double)million;
printf( "time difference is= %ld\n", duration100ms );
}
static void timerHandler(int sig, siginfo_t *si, void *uc) {
timer_t *tidp;
tidp = si->si_value.sival_ptr;
if (*tidp == firstTimerID)
TASK1(Task2ms_Raster);
else if(*tidp == secondTimerID)
TASK2(Task10ms_Raster);
else if(*tidp == thirdTimerID)
TASK3(Task100ms_Raster);
}
static int makeTimer(char *name,
timer_t *timerID,
int expireMS,
int intervalMS) {
struct sigevent te;
struct itimerspec its;
struct sigaction sa;
int sigNo = SIGRTMIN;
/* Set up signal handler. */
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = timerHandler;
sigemptyset(&sa.sa_mask);
if(sigaction(sigNo, &sa, NULL) == -1) {
perror("sigaction");
}
/* Set and enable alarm */
te.sigev_notify = SIGEV_SIGNAL;
te.sigev_signo = sigNo;
te.sigev_value.sival_ptr = timerID;
timer_create(CLOCK_REALTIME, &te, timerID);
its.it_interval.tv_sec = 0;
its.it_interval.tv_nsec = intervalMS * 1000000;
its.it_value.tv_sec = 0;
its.it_value.tv_nsec = expireMS * 1000000;
timer_settime(*timerID, 0, &its, NULL);
return 1;
}
int main(void) {
makeTimer("First Timer", &firstTimerID, 2, 2); //2ms
makeTimer("Second Timer", &secondTimerID, 10, 10); //10ms
makeTimer("Third Timer", &thirdTimerID, 100, 100); //100ms
while(1) {
sleep(100);
}
}
I created a timer to call the task for every 2ms, 10ms and 100ms. The tasks are just printing the value and calculating the start time and stop time for printing the value. when i run the above program, it is not displaying time difference between the start time and stop time (i.e duration2ms, duration 10ms nd duration100ms). could someone please help me.

The problem is that you don't save your timing values in TASK*() anywhere. This means clock_gettime() gets called twice in a row, having little or no time spent in between. What you should do is something along the lines of following:
#include <features.h>
#include <time.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>
#include <netinet/in.h>
#include <arpa/inet.h>
typedef unsigned int uint32;
timer_t tmr;
struct timespec prev;
static void handle_timer(int sig, siginfo_t *si, void *uc) {
timer_t *tidp;
struct timespec current;
tidp = si->si_value.sival_ptr;
clock_gettime(CLOCK_REALTIME, &current);
printf("dif between calls to handle_timer: %ld\n",
current.tv_sec - prev.tv_sec);
prev = current;
}
int main(int argc, char **argv) {
struct sigevent se;
struct itimerspec its;
struct sigaction sa;
clock_gettime(CLOCK_REALTIME, &prev);
/* Set up signal handler. */
sa.sa_flags = SA_SIGINFO;
sa.sa_sigaction = handle_timer;
sigemptyset(&sa.sa_mask);
if (sigaction(SIGRTMIN, &sa, NULL) == -1)
perror("sigaction");
/* Set and enable alarm */
se.sigev_notify = SIGEV_SIGNAL;
se.sigev_signo = SIGRTMIN;
se.sigev_value.sival_ptr = &tmr;
timer_create(CLOCK_REALTIME, &se, &tmr);
its.it_interval.tv_sec = 1;
its.it_value.tv_sec = 1;
its.it_value.tv_nsec = 0;
timer_settime(&tmr, 0, &its, NULL);
while(1)
sleep(100);
return 0;
}
The difference here is that we're actually saving the time when handle_time() was called and then calculating the difference against the saved time.

The time difference is going to show zero in all cases because you are measuring the time it takes to print two lines, which is very fast. You are not timing the time between invocations of each task.
If you want to measure the time between invocations of the task, you need to preserve the time. As an example, I'll show one task:
void TASK3(Task100ms_Raster) {
static struct timespec start, stop = { .tv_sec = -1 }; // static duration!
if (stop.tv_sec < 0) {
(void) clock_gettime( CLOCK_REALTIME, &stop); // first time run
}
start = stop; // start from previous stop time
// do whatever here
(void) clock_gettime( CLOCK_REALTIME, &stop);
duration100ms = (stop.tv_sec - start.tv_sec)
+ (double)(stop.tv_nsec - start.tv_nsec)
/ (double)million;
printf( "time difference is= %ld\n", duration100ms );
}

Related

Why my program takes so much CPU time though most of the time in sleep?

I needed some timer for my program, and I decided to write it with pthreads.
My timer needed to update some info via update callback every update_interval ticks.
I've done it like this:
timer.h:
#include <pthread.h>
enum timer_messages
{
TIMER_START,
TIMER_STOP,
TIMER_PAUSE,
TIMER_EXIT
};
typedef void (*callback)(void *);
struct timer
{
pthread_t thread_id;
struct timeval *interval;
struct timeval *update_interval;
struct timeval *start;
int ls;
int wr;
int enabled;
int exit;
callback update;
callback on_time;
};
struct timer *my_timer_create();
void timer_destroy(struct timer *t);
void timer_set_update_interval(struct timer *t, int seconds, int microseconds);
void timer_set_interval(struct timer *t, int seconds, int microseconds);
void timer_set_update_func(struct timer *t, callback update);
void timer_set_ontime_func(struct timer *t, callback on_time);
void timer_stop(struct timer *t);
void timer_start(struct timer *t);
void timer_exit(struct timer *t);
void timer_pause(struct timer *t);
timer.c:
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/time.h>
#include <string.h>
#include "timer.h"
#define TIMEVAL_TO_MICROSECONDS(tv) ((long long)((tv).tv_sec * 1000000 + (tv).tv_usec))
#define GET_TIME_PASSED(start, now) ((TIMEVAL_TO_MICROSECONDS(now) - TIMEVAL_TO_MICROSECONDS(start)))
static int passed(struct timeval *start, struct timeval *interval);
static void fill_timeval(struct timeval *tv, int sec, int microsec);
static void timer_count(struct timer *t);
static void timer_message(struct timer *t);
static void *main_func(void *data);
static void timer_send_msg(struct timer *t, enum timer_messages message);
static struct timeval DEFAULT_TIMEOUT = { 0, 500000 };
static int passed(struct timeval *start, struct timeval *interval)
{
struct timeval cur, sub;
int check;
check = gettimeofday(&cur, NULL);
if(-1 == check)
{
perror("gettimeofday");
return 0;
}
if(GET_TIME_PASSED(*start, cur) < TIMEVAL_TO_MICROSECONDS(*interval))
return 0;
return 1;
}
static void fill_timeval(struct timeval *tv, int sec, int microsec)
{
tv->tv_sec = sec;
tv->tv_usec = microsec;
}
static void timer_count(struct timer *t)
{
int check;
fd_set readfds;
struct timeval timeout;
check = gettimeofday(t->start, NULL);
while(1)
{
if(!t->enabled)
return;
FD_ZERO(&readfds);
FD_SET(t->ls, &readfds);
if(t->update_interval)
memcpy(&timeout, t->update_interval, sizeof(*(t->update_interval)));
else
memcpy(&timeout, &DEFAULT_TIMEOUT, sizeof(DEFAULT_TIMEOUT));
check = select(t->ls + 1, &readfds, NULL, NULL, &timeout);
if(-1 == check)
{
perror("select");
return;
}
if(FD_ISSET(t->ls, &readfds))
timer_message(t);
else
if(t->update)
t->update(t);
if(passed(t->start, t->interval))
{
t->on_time(t);
break;
}
}
}
static void timer_message(struct timer *t)
{
int read_bytes;
char message;
read_bytes = read(t->ls, &message, sizeof(message));
if(-1 == read_bytes)
{
perror("timer_message read");
return;
}
switch(message)
{
case TIMER_START: t->enabled = 1; break;
case TIMER_STOP: t->enabled = 0; t->interval = NULL; t->start = NULL; break;
case TIMER_EXIT: t->enabled = 0; t->exit = 1; break;
case TIMER_PAUSE: break;
default: break;
}
}
static void *main_func(void *data)
{
struct timer *t = data;
fd_set readfds;
int check;
while(!t->exit)
{
if(t->enabled)
{
timer_count(t);
}
else
{
FD_ZERO(&readfds);
FD_SET(t->ls, &readfds);
check = select(t->ls + 1, &readfds, NULL, NULL, NULL);
if(-1 == check)
{
perror("select");
return NULL;
}
if(FD_ISSET(t->ls, &readfds))
timer_message(t);
}
}
return NULL;
}
static void timer_send_msg(struct timer *t, enum timer_messages message)
{
int check;
char msg;
msg = message;
check = write(t->wr, &msg, sizeof(msg));
if(-1 == check)
{
perror("timer_send_msg write");
}
}
struct timer *my_timer_create()
{
int check;
struct timer *t;
int fd[2];
t = malloc(sizeof(*t));
t->interval = malloc(sizeof(*(t->interval)));
t->update_interval = malloc(sizeof(*(t->update_interval)));
t->start = malloc(sizeof(*(t->start)));
check = pipe(fd);
if(-1 == check)
{
perror("pipe");
return NULL;
}
t->ls = fd[0];
t->wr = fd[1];
t->enabled = 0;
t->exit = 0;
t->update = NULL;
t->on_time = NULL;
check = pthread_create(&(t->thread_id), NULL, main_func, t);
if(-1 == check)
{
perror("pthread_create");
return NULL;
}
return t;
}
void timer_destroy(struct timer *t)
{
free(t->interval);
free(t->update_interval);
free(t->start);
close(t->ls);
close(t->wr);
free(t);
}
void timer_set_update_interval(struct timer *t, int seconds, int microseconds)
{
fill_timeval(t->update_interval, seconds, microseconds);
}
void timer_set_interval(struct timer *t, int seconds, int microseconds)
{
fill_timeval(t->interval, seconds, microseconds);
}
void timer_set_update_func(struct timer *t, callback update)
{
t->update = update;
}
void timer_set_ontime_func(struct timer *t, callback on_time)
{
t->on_time = on_time;
}
void timer_stop(struct timer *t)
{
timer_send_msg(t, TIMER_STOP);
}
void timer_start(struct timer *t)
{
timer_send_msg(t, TIMER_START);
}
void timer_exit(struct timer *t)
{
timer_send_msg(t, TIMER_EXIT);
}
void timer_pause(struct timer *t)
{
timer_send_msg(t, TIMER_PAUSE);
}
And then in main file invoked it like this:
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <sys/time.h>
#include "../timer.h"
#define BUF_SIZE 4096
#define TIMEVAL_TO_MICROSECONDS(tv) ((long long)((tv).tv_sec * 1000000 + (tv).tv_usec))
#define GET_TIME_PASSED(start, now) ((TIMEVAL_TO_MICROSECONDS(now) - TIMEVAL_TO_MICROSECONDS(start)))
void progress_bar(int percent, int bar_len)
{
char buf[BUF_SIZE];
int inside = bar_len - 2;
int filled = inside * percent / 100;
int not_filled = inside - filled;
assert(percent <= 100);
assert(bar_len < BUF_SIZE);
buf[0] = '[';
memset(buf + 1, '#', filled);
memset(buf + 1 + filled, '-', not_filled);
buf[bar_len - 1] = ']';
buf[bar_len] = 0;
printf("\r%s %d%%", buf, percent);
fflush(stdout);
}
void timer_ontime(void *data)
{
struct timer *t = data;
puts("");
puts("That's all folks!");
timer_exit(t);
}
void timer_update(void *data)
{
struct timer *t = data;
struct timeval now;
long long passed;
int percent;
gettimeofday(&now, NULL);
passed = GET_TIME_PASSED(*(t->start), now);
percent = passed * 100 / (t->interval->tv_sec * 1000000);
progress_bar(percent, 50);
}
int main(int argc, char **argv)
{
struct timer *t;
int seconds;
int check;
if(argc != 2)
{
fprintf(stderr, "Usage: %s <seconds>\n", argv[0]);
return 1;
}
check = sscanf(argv[1], "%d", &seconds);
if(check != 1)
{
fprintf(stderr, "Couldn't parse number of seconds\n");
return 1;
}
t = my_timer_create();
if(t == NULL)
{
fprintf(stderr, "Couldn't create timer\n");
return 1;
}
timer_set_interval(t, seconds, 0);
timer_set_ontime_func(t, timer_ontime);
timer_set_update_func(t, timer_update);
timer_start(t);
printf("Started timer(%d seconds)\n", seconds);
pthread_join(t->thread_id, NULL);
}
Then i run it with:
[udalny#bulba test]$ time ./timer_check 3
Started timer(3 seconds)
[###############################################-] 99%
That's all folks!
./timer_check 3 0.48s user 1.22s system 56% cpu 3.002 total
So as you can see it takes 56% CPU time. Why so much?
It updates only twice per second(DEFAULT_CALLBACK is 500000 microseconds). And all
other time it is sleeping.
How could I change it so it takes less?
Also I would appreciate any tips on the code.
Your program spends most of its time in timer_count, looping busily - if you add a simple printf before your select:
printf("?\n");
check = select(t->ls + 1, &readfds, NULL, NULL, &timeout);
and run ./timer_check 3 | wc -l you should get millions of lines - meaning the CPU hard-loops on this loop. This is because of the way you initialize your timeout:
if(t->update_interval)
memcpy(&timeout, t->update_interval, sizeof(*(t->update_interval)));
this actually sets your timeout to zero - because you never initialized your t->update_interval in main. This effectively turns your loop into a busy loop.
Add the following line to your main function to fix this:
timer_set_update_interval(t, seconds, 0);
after which you get your desired behavior:
Started timer(3 seconds)
[################################################] 100%
That's all folks!
0.00user 0.00system 0:03.00elapsed 0%CPU (0avgtext+0avgdata 1932maxresident)k
0inputs+0outputs (0major+77minor)pagefaults 0swaps

C, timer_settime, disarm timer and overwrite associated data?

I have to do for University a project about UDP, where i have to guarantee reliable communication; for packets, i want use timer_gettime() and timer_Settime() functions, because i can queue signals and i can associate to them a timer; in particular, struct sigevent has a field which union sigval where i can pass value to handler when signal arrived; I would like to take advantage of this passing to handler number of packets for which timer expired; I have a problem, and I've done a simple program to verify this; when I start timer, i can disarm it setting it_value of struct sigevent to 0; but data doesn't change; if I send 100 signal, header receives only data of first signal. This is my code:
#include <signal.h>
#include <time.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
int d;
void err_exit(char* str)
{
perror(str);
exit(EXIT_FAILURE);
}
void sighandler(int sig, siginfo_t *si, void *uc)
{
(void) sig;
(void) uc;
d = si->si_value.sival_int;
}
void handle_signal(struct sigaction* sa)
{
sa->sa_flags = SA_SIGINFO;
sa->sa_sigaction = sighandler;
sigemptyset(&sa->sa_mask);
if (sigaction(SIGRTMAX,sa,NULL) == -1)
err_exit("sigaction");
}
void create_timer(struct sigevent* sev,timer_t* timer_id,int i)
{
union sigval s;
s.sival_int = i;
printf("value: %d\n",i);
sev->sigev_notify = SIGEV_SIGNAL;
sev->sigev_signo = SIGRTMAX;
sev->sigev_value = s;
timer_create(CLOCK_REALTIME,sev,timer_id);
}
void set_timer(timer_t timer_id,struct itimerspec* ts)
{
if(ts == NULL)
printf("itimerspec null\n");
if (timer_settime(timer_id, 0, ts, NULL) == -1){
printf("errno code: %d\n",errno);
err_exit("timer_settime");
}
}
void initialize_timerspec(struct itimerspec* ts)
{
ts->it_value.tv_sec = 2;
ts->it_value.tv_nsec = 5;
ts->it_interval.tv_sec = 0;
ts->it_interval.tv_nsec = 0;
}
void reset_timer(timer_t timer_id, struct itimerspec* ts)
{
ts->it_value.tv_sec = 0;
ts->it_value.tv_nsec = 0;
ts->it_interval.tv_sec = 0;
ts->it_interval.tv_nsec = 0;
if (timer_settime(timer_id, 0, ts, NULL) == -1){
printf("errno code: %d\n",errno);
err_exit("timer_settime");
}
}
int main()
{
struct sigaction sa;
struct itimerspec ts[2];
struct sigevent sev[2];
timer_t timer_id[2];
handle_signal(&sa);
create_timer(sev,timer_id,0);
initialize_timerspec(ts);
set_timer(timer_id,ts);
reset_timer(timer_id,ts);
create_timer(sev + 1,timer_id + 1,1);
initialize_timerspec(ts + 1);
set_timer(timer_id,ts + 1);
printf("id1: %ju id2: %ju\n",timer_id[0],timer_id[1]);
sleep(10);
printf("d = %d\n",d);
exit(EXIT_SUCCESS);
}
I disarm first timer, and send another signal; but handler receives data associated to first signal, because it prints 0. Is there a way to send to overwrite data, sending to handler data of second signal(in this case 1)?

Poor performance when multi-processes write one msg on linux

I write a test program as follows:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/msg.h>
#include <time.h>
#define PACKET_SIZE 500
#define LOOP_COUNT 30000
int g_sndsucc = 0;
int g_sndfail = 0;
const int C_IPC_KEY = 0x00231a95;
const int COUNT_SIZE = 10000;
unsigned long g_count = 0;
unsigned long g_t1 = 0;
struct timeval s1, s2, s3, s4;
int main(int argc, char* argv[])
{
int ipckey = C_IPC_KEY;
if(argc > 1)
{
ipckey = atoi(argv[1]);
printf("ipckey is %d\n", ipckey);
}
int qid = msgget(ipckey, IPC_CREAT | 0666);
if(qid <= 0)
{
printf("msgget err: %d \n", errno);
return 0;
}
char data[PACKET_SIZE];
memset(data, 'a', PACKET_SIZE-1);
data[PACKET_SIZE-1] = '\0';
*((long *)data) = 0;
int ret = 0;
struct timeval start;
gettimeofday (&start, NULL);
while(1)
{
*((long *)data) +=1;
gettimeofday (&s1, NULL);
ret = msgsnd(qid, data, PACKET_SIZE,0);
gettimeofday (&s2, NULL);
if(ret != 0)
{
g_sndfail ++;
}
else
{
g_sndsucc ++;
}
g_count++;
g_t1 += (s2.tv_sec-s1.tv_sec)*1000000 + (s2.tv_usec-s1.tv_usec);
if ( g_count >= 10000)
{
printf("STAT1: t1 : %f\n",
10000000000.0 / g_t1);
g_count = 0;
g_t1 = 0;
}
usleep(1000);
}
return 0;
}
I create 100 same processes to msgsnd , and on suse, each process's msgsnd tps only reaches 50/s.
But on AIX5 the msgsnd tps can reaches 10000/s.
Does anyone know why the performance of IPC on linux when multi-processes is so poor?
And how to increase the performance on linux??
BTW, the kenel version of suse is linux 3.0.13
I checked the source code of the msgget in linux3.8.
When the thread did not get the msg lock, it is not release cpu and sleep some time.
Instead it will call ipc_lock_by_ptr(&msq->q_perm); frequently.
So the cpu usage will be very high, and the collision rate will grow rapidly when the threads increas.

SUN RPC (ONC/RPC): Calculating round trip time (or pinging) using null procedure in C

How can I calculate or estimate the RTT (Round Trip Time) between client and server?
A tutorial or sample addressing this can also help.
Here what I do:
#include <rpc/rpc.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/times.h>
#include <fcntl.h>
#include <time.h>
int main(int argc, char *argv[]) {
enum clnt_stat status;
CLIENT *handle;
struct timeval t;
clock_t rtime;
struct tms dumm;
int count = 100000;
int i;
time_t now;
char stamp[27];
int programm;
int version;
if (argc != 4) {
printf("Usage: rpcping <host> <program> <version>\n");
exit(1);
}
/*
* Create Client Handle
*/
programm = atoi(argv[2]);
version = atoi(argv[3]);
handle = clnt_create(argv[1], programm, version, "tcp");
if (handle == NULL) {
printf("clnt failed\n");
exit(1);
}
/*
* use 30 seconds timeout
*/
t.tv_sec = 30;
t.tv_usec = 0;
while (1) {
rtime = times(&dumm);
for (i = 0; i < count; i++) {
status = clnt_call(handle, 0, (xdrproc_t) xdr_void,
NULL, (xdrproc_t) xdr_void, NULL, t);
if (status == RPC_SUCCESS) { /* NOP */ }
}
now = time(NULL);
ctime_r(&now, stamp);
stamp[strlen(stamp) - 1] = '\0';
fprintf(stdout, "[%s]: Speed: %2.4fs.\n", stamp,
count / ((double) (times(&dumm) - rtime) / (double) sysconf(_SC_CLK_TCK)));
fflush(stdout);
}
clnt_destroy(handle);
}
I have a multithread version as well
https://gist.github.com/2401404
tigran.

Using pthread_cond_timedwait to print a specified time

Hey, I've got just a small problem related to pthread_cond_timedwait. I've tried implementing it into this piece of code. I can't get the arguments right for timedwait, because I am not too sure what I'm doing. If anyone could point me in the right direction it would be much appreciated!
#include <time.h>
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
pthread_mutex_t timeLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t c = PTHREAD_COND_INITIALIZER;
int timeIndex = 0;
time_t times[100];
void print_time(time_t tt)
{
char buf[80];
struct tm* st = localtime(&tt);
strftime(buf, 80, "%c", st);
printf("Call me on: ");
printf("%s\n", buf);
}
void *add_time(time_t tt){
if(timeIndex == 100)
timeIndex = 0;
struct timespec ts;
times[timeIndex] = tt;
timeIndex++;
ts.tv_sec = tt;
print_time(tt); // print element
}
void * call_time()
{
while(1)
{
const time_t c_time = time(NULL);
int i;
for(i = 0; i <= 100; i++)
{
if(c_time == times[i])
{
printf("\nWake me up!\n");
times[i] = 0;
}
}
}
}
void * newTime()
{
while(1)
{
time_t f_time;
f_time = time(NULL);
srand ( time(NULL) );
f_time += rand()%100;
add_time(f_time);
sleep(1);
}
}
int main(void)
{
pthread_t timeMet;
pthread_t time;
pthread_create(&time, NULL, newTime, NULL);
pthread_create(&timeMet, NULL, call_time, NULL);
pthread_join(time, NULL);
pthread_join(timeMet, NULL);
return 0;
}

Resources