How do I measure a time interval in C?

I would like to measure time in C, and I am having a tough time figuring it out. All I want is something like this:
start a timer
run a method
stop the timer
report the time taken (at least to microsecond accuracy)
Any help would be appreciated.
(I am compiling on Windows using MinGW.)

High resolution timers that provide 1 microsecond resolution are system-specific, so you will have to use different methods on different OS platforms. You may be interested in checking out the following article, which implements a cross-platform C++ timer class based on the functions described below:
Song Ho Ahn - High Resolution Timer
Windows
The Windows API provides extremely high resolution timer functions: QueryPerformanceCounter(), which returns the current elapsed ticks, and QueryPerformanceFrequency(), which returns the number of ticks per second.
Example:
#include <stdio.h>
#include <windows.h>                // for Windows APIs

int main(void)
{
    LARGE_INTEGER frequency;        // ticks per second
    LARGE_INTEGER t1, t2;           // ticks
    double elapsedTime;

    // get ticks per second
    QueryPerformanceFrequency(&frequency);

    // start timer
    QueryPerformanceCounter(&t1);

    // do something
    // ...

    // stop timer
    QueryPerformanceCounter(&t2);

    // compute and print the elapsed time in milliseconds
    elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart;
    printf("%f ms.\n", elapsedTime);

    return 0;
}
Linux, Unix, and Mac
For Unix- or Linux-based systems, you can use gettimeofday(). This function is declared in <sys/time.h>.
Example:
#include <stdio.h>
#include <sys/time.h>               // for gettimeofday()

int main(void)
{
    struct timeval t1, t2;
    double elapsedTime;

    // start timer
    gettimeofday(&t1, NULL);

    // do something
    // ...

    // stop timer
    gettimeofday(&t2, NULL);

    // compute and print the elapsed time in milliseconds
    elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;      // sec to ms
    elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0;   // us to ms
    printf("%f ms.\n", elapsedTime);

    return 0;
}

On Linux you can use clock_gettime():
#include <time.h>                               // for clock_gettime() and struct timespec

struct timespec start, end;

clock_gettime(CLOCK_REALTIME, &start);          // get initial time-stamp

// ... do stuff ...

clock_gettime(CLOCK_REALTIME, &end);            // get final time-stamp

// subtract the time-stamps to get the elapsed time in nanoseconds
double t_ns = (double)(end.tv_sec - start.tv_sec) * 1.0e9 +
              (double)(end.tv_nsec - start.tv_nsec);

Here's a header file I wrote to do some simple performance profiling (using manual timers):
#ifndef __ZENTIMER_H__
#define __ZENTIMER_H__
#ifdef ENABLE_ZENTIMER
#include <stdio.h>
#ifdef WIN32
#include <windows.h>
#else
#include <sys/time.h>
#endif
#ifdef HAVE_STDINT_H
#include <stdint.h>
#elif HAVE_INTTYPES_H
#include <inttypes.h>
#else
typedef unsigned char uint8_t;
typedef unsigned long int uint32_t;
typedef unsigned long long uint64_t;
#endif
#ifdef __cplusplus
extern "C" {
#pragma }
#endif /* __cplusplus */
#define ZTIME_USEC_PER_SEC 1000000
/* ztime_t represents usec */
typedef uint64_t ztime_t;
#ifdef WIN32
static uint64_t ztimer_freq = 0;
#endif
static void
ztime (ztime_t *ztimep)
{
#ifdef WIN32
QueryPerformanceCounter ((LARGE_INTEGER *) ztimep);
#else
struct timeval tv;
gettimeofday (&tv, NULL);
*ztimep = ((uint64_t) tv.tv_sec * ZTIME_USEC_PER_SEC) + tv.tv_usec;
#endif
}
enum {
ZTIMER_INACTIVE = 0,
ZTIMER_ACTIVE = (1 << 0),
ZTIMER_PAUSED = (1 << 1),
};
typedef struct {
ztime_t start;
ztime_t stop;
int state;
} ztimer_t;
#define ZTIMER_INITIALIZER { 0, 0, 0 }
/* default timer */
static ztimer_t __ztimer = ZTIMER_INITIALIZER;
static void
ZenTimerStart (ztimer_t *ztimer)
{
ztimer = ztimer ? ztimer : &__ztimer;
ztimer->state = ZTIMER_ACTIVE;
ztime (&ztimer->start);
}
static void
ZenTimerStop (ztimer_t *ztimer)
{
ztimer = ztimer ? ztimer : &__ztimer;
ztime (&ztimer->stop);
ztimer->state = ZTIMER_INACTIVE;
}
static void
ZenTimerPause (ztimer_t *ztimer)
{
ztimer = ztimer ? ztimer : &__ztimer;
ztime (&ztimer->stop);
ztimer->state |= ZTIMER_PAUSED;
}
static void
ZenTimerResume (ztimer_t *ztimer)
{
ztime_t now, delta;
ztimer = ztimer ? ztimer : &__ztimer;
/* unpause */
ztimer->state &= ~ZTIMER_PAUSED;
ztime (&now);
/* calculate time since paused */
delta = now - ztimer->stop;
/* adjust start time to account for time elapsed since paused */
ztimer->start += delta;
}
static double
ZenTimerElapsed (ztimer_t *ztimer, uint64_t *usec)
{
#ifdef WIN32
static uint64_t freq = 0;
ztime_t delta, stop;
if (freq == 0)
QueryPerformanceFrequency ((LARGE_INTEGER *) &freq);
#else
#define freq ZTIME_USEC_PER_SEC
ztime_t delta, stop;
#endif
ztimer = ztimer ? ztimer : &__ztimer;
if (ztimer->state != ZTIMER_ACTIVE)
stop = ztimer->stop;
else
ztime (&stop);
delta = stop - ztimer->start;
if (usec != NULL)
*usec = (uint64_t) (delta * ((double) ZTIME_USEC_PER_SEC / (double) freq));
return (double) delta / (double) freq;
}
static void
ZenTimerReport (ztimer_t *ztimer, const char *oper)
{
fprintf (stderr, "ZenTimer: %s took %.6f seconds\n", oper, ZenTimerElapsed (ztimer, NULL));
}
#ifdef __cplusplus
}
#endif /* __cplusplus */
#else /* ! ENABLE_ZENTIMER */
#define ZenTimerStart(ztimerp)
#define ZenTimerStop(ztimerp)
#define ZenTimerPause(ztimerp)
#define ZenTimerResume(ztimerp)
#define ZenTimerElapsed(ztimerp, usec)
#define ZenTimerReport(ztimerp, oper)
#endif /* ENABLE_ZENTIMER */
#endif /* __ZENTIMER_H__ */
The ztime() function is the main logic you need — it gets the current time and stores it in a 64-bit unsigned integer measured in microseconds. You can then later do simple math to find out the elapsed time.
The ZenTimer*() functions are just helpers that take a pointer to a simple timer struct, ztimer_t, which records the start time and the end time. The ZenTimerPause()/ZenTimerResume() functions allow you to, well, pause and resume the timer in case you want to print out some debugging information that you don't want timed, for example.
You can find a copy of the original header file at http://www.gnome.org/~fejj/code/zentimer.h on the off chance that I messed up the HTML escaping of '<' characters or something. It's licensed under MIT/X11, so feel free to copy it into any project you do.
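For reference, a minimal usage sketch of the helpers above (assuming the header is saved as zentimer.h and you compile with ENABLE_ZENTIMER defined; do_work() is just a placeholder):
#define ENABLE_ZENTIMER
#include "zentimer.h"

static void do_work (void)
{
    /* ... the code you want to profile ... */
}

int main (void)
{
    ztimer_t timer = ZTIMER_INITIALIZER;

    ZenTimerStart (&timer);
    do_work ();
    ZenTimerStop (&timer);

    /* prints "ZenTimer: do_work took N.NNNNNN seconds" to stderr */
    ZenTimerReport (&timer, "do_work");

    return 0;
}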

The following is a group of versatile C functions for timer management based on the gettimeofday() system call. All the timer properties are contained in a single ticktimer struct: the interval you want, the total running time since timer initialization, a pointer to the callback you want invoked, and the number of times the callback has been called. A callback function looks like this:
void your_timer_cb (struct ticktimer *t) {
/* do your stuff here */
}
To initialize and start a timer, call ticktimer_init(your_timer, interval, TICKTIMER_RUN, your_timer_cb, 0).
In the main loop of your program, call ticktimer_tick(your_timer); it decides whether enough time has passed to invoke the callback.
To stop a timer, just call ticktimer_ctl(your_timer, TICKTIMER_STOP). A minimal usage sketch follows the two listings below.
ticktimer.h:
#ifndef __TICKTIMER_H
#define __TICKTIMER_H
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <sys/types.h>
#define TICKTIMER_STOP 0x00
#define TICKTIMER_UNCOMPENSATE 0x00
#define TICKTIMER_RUN 0x01
#define TICKTIMER_COMPENSATE 0x02
struct ticktimer {
u_int64_t tm_tick_interval;
u_int64_t tm_last_ticked;
u_int64_t tm_total;
unsigned ticks_total;
void (*tick)(struct ticktimer *);
unsigned char flags;
int id;
};
void ticktimer_init (struct ticktimer *, u_int64_t, unsigned char, void (*)(struct ticktimer *), int);
unsigned ticktimer_tick (struct ticktimer *);
void ticktimer_ctl (struct ticktimer *, unsigned char);
struct ticktimer *ticktimer_alloc (void);
void ticktimer_free (struct ticktimer *);
void ticktimer_tick_all (void);
#endif
ticktimer.c:
#include "ticktimer.h"
#define TIMER_COUNT 100
static struct ticktimer timers[TIMER_COUNT];
static struct timeval tm;
/*!
#brief
Initializes/sets the ticktimer struct.
#param timer
Pointer to ticktimer struct.
#param interval
Ticking interval in microseconds.
#param flags
Flag bitmask. Use TICKTIMER_RUN | TICKTIMER_COMPENSATE
to start a compensating timer; TICKTIMER_RUN to start
a normal uncompensating timer.
#param tick
Ticking callback function.
#param id
Timer ID. Useful if you want to distinguish different
timers within the same callback function.
*/
void ticktimer_init (struct ticktimer *timer, u_int64_t interval, unsigned char flags, void (*tick)(struct ticktimer *), int id) {
gettimeofday(&tm, NULL);
timer->tm_tick_interval = interval;
timer->tm_last_ticked = tm.tv_sec * 1000000 + tm.tv_usec;
timer->tm_total = 0;
timer->ticks_total = 0;
timer->tick = tick;
timer->flags = flags;
timer->id = id;
}
/*!
#brief
Checks the status of a ticktimer and performs a tick(s) if
necessary.
#param timer
Pointer to ticktimer struct.
#return
The number of times the timer was ticked.
*/
unsigned ticktimer_tick (struct ticktimer *timer) {
register typeof(timer->tm_tick_interval) now;
register typeof(timer->ticks_total) nticks, i;
if (timer->flags & TICKTIMER_RUN) {
gettimeofday(&tm, NULL);
now = tm.tv_sec * 1000000 + tm.tv_usec;
if (now >= timer->tm_last_ticked + timer->tm_tick_interval) {
timer->tm_total += now - timer->tm_last_ticked;
if (timer->flags & TICKTIMER_COMPENSATE) {
nticks = (now - timer->tm_last_ticked) / timer->tm_tick_interval;
timer->tm_last_ticked = now - ((now - timer->tm_last_ticked) % timer->tm_tick_interval);
for (i = 0; i < nticks; i++) {
timer->tick(timer);
timer->ticks_total++;
if (timer->tick == NULL) {
break;
}
}
return nticks;
} else {
timer->tm_last_ticked = now;
timer->tick(timer);
timer->ticks_total++;
return 1;
}
}
}
return 0;
}
/*!
#brief
Controls the behaviour of a ticktimer.
#param timer
Pointer to ticktimer struct.
#param flags
Flag bitmask.
*/
inline void ticktimer_ctl (struct ticktimer *timer, unsigned char flags) {
timer->flags = flags;
}
/*!
#brief
Allocates a ticktimer struct from an internal
statically allocated list.
#return
Pointer to the newly allocated ticktimer struct
or NULL when no more space is available.
*/
struct ticktimer *ticktimer_alloc (void) {
register int i;
for (i = 0; i < TIMER_COUNT; i++) {
if (timers[i].tick == NULL) {
return timers + i;
}
}
return NULL;
}
/*!
#brief
Marks a previously allocated ticktimer struct as free.
#param timer
Pointer to ticktimer struct, usually returned by
ticktimer_alloc().
*/
inline void ticktimer_free (struct ticktimer *timer) {
timer->tick = NULL;
}
/*!
#brief
Checks the status of all allocated timers from the
internal list and performs ticks where necessary.
#note
Should be called in the main loop.
*/
inline void ticktimer_tick_all (void) {
register int i;
for (i = 0; i < TIMER_COUNT; i++) {
if (timers[i].tick != NULL) {
ticktimer_tick(timers + i);
}
}
}
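As mentioned above, here is a minimal usage sketch tying the pieces together (the 500000 us interval and the endless main loop are illustrative choices, not part of the original code):
#include <stdio.h>
#include "ticktimer.h"

static void your_timer_cb (struct ticktimer *t) {
    printf("timer %d fired, %u ticks so far\n", t->id, t->ticks_total);
}

int main (void) {
    struct ticktimer *timer = ticktimer_alloc();   /* grab a slot from the static pool */

    if (timer == NULL)
        return 1;

    /* fire the callback every 500000 us (0.5 s), uncompensated */
    ticktimer_init(timer, 500000, TICKTIMER_RUN, your_timer_cb, 0);

    for (;;) {
        ticktimer_tick(timer);      /* or ticktimer_tick_all() for every allocated timer */
        /* ... the rest of your main loop ... */
    }

    /* unreachable in this sketch; normally you would end with: */
    /* ticktimer_ctl(timer, TICKTIMER_STOP); ticktimer_free(timer); */
    return 0;
}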

Using the time.h library, try something like this (note that clock() measures processor time on POSIX systems, not wall-clock time, and its granularity varies by platform):
#include <time.h>   // for clock() and CLOCKS_PER_SEC

clock_t start_time, end_time;
double elapsed_ms;

start_time = clock();
// Do something
end_time = clock();

// multiply before dividing so integer truncation doesn't zero out sub-second intervals
elapsed_ms = (double)(end_time - start_time) * 1000.0 / CLOCKS_PER_SEC;

If your Linux system supports it, clock_gettime(CLOCK_MONOTONIC) should be a high resolution timer that is unaffected by system date changes (e.g. NTP daemons).
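A minimal sketch of that approach, following the same pattern as the CLOCK_REALTIME example above (on older glibc versions you may need to link with -lrt):
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec start, end;

    clock_gettime(CLOCK_MONOTONIC, &start);

    // do something
    // ...

    clock_gettime(CLOCK_MONOTONIC, &end);

    // elapsed time in microseconds
    double elapsed_us = (end.tv_sec - start.tv_sec) * 1.0e6 +
                        (end.tv_nsec - start.tv_nsec) / 1.0e3;
    printf("%f us\n", elapsed_us);

    return 0;
}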

Great answers for GNU environments above and below...
But what if you're not running on an OS (or on a PC, for that matter), or you need to time your timer interrupts themselves? Here's a solution that uses the x86 CPU time-stamp counter directly. Not because this is good practice, or should ever be done when running under an OS...
Caveat: only works on x86, with frequency scaling disabled.
Under Linux, it only works on non-tickless kernels.
rdtsc.c:
#include <sys/time.h>
#include <time.h>
#include <unistd.h>   // for sleep()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
typedef unsigned long long int64;
static __inline__ int64 getticks(void)
{
unsigned a, d;
asm volatile("rdtsc" : "=a" (a), "=d" (d));
return (((int64)a) | (((int64)d) << 32));
}
int main(){
int64 tick,tick1;
unsigned time=0,mt;
// mt is the divisor that converts ticks to milliseconds (ticks per ms, i.e. the CPU clock in kHz)
FILE *pf;
int i,r,l,n=0;
char s[100];
// time how long it takes to get the divisors, as a test
tick = getticks();
// get the divisors - todo: for max performance this can
// output a new binary or library with these values hardcoded
// for the relevant CPU - if you use the equivalent assembler for
// that CPU
pf = fopen("/proc/cpuinfo","r");
do {
r=fscanf(pf,"%99s",&s[0]); // bounded read to avoid overflowing s
if (r<0) {
n=5; break;
} else if (n==0) {
if (strcmp("MHz",s)==0) n=1;
} else if (n==1) {
if (strcmp(":",s)==0) n=2;
} else if (n==2) {
n=3;
};
} while (n<3);
fclose(pf);
s[9]=(char)0;
memmove(&s[4],&s[5],strlen(&s[5])+1); // remove the decimal point (regions overlap, so use memmove)
mt=atoi(s);
printf("#define mt %u // (%s Hz) hardcode this for your a CPU-specific binary ;-)\n",mt,s);
tick1 = getticks();
time = (unsigned)((tick1-tick)/mt);
printf("%u ms\n",time);
// time the duration of sleep(1) - plus overheads ;-)
tick = getticks();
sleep(1);
tick1 = getticks();
time = (unsigned)((tick1-tick)/mt);
printf("%u ms\n",time);
return 0;
}
compile and run with
$ gcc rdtsc.c -o rdtsc && ./rdtsc
It reads the divisor for your CPU from /proc/cpuinfo and shows how long that read took in milliseconds, as well as how long sleep(1) takes in milliseconds... assuming the MHz rating in /proc/cpuinfo always contains 3 decimal places :-o
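If you would rather not rely on the three-decimal-place assumption, a sketch of a more defensive way to obtain the same divisor is to parse the MHz value as a floating-point number and scale it explicitly (ticks_per_ms() here is a hypothetical helper, not part of the program above):
// sketch: read "cpu MHz : NNNN.NNN" from /proc/cpuinfo and return ticks per millisecond
#include <stdio.h>

static unsigned ticks_per_ms(void)
{
    FILE *pf = fopen("/proc/cpuinfo", "r");
    char line[256];
    double mhz = 0.0;

    if (pf == NULL)
        return 0;

    while (fgets(line, sizeof line, pf) != NULL) {
        if (sscanf(line, "cpu MHz : %lf", &mhz) == 1)
            break;
    }
    fclose(pf);

    // MHz * 1e6 ticks per second => MHz * 1e3 ticks per millisecond
    return (unsigned)(mhz * 1000.0);
}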

Related

using gettimeofday() equivalents on windows

I'm trying to use 2 different equivalents for UNIX's gettimeofday() function on Windows, using Visual Studio 2013.
I took the first one from here; for the second one, I'm using the _ftime64_s function, as explained here.
They work, but not as I expected. I want to get different values when printing the seconds, or at least the milliseconds, but I get the same values for the two prints with gettimeofday() (mytime1 & mytime2) and for the two with _ftime64_s (mytime3 & mytime4).
However, it's worth mentioning that the milliseconds value is indeed different between these two functions (that is, the milliseconds value of mytime1/mytime2 is different from that of mytime3/mytime4).
Here's my code:
#include <stdio.h>
#include <Windows.h>
#include <stdint.h>
#include <sys/timeb.h>
#include <time.h>
#define WIN32_LEAN_AND_MEAN
int gettimeofday(struct timeval * tp, struct timezone * tzp)
{
// Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's
static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);
SYSTEMTIME system_time;
FILETIME file_time;
uint64_t time;
GetSystemTime(&system_time);
SystemTimeToFileTime(&system_time, &file_time);
time = ((uint64_t)file_time.dwLowDateTime);
time += ((uint64_t)file_time.dwHighDateTime) << 32;
tp->tv_sec = (long)((time - EPOCH) / 10000000L);
tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
return 0;
}
int main()
{
/* working with struct timeval and gettimeofday equivalent */
struct timeval mytime1;
struct timeval mytime2;
gettimeofday(&(mytime1), NULL);
gettimeofday(&(mytime2), NULL);
printf("Seconds: %d\n", (int)(mytime1.tv_sec));
printf("Milliseconds: %d\n", (int)(mytime1.tv_usec));
printf("Seconds: %d\n", (int)(mytime2.tv_sec));
printf("Milliseconds: %d\n", (int)(mytime2.tv_usec));
/* working with _ftime64_s */
struct _timeb mytime3;
struct _timeb mytime4;
_ftime64_s(&mytime3);
_ftime64_s(&mytime4);
printf("Seconds: %d\n", mytime3.time);
printf("Milliseconds: %d\n", mytime3.millitm);
printf("Seconds: %d\n", mytime4.time);
printf("Milliseconds: %d\n", mytime4.millitm);
return (0);
}
I tried other format specifiers (%f, %lu) and castings ((float), (double), (long), (size_t)), but it didn't matter. Suggestions will be welcomed.
QueryPerformanceCounter is used for accurate timing on Windows. Usage can be as follows:
#include <stdio.h>
#include <stdint.h>
#include <windows.h>

uint64_t microseconds()
{
    LARGE_INTEGER fq, t;
    QueryPerformanceFrequency(&fq);
    QueryPerformanceCounter(&t);
    return (1000000 * t.QuadPart) / fq.QuadPart;
}
This does not give you time relative to any epoch, as far as I know. For that you need GetSystemTimePreciseAsFileTime, which is only available on Windows 8 and higher.
// GetSystemTimePreciseAsFileTime is loaded dynamically so the program still runs
// on older Windows versions; declare its function type by hand:
typedef VOID (WINAPI T_GetSystemTimePreciseAsFileTime)(LPFILETIME);

uint64_t MyGetSystemTimePreciseAsFileTime()
{
    HMODULE lib = LoadLibraryW(L"kernel32.dll");
    if (!lib) return 0;

    FARPROC fp = GetProcAddress(lib, "GetSystemTimePreciseAsFileTime");
    ULARGE_INTEGER largeInt;
    largeInt.QuadPart = 0;

    if (fp)
    {
        T_GetSystemTimePreciseAsFileTime* pfn = (T_GetSystemTimePreciseAsFileTime*)fp;
        FILETIME fileTime = { 0 };
        pfn(&fileTime);
        largeInt.HighPart = fileTime.dwHighDateTime;
        largeInt.LowPart = fileTime.dwLowDateTime;
    }

    FreeLibrary(lib);
    return largeInt.QuadPart;
}
int main()
{
    uint64_t t1 = microseconds();
    uint64_t t2 = microseconds();
    printf("t1: %llu\n", t1);
    printf("t2: %llu\n", t2);
    return (0);
}

clock_gettime API is giving negative values

I want the current system time in microseconds, so I have written a program using clock_gettime, but it sometimes returns negative values. Can someone help me with this?
int main(void) {
struct timespec tms;
/* The C11 way */
/* if (! timespec_get(&tms, TIME_UTC)) { */
/* POSIX.1-2008 way */
if (clock_gettime(CLOCK_REALTIME,&tms)) {
return -1;
}
/* seconds, multiplied with 1 million */
long long micros = tms.tv_sec * 1000000;
/* Add full microseconds */
micros += tms.tv_nsec/1000;
printf("Microseconds: %lld\n",micros);
return 0;
}
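A likely cause of the negative values: on platforms where time_t is 32 bits, tms.tv_sec * 1000000 overflows in 32-bit arithmetic before the result is widened to long long. A sketch of the fix is to force the multiplication into 64-bit arithmetic first:
/* widen tv_sec before multiplying so the product cannot overflow */
long long micros = (long long)tms.tv_sec * 1000000LL;
micros += tms.tv_nsec / 1000;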
Hope the below code helps you:
#include <stdio.h>
#include <time.h>

void get_time_in_ms(void)
{
    long ms;
    time_t s;
    struct timespec spec;
    char tm[32];

    clock_gettime(CLOCK_REALTIME, &spec);

    s = spec.tv_sec;
    ms = spec.tv_nsec / 1000000;    /* Convert nanoseconds to milliseconds */

    printf("Current time: %ld.%03ld seconds since the Epoch\n", (long)s, ms);

    sprintf(tm, "%ld%03ld", (long)s, ms);
    printf("Time : %s\n", tm);
}

int main(void)
{
    get_time_in_ms();
    return 0;
}

How to make a microsecond-precise timer on the STM32L-Discovery ARM board?

I'm trying to implement the Dallas OneWire protocol, but I'm having trouble generating a microsecond delay on the STM32l-Discovery.
How do I implement a timer accurate enough to delay the program for x microseconds?
For a start I must tell you that there is no way to accomplish a precise usec delay purely in software. Even if you use an interrupt-based system you will have latencies. Of course, you can achieve better accuracy with a higher CPU frequency.
In order to connect with a 1-Wire device you can use:
An external interface like the DS2482-100
A software 1-Wire implementation using pin polling.
For the second solution you have to call a software-based delay. You can make a flag-polling delay or an interrupt-based flag-polling delay. In both cases you can be sure that at least a certain amount of time has passed, but you cannot be sure how much more time has passed. This is because of CPU latency, the CPU clock, etc.
For example, consider the following implementation. We program a hardware TIMER to count up continuously and we check the TIMER's value. We call the time between TIMER ticks a "jiffy", and "jiffies" is the TIMER's max value:
Low level driver part (ex: driver.h)
// ...
#define JF_TIM_VALUE (TIM7->CNT)
int JF_setfreq (uint32_t jf_freq, uint32_t jiffies);
// ...
Low level driver part (ex: driver.c)
// ...
#include <stm32l1xx.h>
#include <misc.h>
#include <stm32l1xx_rcc.h>
#include <stm32l1xx_tim.h>
/*
* Time base configuration using the TIM7
* \param jf_freq The TIMER's frequency
* \param jiffies The TIMER's max count value
*/
int JF_setfreq (uint32_t jf_freq, uint32_t jiffies) {
uint32_t psc=0;
RCC_APB1PeriphClockCmd(RCC_APB1Periph_TIM7, ENABLE);
SystemCoreClockUpdate ();
if (jf_freq)
psc = (SystemCoreClock / jf_freq) - 1;
if (psc < 0xFFFF) TIM7->PSC = psc;
else return 1;
if (jiffies < 0xFFFF) TIM7->ARR = jiffies;
else return 1;
TIM7->CR1 |= TIM_CR1_CEN;
return 0;
}
// ...
Middleware jiffy system with some delay implementations
jiffy.h:
#include "string.h"
typedef int32_t jiffy_t; // Jiffy type 4 byte integer
typedef int (*jf_setfreq_pt) (uint32_t, uint32_t); //Pointer to setfreq function
typedef volatile struct {
jf_setfreq_pt setfreq; // Pointer to driver's timer set freq function
jiffy_t *value; // Pointer to timers current value
uint32_t freq; // timer's frequency
uint32_t jiffies; // jiffies max value (timer's max value)
jiffy_t jpus; // Variable for the delay function
}jf_t;
/*
* ============= PUBLIC jiffy API =============
*/
/*
* Link functions
*/
void jf_link_setfreq (jf_setfreq_pt pfun);
void jf_link_value (jiffy_t* v);
/*
* User Functions
*/
void jf_deinit (void);
int jf_init (uint32_t jf_freq, uint32_t jiffies);
jiffy_t jf_per_usec (void);
void jf_delay_us (int32_t usec);
int jf_check_usec (int32_t usec);
jiffy.c:
#include "jiffy.h"
static jf_t _jf;
#define JF_MAX_TIM_VALUE (0xFFFF) // 16bit counters
//Connect the Driver's Set frequency function
void jf_link_setfreq (jf_setfreq_pt pfun) {
_jf.setfreq = pfun;
}
// Connect the timer's value to jiffy struct
void jf_link_value (jiffy_t* v) {
_jf.value = v;
}
// De-Initialize the jf data and un-connect the functions
// from the driver
void jf_deinit (void) {
memset ((void*)&_jf, 0, sizeof (jf_t));
}
// Initialise the jf to a desired jiffy frequency f
int jf_init (uint32_t jf_freq, uint32_t jiffies) {
if (_jf.setfreq) {
if ( _jf.setfreq (jf_freq, jiffies) )
return 1;
_jf.jiffies = jiffies;
_jf.freq = jf_freq;
_jf.jpus = jf_per_usec ();
return 0;
}
return 1;
}
// Return the systems best approximation for jiffies per usec
jiffy_t jf_per_usec (void) {
jiffy_t jf = _jf.freq / 1000000;
if (jf <= _jf.jiffies)
return jf;
else
// We can not count beyond timer's reload
return 0;
}
/*!
* \brief
* A code based delay implementation, using jiffies for timing.
* This is NOT accurate but it ensures that the time passed is always
* more than the requested value.
* The delay values are multiplications of 1 usec.
* \param
* usec Time in usec for delay
*/
void jf_delay_us (int32_t usec) {
jiffy_t m, m2, m1 = *_jf.value;
usec *= _jf.jpus;
if (*_jf.value - m1 > usec) // Very small delays will return here.
return;
// Delay loop: Eat the time difference from usec value.
while (usec>0) {
m2 = *_jf.value;
m = m2 - m1;
usec -= (m>0) ? m : _jf.jiffies + m;
m1 = m2;
}
}
/*!
* \brief
* A code based polling version delay implementation, using jiffies for timing.
* This is NOT accurate but it ensures that the time passed is always
* more than the requested value.
* The delay values are multiplications of 1 usec.
* \param
* usec Time in usec for delay
*/
int jf_check_usec (int32_t usec) {
static jiffy_t m1=-1, cnt;
jiffy_t m, m2;
if (m1 == -1) {
m1 = *_jf.value;
cnt = _jf.jpus * usec;
}
if (cnt>0) {
m2 = *_jf.value;
m = m2-m1;
cnt-= (m>0) ? m : _jf.jiffies + m;
m1 = m2;
return 1; // wait
}
else {
m1 = -1;
return 0; // do not wait any more
}
}
Hmmm, you made it all the way here. Nice.
So now you can use it in your application like this:
main.c:
#include "driver.h"
#include "jiffy.h"
void do_some_job1 (void) {
// job 1
}
void do_some_job2 (void) {
// job 2
}
int main (void) {
jf_link_setfreq ((jf_setfreq_pt)JF_setfreq); // link with driver
jf_link_value ((jiffy_t*)&JF_TIM_VALUE);
jf_init (1000000, 1000); // 1MHz timer, 1000 counts, 1 usec per count
// use delay version
do_some_job1 ();
jf_delay_us (300); // wait for at least 300 usec
do_some_job1 ();
// use polling version
do_some_job1 ();
while (jf_check_usec (300)) {
do_some_job2 (); // keep calling for at least 300 usec
}
}
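If the part has the Cortex-M3 DWT cycle counter available (the STM32L-Discovery's STM32L152 is a Cortex-M3, so it normally does), another common approach is a busy-wait on DWT->CYCCNT. The following is only a sketch under that assumption, using the standard CMSIS register names rather than anything from the jiffy code above:
#include <stm32l1xx.h>   // pulls in the CMSIS core definitions (DWT, CoreDebug, SystemCoreClock)

// Enable the cycle counter once at start-up.
static void dwt_init (void) {
    CoreDebug->DEMCR |= CoreDebug_DEMCR_TRCENA_Msk;   // enable the trace block
    DWT->CYCCNT = 0;
    DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;             // start the cycle counter
}

// Busy-wait for at least 'us' microseconds (blocking, lower bound only).
static void delay_us (uint32_t us) {
    uint32_t start  = DWT->CYCCNT;
    uint32_t cycles = us * (SystemCoreClock / 1000000U);

    while ((DWT->CYCCNT - start) < cycles)
        ;   // unsigned subtraction handles counter wrap-around
}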


nanosleep does not work for values less than a second

I have a program (mixed C and Fortran, although that doesn't seem to be relevant) that uses nanosleep. However, if my timespec has a tv_sec value of 0, it simply doesn't sleep. The tv_nsec value can be microseconds shy of a full second, but it does not sleep. (If tv_sec is 1, it has no problem sleeping for a second.) Why would this be?
To make things more confusing, usleep with an appropriate value (i.e. 995000 usec) sleeps for just about a second as expected.
I'm seeing this problem with a RHEL 5.8 and a RHEL 6.4 box. Both are using gcc.
Here's the function that calls nanosleep:
void msleep(int *milliseconds)
{
long usec;
struct timespec sleep;
usec = (*milliseconds) % 1000;
sleep.tv_sec = (*milliseconds) / 1000;
sleep.tv_nsec = 1000*usec;
nanosleep(&sleep, NULL);
}
Obviously, I don't actually need nanosecond precision!
I've also tested a version in which I did check the return value; it was always 0 (success), and thus the rem output parameter (remaining time if interrupted) never got set.
You are missing a factor of 1000.
Try this:
#define _POSIX_C_SOURCE 199309L /* shall be >= 199309L */
#include <time.h>
void msleep(int *milliseconds)
{
int ms_remaining = (*milliseconds) % 1000;
long usec = ms_remaining * 1000;
struct timespec ts_sleep;
ts_sleep.tv_sec = (*milliseconds) / 1000;
ts_sleep.tv_nsec = 1000*usec;
nanosleep(&ts_sleep, NULL);
}
More compact:
#define _POSIX_C_SOURCE 199309L /* shall be >= 199309L */
#include <time.h>
void msleep(int * pmilliseconds)
{
struct timespec ts_sleep =
{
*pmilliseconds / 1000,
(*pmilliseconds % 1000) * 1000000L
};
nanosleep(&ts_sleep, NULL);
}
Finally a complete implementation including error handling and the case of nanosleep() being interrupted early:
#define _POSIX_C_SOURCE 199309L
#include <time.h>
#include <errno.h>
#include <stdio.h>
int ms_sleep(unsigned int ms)
{
int result = 0;
{
struct timespec ts_remaining =
{
ms / 1000,
(ms % 1000) * 1000000L
};
do
{
struct timespec ts_sleep = ts_remaining;
result = nanosleep(&ts_sleep, &ts_remaining);
}
while ((EINTR == errno) && (-1 == result));
}
if (-1 == result)
{
perror("nanosleep() failed");
}
return result;
}
The following is a wrapper to fulfil the OP's requirements:
#include <errno.h>
#include <stdio.h>
int ms_sleep(unsigned int);
void msleep(int * pms)
{
int result = 0;
if ((NULL == pms) || (0 > *pms)) /* Check for valid input. */
{
errno = EINVAL;
result = -1;
}
else
{
result = ms_sleep(*pms);
}
if (-1 == result)
{
perror("ms_sleep() failed");
/* Exit and/or log error here. */
}
}
Update (referring to chux's comment below):
Assuming at least C99, this part of the above code
struct timespec ts_sleep =
{
*pmilliseconds / 1000,
(*pmilliseconds % 1000) * 1000000L
};
might better be written like this
struct timespec ts_sleep =
{
.tv_sec = *pmilliseconds / 1000,
.tv_nsec = (*pmilliseconds % 1000) * 1000000L
};
to not rely on the order of struct timespec's members.
I did it like below and it worked...
#include <stdio.h>
#include <time.h> /* Needed for struct timespec and nanosleep() */

int nsleep(long milliseconds)
{
    struct timespec req, rem;

    if (milliseconds > 999)
    {
        req.tv_sec = (int)(milliseconds / 1000);                            /* Must be non-negative */
        req.tv_nsec = (milliseconds - ((long)req.tv_sec * 1000)) * 1000000; /* Must be in range of 0 to 999999999 */
    }
    else
    {
        req.tv_sec = 0;                                                     /* Must be non-negative */
        req.tv_nsec = milliseconds * 1000000;                               /* Must be in range of 0 to 999999999 */
    }

    return nanosleep(&req, &rem);
}

int main()
{
    int ret = nsleep(2500);
    printf("sleep result %d\n", ret);
    return 0;
}
Here is the method
static void Sleep(long lMs){
    //Calculate the nanosecond part
    long lRemainingMilliSecond = lMs % 1000;
    long lNanoSecond = lRemainingMilliSecond * 1000000;

    struct timespec ts_sleep, ts_remaining;
    ts_sleep.tv_sec = lMs / 1000;
    ts_sleep.tv_nsec = lNanoSecond;

    nanosleep(&ts_sleep, &ts_remaining);
}
The concept is explained in more detail on the following page:
Convert milliseconds to timespec - GNU Porting
