In the following program, I have tried to measure the execution time of a job (a for loop). Most of the time it works fine; however, sometimes it returns negative values! My first guess is that the variable may be overflowing. Can anyone please let me know whether I am right or not? How can I solve the problem?
Thanks
#include <stdio.h>
#include <time.h>

int main(int argc, char **argv)
{
    long int ST;
    long int ET;
    int i;
    int a[1000], b[1000] = {0};
    struct timespec gettime_now;

    clock_gettime(CLOCK_REALTIME, &gettime_now);
    ST = gettime_now.tv_nsec;
    for (i = 0; i < 1000; i++)
        a[i] = b[i];
    clock_gettime(CLOCK_REALTIME, &gettime_now);
    ET = gettime_now.tv_nsec;
    printf("Time diff: %ld\n", ET - ST);
}
You are neglecting the tv_sec field of struct timespec in both cases and using only the nanosecond field, which is not correct: ST's and ET's tv_nsec values may belong to different seconds.
From the man page:
tv_sec - seconds since the Epoch
tv_nsec - nanoseconds within the current second (0 to 999999999, i.e. 1/1000000000 s precision)
It is better to write your own function to find the difference. Sample code (not tested):
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec temp;
    if ((end.tv_nsec - start.tv_nsec) < 0)
    {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
    }
    else
    {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }
    return temp;
}
Refer to this for the actual diff function and an example.
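For instance, the question's loop could then be timed like this (a minimal sketch, not tested; a, b, and i are the variables from the question):
struct timespec start, end, d;
clock_gettime(CLOCK_REALTIME, &start);
for (i = 0; i < 1000; i++)
    a[i] = b[i];
clock_gettime(CLOCK_REALTIME, &end);
d = diff(start, end);
printf("Time diff: %ld s %ld ns\n", (long)d.tv_sec, d.tv_nsec);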
I'm using C and I'm trying to get the clock resolution, but I get this value: 0.000000
Here is the code I'm using:
#include <time.h>
#include <stdio.h>

double duration(struct timespec start, struct timespec end) {
    return end.tv_sec - start.tv_sec
        + ((end.tv_nsec - start.tv_nsec) / 1000000000.0);
}

double getResolution() {
    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);
    do {
        clock_gettime(CLOCK_MONOTONIC, &end);
    } while (duration(start, end) == 0.0);
    return duration(start, end);
}

int main() {
    printf("%f", getResolution());
    return 0;
}
You need to increase the precision in your printf("%f"). Using printf("%.12f") would probably be enough to show some non-zero decimals.
Calculating the floating-point duration in the while loop may force the program to actually perform that calculation if the compiler isn't clever enough to figure out that you only need to see whether the clock has changed at all. You could just memcmp the start and end structs instead.
Don't take the struct timespecs by value in your duration function; supply pointers to the function instead. It should be cheaper.
Use the clock_getres function to get the resolution. The runtime value you get with your homebrewed solution depends on, among other things, the speed the CPU is currently running at.
Example:
#include <stdio.h>
#include <string.h>
#include <time.h>

// taking the arguments via pointers:
double duration(const struct timespec* start, const struct timespec* end) {
    return end->tv_sec - start->tv_sec +
        ((end->tv_nsec - start->tv_nsec) / 1000000000.0);
}

double getResolution() {
    struct timespec start = {0}, end = {0};
    clock_gettime(CLOCK_MONOTONIC, &start);
    do {
        clock_gettime(CLOCK_MONOTONIC, &end);
        // using memcmp below:
    } while (memcmp(&start, &end, sizeof start) == 0);
    return duration(&start, &end);
}

int main() {
    struct timespec base = {0}, res;
    // using the proper function to get the resolution:
    clock_getres(CLOCK_MONOTONIC, &res);
    // comparing the results:
    printf("clock_getres = %.12f\n", duration(&base, &res));
    printf("getResolution = %.12f\n", getResolution());
}
I am writing a wrapper function sleep_new() for clock_nanosleep() which would make thread suspension easier for me.
// POSIX.1-2017 is what the compiler is confined to.
#define _XOPEN_SOURCE 700

// POSIX headers.
#include <stdint.h>
#include <time.h>
#include <stdio.h>
#include <string.h>

// Other headers.
#include "sleep_new.h"

void sleep_new(long value, const char unit[3]){
    // Create a timespec structure and set its members.
    // The members are added together! So to set the time "1.5 s" we set "t.tv_sec = 1" and "t.tv_nsec = 500000000".
    // ".tv_sec" and ".tv_nsec" are in different units (seconds and nanoseconds).
    struct timespec sleep_time;

    // Set flags to 0 to use relative time; TIMER_ABSTIME would mean absolute time.
    int flags = 0;

    // Choose the clock, i.e. CLOCK_MONOTONIC is the "clock_id" for the clock started at system boot.
    int clock_id = CLOCK_MONOTONIC;

    // Set the timespec structure's members according to the chosen unit.
    if (!strcmp(unit, "s")) {
        sleep_time.tv_sec = value;
        sleep_time.tv_nsec = 0;
    }
    else if (!strcmp(unit, "ns")){
        sleep_time.tv_sec = 0;
        sleep_time.tv_nsec = value;
    }
    else if (!strcmp(unit, "us")){
        sleep_time.tv_sec = 0;
        sleep_time.tv_nsec = value * 1000;
    }
    else if (!strcmp(unit, "ms")){
        sleep_time.tv_sec = 0;
        sleep_time.tv_nsec = value * 1000000;
    }
    else{
        puts("Unit not supported - choose between: s, ms, us, ns\n");
    }

    // Because the last argument is NULL, the remaining time is not stored anywhere if the sleep is interrupted.
    clock_nanosleep(clock_id, flags, &sleep_time, NULL);
}
int main(int argc, char *argv[])
{
    // Counter. (Note: with a uint8_t counter, i < 256 would always be true and the loop would never end.)
    unsigned int i;
    for (i = 0; i < 256; i++) {
        // Stdout is line-buffered, so we either have to include '\n' at the end or flush it manually.
        // Uncomment one example, A or B.
        // A
        //printf("%d\n", i);
        // B
        printf("%d, ", i);
        fflush(stdout);
        // Sleep for 1000 ms between prints.
        sleep_new(1000, "ms");
    }
    return 0;
}
If I call this function with sleep_new(1, "s") or sleep_new(2, "s") it works fine, because it sets sleep_time.tv_sec = value; and sleep_time.tv_nsec = 0;.
In any other scenario, e.g. sleep_new(1000, "ms"), something is wrong and the sleep is not applied. I debugged the application and the values are assigned to the timespec members just fine, but clock_nanosleep() just ignores them.
I am using type long for the value because I read in POSIX here that the header time.h defines the timespec structure's members: tv_nsec, which needs long, and tv_sec, which uses time_t, in turn defined in the header sys/types.h like this:
time_t shall be an integer type.
So, because long can also hold int values, I expected this to work, but it doesn't. Does anyone have any suggestions?
tv_nsec is the nanoseconds part within a single second, so it must range from 0 to 999999999. 1000 * 1000000 nanoseconds is too much: that's 1 second! With an out-of-range tv_nsec, clock_nanosleep() fails with EINVAL, which is why the sleep is not applied. The proper calculation could look like:
sleep_time.tv_sec = value / 1000;
sleep_time.tv_nsec = (value % 1000) * 1000000;
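Applied to the wrapper, the "us" and "ms" branches could be normalized the same way (a sketch, untested):
else if (!strcmp(unit, "us")){
    sleep_time.tv_sec = value / 1000000;
    sleep_time.tv_nsec = (value % 1000000) * 1000;
}
else if (!strcmp(unit, "ms")){
    sleep_time.tv_sec = value / 1000;
    sleep_time.tv_nsec = (value % 1000) * 1000000;
}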
I have to track how long a task executes for. I am working on Linux, but I do not have access to the kernel itself.
My task simply busy-loops until the process has been executing for a certain amount of time. Then the process is supposed to break out of this loop.
I had a somewhat working version that used clock_gettime() from time.h. I stored the time since the Epoch right before I busy-looped in a "start" variable. Then, in each iteration of the loop, I checked the time since the Epoch again in another variable called "current".
On each iteration of the loop, I took the difference between "current" and "start". If that difference was greater than or equal to my requested execution time, I broke out of the loop.
The trouble is that clock_gettime() with CLOCK_REALTIME does not factor in suspension of a task. So if my task suspends, the way I am doing this now treats the time the task is suspended as if it were still executing.
Does anyone have an alternative to clock_gettime() that will allow a timer to somehow ignore the suspension time? Code of my current method below.
//DOES NOT HANDLE TASK SUSPENSION
#include <time.h>

#define BILLION 1E9

//Set execution time to 2 seconds
double executionTime = 2;
//Variable used later to compute difference in time
double elapsedTime = -1;

struct timespec start;
struct timespec current;

//Get time before we busy-loop
clock_gettime(CLOCK_REALTIME, &start);

long long i; //wide enough that the 10000000000 bound fits
for (i = 0; i < 10000000000; i++)
{
    //Get time on each busy-loop iteration
    clock_gettime(CLOCK_REALTIME, &current);
    elapsedTime = (current.tv_sec - start.tv_sec) + ((current.tv_nsec - start.tv_nsec) / BILLION);
    //If we have been executing for the specified execution time, break.
    if (elapsedTime >= executionTime)
    {
        break;
    }
}
Change CLOCK_REALTIME to CLOCK_PROCESS_CPUTIME_ID.
The example below uses sleep(), so it takes several seconds of wall-clock time to accumulate even a small amount of CPU time.
#include <stdio.h>
#include <unistd.h>
#include <time.h>

#define BILLION 1E9

int main(void) {
    double executionTime = 0.0001;
    double elapsedTime = -1;
    double elapsedTimertc = -1;
    struct timespec startrtc;
    struct timespec start;
    struct timespec currentrtc;
    struct timespec current;

    clock_gettime(CLOCK_REALTIME, &startrtc);
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &start);
    for (;;)
    {
        sleep(1);
        clock_gettime(CLOCK_REALTIME, &currentrtc);
        clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &current);
        elapsedTime = (current.tv_sec - start.tv_sec) + ((current.tv_nsec - start.tv_nsec) / BILLION);
        elapsedTimertc = (currentrtc.tv_sec - startrtc.tv_sec) + ((currentrtc.tv_nsec - startrtc.tv_nsec) / BILLION);
        if (elapsedTime >= executionTime)
        {
            break;
        }
    }
    printf("elapsed time %f\n", elapsedTime);
    printf("elapsed time %f\n", elapsedTimertc);
}
I want the current system time in microseconds, so I have written a program using clock_gettime(), but it sometimes returns negative values. Can someone help me with this?
#include <stdio.h>
#include <time.h>

int main(void) {
    struct timespec tms;

    /* The C11 way */
    /* if (! timespec_get(&tms, TIME_UTC)) { */

    /* POSIX.1-2008 way */
    if (clock_gettime(CLOCK_REALTIME, &tms)) {
        return -1;
    }
    /* seconds, multiplied with 1 million */
    long long micros = tms.tv_sec * 1000000;
    /* Add full microseconds */
    micros += tms.tv_nsec / 1000;
    printf("Microseconds: %lld\n", micros);
    return 0;
}
Hope the below code helps you:
#include <stdio.h>
#include <math.h>
#include <time.h>

void get_time_in_ms()
{
    long ms;
    time_t time;
    struct timespec spec;
    char tm[32];

    clock_gettime(CLOCK_REALTIME, &spec);
    time = spec.tv_sec;
    // Convert nanoseconds to milliseconds; divide in floating point so round() has something to round
    ms = round(spec.tv_nsec / 1.0e6);
    printf("Current time: %lu.%03ld seconds since the Epoch\n", (unsigned long)time, ms);
    sprintf(tm, "%lu%03ld", (unsigned long)time, ms);
    printf("Time : %s\n", tm);
}

int main(void) {
    get_time_in_ms();
    return 0;
}
Is there a way to get millisecond-precision, accurate (at least to within a few ms) times in C using a cross-platform approach?
On a POSIX system I can use sys/time.h, but that is not cross-platform.
The stdlib time() function only gives second-level precision.
I haven't found a cross-platform solution for measuring time in C, per se. However, what I do is use almost identical functions for Unix and Windows. I created this gist because I always have to look this up again every time. In short:
Unix
#include <stdio.h>
#include <time.h>
#include <unistd.h>

long diff_micro(struct timespec *start, struct timespec *end)
{
    /* us (subtract first so the multiplication cannot overflow a 32-bit long) */
    return ((end->tv_sec - start->tv_sec) * 1000000) +
           ((end->tv_nsec - start->tv_nsec) / 1000);
}

long diff_milli(struct timespec *start, struct timespec *end)
{
    /* ms */
    return ((end->tv_sec - start->tv_sec) * 1000) +
           ((end->tv_nsec - start->tv_nsec) / 1000000);
}

int main(int argc, char **argv)
{
    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);
    // Activity to be timed: sleep 1 second (mirrors Sleep(1000) ms in the Win32 version)
    sleep(1);
    clock_gettime(CLOCK_MONOTONIC, &end);
    printf("%ld us\n", diff_micro(&start, &end));
    printf("%ld ms\n", diff_milli(&start, &end));
    return 0;
}
source for Unix solution
Win32
#include <Windows.h>
#include <stdio.h>

long diff_micro(LARGE_INTEGER *start, LARGE_INTEGER *end)
{
    LARGE_INTEGER Frequency, elapsed;
    QueryPerformanceFrequency(&Frequency);
    elapsed.QuadPart = end->QuadPart - start->QuadPart;
    elapsed.QuadPart *= 1000000;
    elapsed.QuadPart /= Frequency.QuadPart;
    return (long)elapsed.QuadPart;
}

long diff_milli(LARGE_INTEGER *start, LARGE_INTEGER *end)
{
    LARGE_INTEGER Frequency, elapsed;
    QueryPerformanceFrequency(&Frequency);
    elapsed.QuadPart = end->QuadPart - start->QuadPart;
    elapsed.QuadPart *= 1000;
    elapsed.QuadPart /= Frequency.QuadPart;
    return (long)elapsed.QuadPart;
}

int main(int argc, char **argv)
{
    LARGE_INTEGER StartingTime, EndingTime;
    QueryPerformanceCounter(&StartingTime);
    // Activity to be timed
    Sleep(1000);
    QueryPerformanceCounter(&EndingTime);
    printf("%ld us\n", diff_micro(&StartingTime, &EndingTime));
    printf("%ld ms\n", diff_milli(&StartingTime, &EndingTime));
    return 0;
}
source used for Win32 solution
You can try something like this:
#include <time.h>
clock_t uptime = clock() / (CLOCKS_PER_SEC / 1000);
See this link.
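Note that clock() measures CPU time, not wall-clock time, and the one-liner above assumes CLOCKS_PER_SEC is at least 1000 (otherwise the divisor is 0). A minimal sketch of timing a code region this way:
#include <stdio.h>
#include <time.h>

int main(void) {
    clock_t start = clock();
    // ... activity to be timed ...
    clock_t end = clock();
    // Convert clock ticks to milliseconds of CPU time.
    double ms = (double)(end - start) * 1000.0 / CLOCKS_PER_SEC;
    printf("%f ms of CPU time\n", ms);
    return 0;
}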
The best way is to use std::chrono (note: this is C++, not plain C):
#include <chrono>
...
auto begin = std::chrono::high_resolution_clock::now();
...
auto end = std::chrono::high_resolution_clock::now();
auto elapsedTime = std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count();