I have a C program that has to execute a series of other programs. I need to get the execution time of each of these programs in order to create a log of those times.
I thought of using system() to run each program, but I don't know how to get the execution time. Is there any way to do this?
The programs are "quick", so I need more precision than whole seconds.
You have at least 4 ways to do it.
(1)
A starting point:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    clock_t start = clock();
    system("Test.exe");
    printf("%f seconds\n", ((double)clock() - start) / CLOCKS_PER_SEC);
    return 0;
}
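Note that on POSIX systems clock() reports the processor time of the calling process, so the time spent inside the program launched by system() may not show up (Microsoft's CRT has historically made clock() reflect wall-clock time since process start, which is why the snippet above works there). A minimal wall-clock sketch for POSIX, assuming clock_gettime() is available; "./test" is just a placeholder for the program you actually run:
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
    struct timespec t0, t1;

    clock_gettime(CLOCK_MONOTONIC, &t0);   /* wall clock, not affected by clock changes */
    system("./test");
    clock_gettime(CLOCK_MONOTONIC, &t1);

    double elapsed = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
    printf("%f seconds\n", elapsed);
    return 0;
}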
(2)
If you are on Windows and have access to the Windows API, you can use GetTickCount() too:
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>

int main(void)
{
    DWORD t1 = GetTickCount();
    system("Test.exe");
    DWORD t2 = GetTickCount();
    printf("%lu milliseconds\n", t2 - t1);
    return 0;
}
(3)
And the best option is QueryPerformanceCounter():
#include <stdio.h>
#include <stdlib.h>
#include <windows.h>
int main(void)
{
    LARGE_INTEGER frequency;
    LARGE_INTEGER start;
    LARGE_INTEGER end;
    double interval;

    QueryPerformanceFrequency(&frequency);
    QueryPerformanceCounter(&start);
    system("calc.exe");
    QueryPerformanceCounter(&end);

    interval = (double)(end.QuadPart - start.QuadPart) / frequency.QuadPart;
    printf("%f\n", interval);
    return 0;
}
(4)
The question is tagged C, but for the sake of completeness I want to add the C++11 way:
#include <chrono>
#include <cstdlib>
#include <iostream>

int main()
{
    auto t1 = std::chrono::high_resolution_clock::now();
    std::system("calc.exe");
    auto t2 = std::chrono::high_resolution_clock::now();
    auto x = std::chrono::duration_cast<std::chrono::nanoseconds>(t2 - t1).count();
    std::cout << x << " ns" << std::endl;
}
clock_t start, stop;
double duration;

start = clock();    /* number of ticks before the code under test */
/*
Your Program
*/
stop = clock();     /* number of ticks after the code under test */
duration = (double)(stop - start) / CLOCKS_PER_SEC;   /* elapsed CPU seconds */
Related
So I was timing a function and stumbled over a weird timing issue:
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
void f(void);   /* the function being timed */

int main() {
    long int time = 0, itrs = 100;
    struct timespec start, stop;

    for (int i = 0; i < itrs; i++) {
        clock_gettime(CLOCK_REALTIME, &start);
        f();
        clock_gettime(CLOCK_REALTIME, &stop);
        time += stop.tv_nsec - start.tv_nsec;
    }

    printf("%ld", time / itrs);
    return 0;
}
When I run the loop a higher number of times, the function appears to run faster.
When I run it, for instance, 100 times, the function takes about 100 ns per call, but when I run it 1000000 times it's about 40 ns.
Can anyone explain why this is so?
I'm trying to get the execution time of my program using the time header and can't find any resources that simply use <time.h> and not <sys/time.h>.
I tried
time_t startTime;
time_t endTime;
long double execTime;
/* Start timer */
time(&startTime);
..STUFF THAT TAKES TIME..
time(&endTime);
execTime = difftime(endTime, startTime);
printf("Computing took %Lf\n", execTime * 1000);
But this prints out 0 every single time. I'm guessing that's because time() returns whole seconds and my process takes less than a second.
How can I show the execution time in milliseconds?
Thank you.
Although clock_gettime() should be the preferred way, it is POSIX, not standard C. Standard C only has clock(). It has a lot of disadvantages, but it is good enough for a quick and dirty measurement.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
int main()
{
    int i, j;
    static int a[1000000] = { 0 };   /* static: ~4 MB would likely overflow the stack */
    clock_t start, stop;

    srand(0xdeadbeef);

    start = clock();
    // play around with these values, especially with j
    for (j = 0; j < 100; j++) {
        for (i = 0; i < 1000000; i++) {
            a[i] = rand() % 123;
            a[i] += 123;
        }
    }
    stop = clock();

    printf("Time %.10f seconds\n", (double)(stop - start) / CLOCKS_PER_SEC);
    exit(EXIT_SUCCESS);
}
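If your compiler supports C11, <time.h> itself also offers timespec_get(), which gives sub-second resolution without requiring POSIX. A minimal sketch, assuming C11 and TIME_UTC are available:
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec t0, t1;

    timespec_get(&t0, TIME_UTC);
    /* ... the work you want to measure ... */
    timespec_get(&t1, TIME_UTC);

    double ms = (t1.tv_sec - t0.tv_sec) * 1000.0 + (t1.tv_nsec - t0.tv_nsec) / 1e6;
    printf("%f ms\n", ms);
    return 0;
}
Keep in mind this is wall-clock (calendar) time, not CPU time.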
The correct way to measure elapsed time is to use clock_gettime(CLOCK_MONOTONIC, &ts_current);
Also, gettimeofday() should be avoided.
A complete example of using clock_gettime() to measure a time difference (both seconds and nanoseconds, which you could convert to milliseconds). Note that the example below uses CLOCK_PROCESS_CPUTIME_ID, which measures the CPU time consumed by the process; use CLOCK_MONOTONIC if you want wall-clock time:
#include <stdio.h>
#include <time.h>
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec temp;

    if ((end.tv_nsec - start.tv_nsec) < 0) {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000 + end.tv_nsec - start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }
    return temp;
}

int main()
{
    struct timespec time1, time2;
    int temp = 0;

    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
    for (int i = 0; i < 242000000; i++)
        temp += temp;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);

    struct timespec d = diff(time1, time2);
    printf("Time difference: %ld [s] %ld [ns]\n", (long)d.tv_sec, (long)d.tv_nsec);
    return 0;
}
The time() function only has one-second resolution. Instead, use gettimeofday(), which has microsecond resolution.
struct timeval tstart, tend;
gettimeofday(&tstart, NULL);
// do something that takes time
gettimeofday(&tend,NULL);
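To turn the two timevals into milliseconds, subtract the seconds and microseconds fields separately; a short sketch, assuming <sys/time.h> and <stdio.h> are included and tstart/tend were filled as above:
/* elapsed time in milliseconds between the two gettimeofday() calls */
double elapsed_ms = (tend.tv_sec - tstart.tv_sec) * 1000.0
                  + (tend.tv_usec - tstart.tv_usec) / 1000.0;
printf("Took %f ms\n", elapsed_ms);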
#include <stdio.h>
#include <time.h>
clock_t start, stop;
int main() {
    start = clock();
    function();
    stop = clock();
    double duration = (stop - start) / CLK_TCK;
    return 0;
}
I've got a problem in Xcode: CLK_TCK is undeclared.
How do I fix it?
Use CLOCKS_PER_SEC instead of CLK_TCK on macOS.
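The division should also be done in floating point, otherwise it truncates to whole seconds; a sketch of the corrected line:
/* cast before dividing so the result keeps its fractional part */
double duration = (double)(stop - start) / CLOCKS_PER_SEC;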
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

typedef unsigned int uint32;

uint32 a;

int main()
{
    struct timespec start;

    if ((clock_gettime(CLOCK_REALTIME, &start)) == -1)
    {
        perror("clock gettime\n");
    }
    /* I am calculating the clock granularity here; the granularity is basically how long
       that timer interrupt will last while it's processing the background task. */
    // microseconds output
    a = (1000000 * start.tv_sec + start.tv_nsec / 1000);
    printf("%u\n", a);
    return EXIT_SUCCESS;
}
I created a timer to get a timestamp at any place, so the above is a free-running timer used to take the timestamp. I tried to get the output in microseconds and got the value 2847675807. Is this right? I expect the value in microseconds, but I seem to be getting much bigger values. Can someone please help me?
typedef unsigned int uint64;
typedef unsigned int uint32;

uint32 a;

uint64 timestamp()
{
    struct timespec start;

    if ((clock_gettime(CLOCK_REALTIME, &start)) == -1)
    {
        perror("clock gettime\n");
    }
    /* I am calculating the Clock granularity here the granularity is basically how long that timer interrupt
     * will last while it's processing the background task. */
    //micro seconds output
    a = (uint32)(1e6 * start.tv_sec + start.tv_nsec * 1e-3);
    printf("%u\n", a);
    return EXIT_SUCCESS;
}

int main()
{
    timestamp();
    return 1;
}
I modified it as above, but I still get the same kind of large number.
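For reference, the oversized values come from doing the arithmetic in (and storing the result into) a 32-bit unsigned type, which wraps around. A sketch that keeps everything in a genuine 64-bit integer, assuming <stdint.h> and the same POSIX clock_gettime() call:
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* microseconds since the Epoch, computed entirely in 64-bit arithmetic */
uint64_t timestamp_us(void)
{
    struct timespec now;

    if (clock_gettime(CLOCK_REALTIME, &now) == -1)
        perror("clock gettime");

    return (uint64_t)now.tv_sec * 1000000u + (uint64_t)(now.tv_nsec / 1000);
}

int main(void)
{
    printf("%llu\n", (unsigned long long)timestamp_us());
    return 0;
}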
Maybe you can try this. It measures CPU time rather than real time, and it's less accurate than gettimeofday().
#include <time.h>
#include <stdio.h>
int main(int ac, char **av)
{
    clock_t start, end;
    double duration = 0;

    start = clock();
    for (int i = 0; i < 100000000; i++)
    {}
    end = clock();

    duration = (double)(end - start) * 1000.0 / CLOCKS_PER_SEC;   /* convert to milliseconds */
    printf("%f ms\n", duration);
    return 0;
}
I've searched the Web, but I've only found a way to do it that returns seconds instead of milliseconds.
My code is:
#include <stdio.h>
#include <assert.h>
#include <time.h>
int divisores_it(int, int);   /* the function being timed */

int main(void)
{
    int solucion;
    time_t start, stop;
    clock_t ticks;
    long count;

    time(&start);
    solucion = divisores_it(92000000, 2);
    time(&stop);

    printf("Finished in %f seconds.\n", difftime(stop, start));
    return 0;
}
A cross-platform way is to use ftime().
Windows specific link here: http://msdn.microsoft.com/en-us/library/aa297926(v=vs.60).aspx
Example below.
#include <stdio.h>
#include <sys/timeb.h>

int main()
{
    struct timeb start, end;
    int diff;
    int i = 0;

    ftime(&start);
    while (i++ < 999) {
        /* do something which takes some time */
        printf(".");
    }
    ftime(&end);

    diff = (int)(1000.0 * (end.time - start.time) + (end.millitm - start.millitm));
    printf("\nOperation took %d milliseconds\n", diff);
    return 0;
}
I ran the code above, traced through it with VS2008, and saw that it actually calls the Windows GetSystemTimeAsFileTime function.
Anyway, ftime will give you millisecond precision.
The solution below seems OK to me. What do you think?
#include <stdio.h>
#include <time.h>
long timediff(clock_t t1, clock_t t2) {
    long elapsed;
    elapsed = ((double)t2 - t1) / CLOCKS_PER_SEC * 1000;
    return elapsed;
}

int main(void) {
    clock_t t1, t2;
    int i;
    float x = 2.7182;
    long elapsed;

    t1 = clock();
    for (i = 0; i < 1000000; i++) {
        x = x * 3.1415;
    }
    t2 = clock();

    elapsed = timediff(t1, t2);
    printf("elapsed: %ld ms\n", elapsed);
    return 0;
}
Reference: http://www.acm.uiuc.edu/webmonkeys/book/c_guide/2.15.html#clock
For Windows, GetSystemTime() is what you want. For POSIX, gettimeofday().
GetSystemTime() uses the structure SYSTEMTIME, which provides milli-second resolution.
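A minimal sketch of the Windows side, assuming <windows.h>; GetSystemTime() fills a SYSTEMTIME whose wMilliseconds field carries the sub-second part:
#include <stdio.h>
#include <windows.h>

int main(void)
{
    SYSTEMTIME st;

    GetSystemTime(&st);   /* current UTC time with millisecond resolution */
    printf("%02u:%02u:%02u.%03u\n", st.wHour, st.wMinute, st.wSecond, st.wMilliseconds);
    return 0;
}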
This piece of code works. It is based on the answer from Angus Comber:
#include <stdint.h>
#include <sys/timeb.h>

uint64_t system_current_time_millis()
{
#if defined(_WIN32) || defined(_WIN64)
    struct _timeb timebuffer;
    _ftime(&timebuffer);
    return (uint64_t)(((timebuffer.time * 1000) + timebuffer.millitm));
#else
    struct timeb timebuffer;
    ftime(&timebuffer);
    return (uint64_t)(((timebuffer.time * 1000) + timebuffer.millitm));
#endif
}
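A possible way to use the helper for interval measurement (the includes above plus <stdio.h> are assumed):
uint64_t t0 = system_current_time_millis();
/* ... work to be measured ... */
uint64_t t1 = system_current_time_millis();
printf("Elapsed: %llu ms\n", (unsigned long long)(t1 - t0));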
DWORD start = GetTickCount();
executeSmth();
printf("Elapsed: %lu ms", GetTickCount() - start);
P.S. This method has some limitations. See GetTickCount.
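One of those limitations is that the GetTickCount() value wraps around after roughly 49.7 days. If that matters and you can target Windows Vista or later, a sketch using GetTickCount64() avoids the wrap:
ULONGLONG start = GetTickCount64();
executeSmth();
printf("Elapsed: %llu ms", (unsigned long long)(GetTickCount64() - start));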