I am writing a wrapper function sleep_new() for clock_nanosleep() to make suspending a thread easier.
// The compiler is confined to POSIX.1-2017.
#define _XOPEN_SOURCE 700
// POSIX headers.
#include <stdint.h>
#include <time.h>
#include <stdio.h>
#include <string.h>
// Other headers.
#include "sleep_new.h"
void sleep_new(long value, const char unit[3]){
    // Create a timespec structure and set its members.
    // The members are added together! So to set the time "1.5 s" we set "t.tv_sec = 1" and "t.tv_nsec = 500000000".
    // The names ".tv_sec" and ".tv_nsec" indicate each member's unit; each holds only part of the total value!
    struct timespec sleep_time;
    // Set flags, i.e. TIMER_ABSTIME, to 0 to use relative instead of absolute time.
    int flags = 0;
    // Choose the clock, e.g. CLOCK_MONOTONIC is the "clock_id" of the clock started at system start.
    int clock_id = CLOCK_MONOTONIC;
    // Set the timespec structure's members according to the chosen unit.
    if (!strcmp(unit, "s")) {
        sleep_time.tv_sec = value;
        sleep_time.tv_nsec = 0;
    }
    else if (!strcmp(unit, "ns")){
        sleep_time.tv_sec = 0;
        sleep_time.tv_nsec = value;
    }
    else if (!strcmp(unit, "us")){
        sleep_time.tv_sec = 0;
        sleep_time.tv_nsec = value * 1000;
    }
    else if (!strcmp(unit, "ms")){
        sleep_time.tv_sec = 0;
        sleep_time.tv_nsec = value * 1000000;
    }
    else{
        puts("Unit not supported - choose between: s, ms, us, ns");
    }
    // Because the last argument is NULL, the remaining time is not stored anywhere if the sleep is interrupted.
    clock_nanosleep(clock_id, flags, &sleep_time, NULL);
}
int main(int argc, char *argv[])
{
    // Counter.
    unsigned int i;
    for(i = 0; i < 256; i++){
        // Stdout is line buffered: either end the output with "\n" or flush it manually.
        // So use either example A or example B.
        // A
        //printf("%d\n", i);
        // B
        printf("%d, ", i);
        fflush(stdout);
        // Sleep between prints.
        sleep_new(1000, "ms");
    }
    return 0;
}
If I call this function with sleep_new(1, "s") or sleep_new(2, "s") it works fine, because it sets sleep_time.tv_sec = value; and sleep_time.tv_nsec = 0;.
In any other scenario, e.g. sleep_new(1000, "ms"), something is wrong and the sleep is not applied. I debugged the application and the values are assigned to the timespec members just fine, but clock_nanosleep() simply ignores them.
I am using type long for the value because the POSIX spec says that the header time.h defines the timespec structure's member tv_nsec as long, and the member tv_sec as time_t, which is in turn defined in the header sys/types.h like this:
time_t shall be an integer type.
So because long can also hold int values I expected this to work, but it doesn't. Does anyone have any suggestions?
tv_nsec holds only the nanoseconds part, and it must stay below one full second: valid values range from 0 to 999999999. 1000 * 1000000 nanoseconds is too much - that's 1 second! With an out-of-range tv_nsec, clock_nanosleep() fails with EINVAL instead of sleeping. The proper calculation could look like:
sleep_time.tv_sec = value / 1000;
sleep_time.tv_nsec = (value % 1000) * 1000000;
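For completeness, here is a sketch of the whole wrapper with every unit normalized so that tv_nsec always stays in range. It keeps the question's interface, but it is one possible rewrite, not the only correct one:

#define _XOPEN_SOURCE 700
#include <string.h>
#include <time.h>

void sleep_new(long value, const char unit[3]){
    struct timespec sleep_time = {0, 0};
    long ns_per_unit;
    // Map the unit to a number of nanoseconds.
    if (!strcmp(unit, "s"))       ns_per_unit = 1000000000L;
    else if (!strcmp(unit, "ms")) ns_per_unit = 1000000L;
    else if (!strcmp(unit, "us")) ns_per_unit = 1000L;
    else if (!strcmp(unit, "ns")) ns_per_unit = 1L;
    else return; // Unit not supported.
    // Split the value into whole seconds and a sub-second remainder,
    // so tv_nsec is always within 0..999999999.
    sleep_time.tv_sec  = value / (1000000000L / ns_per_unit);
    sleep_time.tv_nsec = (value % (1000000000L / ns_per_unit)) * ns_per_unit;
    clock_nanosleep(CLOCK_MONOTONIC, 0, &sleep_time, NULL);
}

With this normalization, sleep_new(1000, "ms"), sleep_new(1500000, "us") and sleep_new(1, "s") all produce a valid timespec.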
Related
I'm using C and I'm trying to get the clock resolution, but I get this value: 0.000000
Here is the code I'm using:
#include <time.h>
#include <stdio.h>

double duration(struct timespec start, struct timespec end) {
    return end.tv_sec - start.tv_sec
         + ((end.tv_nsec - start.tv_nsec) / (double) 1000000000.0);
}

double getResolution(){
    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);
    do {
        clock_gettime(CLOCK_MONOTONIC, &end);
    } while (duration(start, end) == 0.0);
    return duration(start, end);
}

int main(){
    printf("%f", getResolution());
    return 0;
}
You need to increase the precision in your printf("%f");. Using printf("%.12f"); would probably be enough to show some non-zero decimals.
Calculating the floating point duration in the while loop may cause the program to actually perform that calculation if the compiler isn't clever enough to figure out that you only need to see if the clock has changed at all. You could just do a memcmp to compare start and end instead.
Don't take the struct timespecs by value in your duration function. Supply pointers to the function instead. It should be cheaper.
Use the clock_getres function to get the resolution. The runtime value you get with your homebrewed solution depends on what speed the CPU is currently running at etc.
Example:
#include <stdio.h>
#include <string.h>
#include <time.h>

// taking the arguments via pointers:
double duration(const struct timespec* start, const struct timespec* end) {
    return end->tv_sec - start->tv_sec +
           ((end->tv_nsec - start->tv_nsec) / 1000000000.0);
}

double getResolution() {
    struct timespec start = {0}, end = {0};
    clock_gettime(CLOCK_MONOTONIC, &start);
    do {
        clock_gettime(CLOCK_MONOTONIC, &end);
        // using memcmp below:
    } while (memcmp(&start, &end, sizeof start) == 0);
    return duration(&start, &end);
}

int main() {
    struct timespec base = {0}, res;
    // using the proper function to get the resolution:
    clock_getres(CLOCK_MONOTONIC, &res);
    // comparing the results:
    printf("clock_getres  = %.12f\n", duration(&base, &res));
    printf("getResolution = %.12f\n", getResolution());
}
The C11 standard provides the function timespec_get. If I run the example code on cppreference, or on my computer, it works:
#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;
    timespec_get(&ts, TIME_UTC);
    char buff[100];
    strftime(buff, sizeof buff, "%D %T", gmtime(&ts.tv_sec));
    printf("Current time: %s.%09ld UTC\n", buff, ts.tv_nsec);
}
However, if I look at the sources of glibc here, the code is the following:
#include <time.h>

/* Set TS to calendar time based in time base BASE. */
int
timespec_get (struct timespec *ts, int base)
{
  switch (base)
    {
    case TIME_UTC:
      /* Not supported. */
      return 0;
    default:
      return 0;
    }
  return base;
}
stub_warning (timespec_get)
Which... should not work...
Which leads to the question: where is the source code of timespec_get that is actually called?
The timespec_get function's implementation depends on the system the library is running on, so it appears both as a stub in time/timespec_get.c (in case no implementation is available) and as various system-dependent implementations elsewhere.
You can see the Linux implementation in sysdeps/unix/sysv/linux/timespec_get.c:
/* Set TS to calendar time based in time base BASE. */
int
timespec_get (struct timespec *ts, int base)
{
  switch (base)
    {
      int res;
      INTERNAL_SYSCALL_DECL (err);
    case TIME_UTC:
      res = INTERNAL_VSYSCALL (clock_gettime, err, 2, CLOCK_REALTIME, ts);
      if (INTERNAL_SYSCALL_ERROR_P (res, err))
        return 0;
      break;
    default:
      return 0;
    }
  return base;
}
This is just a thin wrapper around a vDSO call, and the vDSO is part of the Linux kernel itself. If you are curious, look for the definition of clock_gettime there. It's unusual that clock_gettime is in the vDSO; only a small number of syscalls are implemented this way.
Here is the x86 implementation for CLOCK_REALTIME, found in arch/x86/entry/vdso/vclock_gettime.c:
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
        unsigned long seq;
        u64 ns;
        int mode;

        do {
                seq = gtod_read_begin(gtod);
                mode = gtod->vclock_mode;
                ts->tv_sec = gtod->wall_time_sec;
                ns = gtod->wall_time_snsec;
                ns += vgetsns(&mode);
                ns >>= gtod->shift;
        } while (unlikely(gtod_read_retry(gtod, seq)));

        ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
        ts->tv_nsec = ns;

        return mode;
}
Basically, there is some memory in your process which is updated by the kernel, and some registers in your CPU which track the passage of time (or something provided by your hypervisor). The memory in your process is used to translate the value of these CPU registers into the wall clock time. You have to read these in a loop because they can change while you are reading them... the loop logic detects the case when you get a bad read, and tries again.
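This read-retry scheme is the classic sequence-lock (seqlock) pattern. A rough illustrative sketch of the reader side (not the kernel's actual code, and omitting the memory barriers a real seqlock needs) could look like:

struct shared_time {
    volatile unsigned seq;  /* bumped before and after each update; odd = update in progress */
    volatile long sec;      /* the published data */
    volatile long nsec;
};

void read_time(const struct shared_time *st, long *sec, long *nsec)
{
    unsigned begin;
    do {
        begin = st->seq;    /* remember the sequence count */
        *sec  = st->sec;    /* read a snapshot of the data */
        *nsec = st->nsec;
    } while ((begin & 1u) || begin != st->seq); /* retry if we raced with the writer */
}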
The timespec_get definition you linked to is a stub (see the stub_warning). The actual implementation will be under sysdeps for your platform. For example, here is the version for sysv: https://github.com/lattera/glibc/blob/a2f34833b1042d5d8eeb263b4cf4caaea138c4ad/sysdeps/unix/sysv/linux/timespec_get.c
int timespec_get (ts, base)
     struct timespec *ts;
     int base;
{
  switch (base)
    {
      int res;
      INTERNAL_SYSCALL_DECL (err);
    case TIME_UTC:
      res = INTERNAL_GETTIME (CLOCK_REALTIME, ts);
      if (INTERNAL_SYSCALL_ERROR_P (res, err))
        return 0;
      break;
    default:
      return 0;
    }
  return base;
}
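Both versions return 0 on failure and base on success, so a quick runtime check tells you whether the stub or a real implementation ended up in your binary:

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;
    if (timespec_get(&ts, TIME_UTC) == TIME_UTC)
        printf("real implementation: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    else
        puts("stub implementation (TIME_UTC not supported)");
    return 0;
}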
I made this state machine:
enum states { STATE_ENTRY, STATE_....} current_state;
enum events { EVENT_OK, EVENT_FAIL, EVENT_REPEAT, MAX_EVENTS } event;

void (*const state_table [MAX_STATES][MAX_EVENTS]) (void) = {
    { action_entry , action_entry_fail , action_entry_repeat }, /* procedures for state 1 */
    ......}

void main (void){
    event = get_new_event (); /* get the next event to process */
    if (((event >= 0) && (event < MAX_EVENTS))
        && ((current_state >= 0) && (current_state < MAX_STATES))) {
        state_table [current_state][event] (); /* call the action procedure */
        printf("OK 0");
    } else {
        /* invalid event/state - handle appropriately */
    }
}
When I modify a global variable in one state, the global variable remains the same, and I need that variable in all the states. Do you know what could be the problem?
My global variable is this structure:
#if (CPU_TYPE == CPU_TYPE_32)
typedef uint32_t word;
#define word_length 32
typedef struct BigNumber {
    word words[64];
} BigNumber;
#elif (CPU_TYPE == CPU_TYPE_16)
typedef uint16_t word;
#define word_length 16
typedef struct BigNumber {
    word words[128];
} BigNumber;
#else
#error Unsupported CPU_TYPE
#endif

BigNumber number1, number2;
Here is how I modify it:
// iterator is a number from where I start to modify;
// I have already modified the array in the same way up to the iterator.
for(i=iterator+1; i<32; i++){
    nr_rand1=661;
    nr_rand2=1601;
    nr_rand3=1873;
    number2.words[i]=(nr_rand1<<21) | (nr_rand2<<11) | (nr_rand3);
}
This is just in case you want to change your approach to defining the FSM. I'll show you with an example; say you have a small FSM with the states S, A, B and E, where input characters drive the transitions.
You may represent it as:
void process() {
    fsm {
        fsmSTATE(S) {
            /* do your entry actions here */
            event = getevent();
            /* do your actions here */
            if (event.ch == 'a') fsmGOTO(A);
            else fsmGOTO(E);
        }
        fsmSTATE(A) {
            event = getevent();
            if (event.ch == 'b' || event.ch == 'B') fsmGOTO(B);
            else fsmGOTO(E);
        }
        fsmSTATE(B) {
            event = getevent();
            if (event.ch == 'a') fsmGOTO(A);
            else fsmGOTO(E);
        }
        fsmSTATE(E) {
            /* done with the FSM. Bye bye! */
        }
    }
}
I do claim (but I believe someone will disagree) that this is simpler, much more readable, and conveys the structure of the FSM more directly than a table does. Even without a diagram, drawing one from this code would be rather easy.
To get this you just have to define the fsmXXX stuff as follows:
#define fsm
#define fsmGOTO(x) goto fsm_state_##x
#define fsmSTATE(x) fsm_state_##x :
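For illustration, here is a tiny self-contained program (with made-up states, not the ones from the question) showing what the macros expand to: fsmSTATE(loop) becomes the label fsm_state_loop: and fsmGOTO(loop) becomes goto fsm_state_loop;.

#include <stdio.h>

#define fsm
#define fsmGOTO(x)  goto fsm_state_##x
#define fsmSTATE(x) fsm_state_##x :

int main(void)
{
    int count = 0;
    fsm {
        fsmSTATE(loop) {
            if (++count < 3) fsmGOTO(loop); /* stay in this state twice */
            fsmGOTO(done);
        }
        fsmSTATE(done) {
            printf("visited the loop state %d times\n", count);
        }
    }
    return 0;
}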
Regarding the code that changes number2:
for(i=iterator+1;i<32;i){
    nr_rand1=661;
    nr_rand2=1601;
    nr_rand3=1873;
    number2.words[i]=(nr_rand1<<21) | (nr_rand2<<11) | (nr_rand3);
}
I can't fail to note that:
i is never incremented, so just one element of the array (iterator+1) is changed, over and over, in an infinite loop;
even if i were incremented, only a portion of the words array would be changed, depending on the value of iterator (but this might be the intended behaviour);
unless iterator can be -1, the element words[0] is never changed (again, this could be the intended behaviour).
I would check if this is really what you intended to do; a corrected sketch follows below.
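In case the increment was simply lost, a minimal corrected version (assuming the whole range up to 32 was meant to be filled) would be:

for (i = iterator + 1; i < 32; i++) { /* note the i++ */
    nr_rand1 = 661;
    nr_rand2 = 1601;
    nr_rand3 = 1873;
    number2.words[i] = (nr_rand1 << 21) | (nr_rand2 << 11) | (nr_rand3);
}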
If you're sure that it's just a visibility problem (since you said that when you declared it as a local variable it worked as expected), the only other thing I can think of is that you have the functions in one file and the main (or wherever you do your checks) in another.
If you then include the same .h header in both files, you can end up (depending on the linker you're using) with two different number2 variables, because the header defines the variable instead of declaring it extern.
Your compiler (or, better, the linker) should at least have warned you about this; did you check the compilation messages?
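A minimal sketch of the usual pattern (the file names are just for illustration):

/* bignumber.h - the shared header only declares the globals. */
extern BigNumber number1, number2;

/* globals.c - exactly one .c file defines them. */
#include "bignumber.h"
BigNumber number1, number2;

/* Every other .c file includes bignumber.h and sees the same objects. */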
This is not an answer - rather it is a comment. But it is too big to fit the comment field so I post it here for now.
The code posted in the question is not sufficient to find the root cause. You need to post a minimal but complete example that shows the problem.
Something like:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef uint32_t word;
#define word_length 32

typedef struct BigNumber {
    word words[4];
} BigNumber;

BigNumber number2;

enum states { STATE_0, STATE_1 } current_state;
enum events { EVENT_A, EVENT_B } event;

void f1(void)
{
    int i;
    current_state = STATE_1;
    for (i=0; i<4; ++i) number2.words[i] = i;
}

void f2(void)
{
    int i;
    current_state = STATE_0;
    for (i=0; i<4; ++i) number2.words[i] = 42 + i*i;
}

void (*const state_table [2][2]) (void) =
{
    { f1 , f1 },
    { f2 , f2 }
};

int main (void){
    current_state = STATE_0;
    event = EVENT_A;
    state_table [current_state][event] (); /* call the action procedure */
    printf("%u %u %u %u\n", number2.words[0], number2.words[1], number2.words[2], number2.words[3]);
    event = EVENT_B;
    state_table [current_state][event] (); /* call the action procedure */
    printf("%u %u %u %u\n", number2.words[0], number2.words[1], number2.words[2], number2.words[3]);
    return 0;
}
The above can be considered minimal and complete. Now update this code with a few of your own functions and post that as the question (if it still fails).
My code doesn't fail.
Output:
0 1 2 3
42 43 46 51
I'm trying to use 2 different equivalents for UNIX's gettimeofday() function on Windows, using Visual Studio 2013.
I took the first one from here. As the second one, I'm using the _ftime64_s function, as explained here.
They work, but not as I expected. I want to get different values when printing the seconds, or at least the milliseconds, but I get the same values printed with gettimeofday() (mytime1 & mytime2) and with _ftime64_s (mytime3 & mytime4).
However, it is worth mentioning that the millisecond values do differ between the two functions (that is, the milliseconds of mytime1/mytime2 differ from those of mytime3/mytime4).
Here's my code:
#include <stdio.h>
#include <Windows.h>
#include <stdint.h>
#include <sys/timeb.h>
#include <time.h>

#define WIN32_LEAN_AND_MEAN

int gettimeofday(struct timeval * tp, struct timezone * tzp)
{
    // Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's
    static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);

    SYSTEMTIME system_time;
    FILETIME file_time;
    uint64_t time;

    GetSystemTime(&system_time);
    SystemTimeToFileTime(&system_time, &file_time);
    time = ((uint64_t)file_time.dwLowDateTime);
    time += ((uint64_t)file_time.dwHighDateTime) << 32;

    tp->tv_sec = (long)((time - EPOCH) / 10000000L);
    tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
    return 0;
}

int main()
{
    /* working with struct timeval and gettimeofday equivalent */
    struct timeval mytime1;
    struct timeval mytime2;

    gettimeofday(&(mytime1), NULL);
    gettimeofday(&(mytime2), NULL);

    printf("Seconds: %d\n", (int)(mytime1.tv_sec));
    printf("Milliseconds: %d\n", (int)(mytime1.tv_usec));
    printf("Seconds: %d\n", (int)(mytime2.tv_sec));
    printf("Milliseconds: %d\n", (int)(mytime2.tv_usec));

    /* working with _ftime64_s */
    struct _timeb mytime3;
    struct _timeb mytime4;

    _ftime64_s(&mytime3);
    _ftime64_s(&mytime4);

    printf("Seconds: %d\n", mytime3.time);
    printf("Milliseconds: %d\n", mytime3.millitm);
    printf("Seconds: %d\n", mytime4.time);
    printf("Milliseconds: %d\n", mytime4.millitm);

    return (0);
}
I tried other format specifiers (%f, %lu) and casts ((float), (double), (long), (size_t)), but it didn't matter. Suggestions are welcome.
QueryPerformanceCounter is used for accurate timing on Windows. Usage can be as follows:
uint64_t microseconds()
{
    LARGE_INTEGER fq, t;
    QueryPerformanceFrequency(&fq);
    QueryPerformanceCounter(&t);
    return (1000000 * t.QuadPart) / fq.QuadPart;
}
This is not tied to any epoch as far as I know; it only measures elapsed time. For an absolute time you need GetSystemTimePreciseAsFileTime, which is only available on Windows 8 and higher.
// The function is loaded dynamically, so we declare its type ourselves
// (matching the documented signature of GetSystemTimePreciseAsFileTime):
typedef void (WINAPI T_GetSystemTimePreciseAsFileTime)(LPFILETIME);

uint64_t MyGetSystemTimePreciseAsFileTime()
{
    HMODULE lib = LoadLibraryW(L"kernel32.dll");
    if (!lib) return 0;
    FARPROC fp = GetProcAddress(lib, "GetSystemTimePreciseAsFileTime");

    ULARGE_INTEGER largeInt;
    largeInt.QuadPart = 0;
    if (fp)
    {
        T_GetSystemTimePreciseAsFileTime* pfn = (T_GetSystemTimePreciseAsFileTime*)fp;
        FILETIME fileTime = { 0 };
        pfn(&fileTime);
        largeInt.HighPart = fileTime.dwHighDateTime;
        largeInt.LowPart = fileTime.dwLowDateTime;
    }
    FreeLibrary(lib);
    return largeInt.QuadPart;
}
int main()
{
    uint64_t t1 = microseconds();
    uint64_t t2 = microseconds();
    printf("t1: %llu\n", t1);
    printf("t2: %llu\n", t2);
    return (0);
}
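If an absolute Unix-style timestamp is wanted, the returned FILETIME ticks (100 ns units since 1601-01-01 UTC) can be converted with the same epoch offset used in the question's gettimeofday port; a small sketch:

uint64_t ft = MyGetSystemTimePreciseAsFileTime();
if (ft != 0)
{
    /* 116444736000000000 = ticks between 1601-01-01 and 1970-01-01. */
    uint64_t unix_us = (ft - 116444736000000000ULL) / 10; /* microseconds since the Unix epoch */
    printf("Unix time: %llu.%06llu s\n", unix_us / 1000000, unix_us % 1000000);
}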
I have a program (mixed C and Fortran, although that doesn't seem to be relevant) that uses nanosleep. However, if my timespec has a tv_sec value of 0, it simply doesn't sleep. The tv_nsec value can be microseconds shy of a full second, but it does not sleep. (If tv_sec is 1, it has no problem sleeping for a second.) Why would this be?
To make things more confusing, usleep with an appropriate value (e.g. 995000 usec) sleeps for just about a second as expected.
I'm seeing this problem with a RHEL 5.8 and a RHEL 6.4 box. Both are using gcc.
Here's the function that calls nanosleep:
void msleep(int *milliseconds)
{
    long usec;
    struct timespec sleep;

    usec = (*milliseconds) % 1000;
    sleep.tv_sec = (*milliseconds) / 1000;
    sleep.tv_nsec = 1000*usec;
    nanosleep(&sleep, NULL);
}
Obviously, I don't actually need nanosecond precision!
I've also tested a version in which I did check the return value; it was always 0 (success), and thus the rem output parameter (remaining time if interrupted) never got set.
You are missing a factor of 1000.
Try this:
#define _POSIX_C_SOURCE 199309L /* shall be >= 199309L */
#include <time.h>

void msleep(int *milliseconds)
{
    int ms_remaining = (*milliseconds) % 1000;
    long usec = ms_remaining * 1000;
    struct timespec ts_sleep;

    ts_sleep.tv_sec = (*milliseconds) / 1000;
    ts_sleep.tv_nsec = 1000*usec;
    nanosleep(&ts_sleep, NULL);
}
More compact:
#define _POSIX_C_SOURCE 199309L /* shall be >= 199309L */
#include <time.h>

void msleep(int * pmilliseconds)
{
    struct timespec ts_sleep =
    {
        *pmilliseconds / 1000,
        (*pmilliseconds % 1000) * 1000000L
    };

    nanosleep(&ts_sleep, NULL);
}
Finally a complete implementation including error handling and the case of nanosleep() being interrupted early:
#define _POSIX_C_SOURCE 199309L
#include <time.h>
#include <errno.h>
#include <stdio.h>

int ms_sleep(unsigned int ms)
{
    int result = 0;

    {
        struct timespec ts_remaining =
        {
            ms / 1000,
            (ms % 1000) * 1000000L
        };

        do
        {
            struct timespec ts_sleep = ts_remaining;
            result = nanosleep(&ts_sleep, &ts_remaining);
        }
        while ((EINTR == errno) && (-1 == result));
    }

    if (-1 == result)
    {
        perror("nanosleep() failed");
    }

    return result;
}
Following is a wrapper to fulfil the OP's requirements:
#include <errno.h>
#include <stdio.h>

int ms_sleep(unsigned int);

void msleep(int * pms)
{
    int result = 0;

    if ((NULL == pms) || (0 > *pms)) /* Check for valid input. */
    {
        errno = EINVAL;
        result = -1;
    }
    else
    {
        result = ms_sleep(*pms);
    }

    if (-1 == result)
    {
        perror("ms_sleep() failed");
        /* Exit and/or log error here. */
    }
}
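Usage keeps the pointer-based interface, e.g.:

int main(void)
{
    int delay = 1500; /* milliseconds */
    msleep(&delay);   /* sleeps for about 1.5 s */
    return 0;
}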
Update (referring to chux's comment below):
Assuming at least C99, this part of the above code
struct timespec ts_sleep =
{
    *pmilliseconds / 1000,
    (*pmilliseconds % 1000) * 1000000L
};
might better be written like this
struct timespec ts_sleep =
{
    .tv_sec  = *pmilliseconds / 1000,
    .tv_nsec = (*pmilliseconds % 1000) * 1000000L
};
to not rely on the order of struct timespec's members.
I did it as shown below and it worked:
#include <stdio.h>
#include <time.h>   /* Needed for struct timespec */

int nsleep(long milliseconds)
{
    struct timespec req, rem;

    if(milliseconds > 999)
    {
        req.tv_sec = (int)(milliseconds / 1000);                            /* Must be Non-Negative */
        req.tv_nsec = (milliseconds - ((long)req.tv_sec * 1000)) * 1000000; /* Must be in range of 0 to 999999999 */
    }
    else
    {
        req.tv_sec = 0;                        /* Must be Non-Negative */
        req.tv_nsec = milliseconds * 1000000;  /* Must be in range of 0 to 999999999 */
    }

    return nanosleep(&req , &rem);
}

int main()
{
    int ret = nsleep(2500);
    printf("sleep result %d\n", ret);
    return 0;
}
Here is the method:
static void Sleep(long lMs){
    // Calculate the nanoseconds
    long lRemainingMilliSecond = (lMs) % 1000;
    long lNanoSecond = lRemainingMilliSecond * 1000000;

    struct timespec ts_sleep, ts_remaining;
    ts_sleep.tv_sec = (lMs) / 1000;
    ts_sleep.tv_nsec = lNanoSecond;
    nanosleep(&ts_sleep, &ts_remaining);
}
The concept is explained in more detail on the following page:
Convert milliseconds to timespec - GNU Porting