Why does my audio output increase by 100 Hz on each cycle? - C

I have a bug in my audio code.
Expected behavior: sine wave output, sweeping from 100 Hz to 200 Hz, resetting to 100 Hz every second.
Actual behavior: sine wave output, sweeping from 100 Hz to 200 Hz, but then rising by a further 100 Hz on each cycle, so the second cycle sweeps from 200 Hz to 300 Hz, the third from 300 Hz to 400 Hz, and so on...
I'm generating a 1 Hz rising sawtooth wave and scaling and offsetting it so it rises from 100 to 200 every second. I'm also printing its value, which shows that it's behaving as expected.
But for some reason, if I use that value as the frequency of my sine wave, the resulting sound rises by 100 Hz on each cycle.
Plugging a fixed frequency into my sine wave function works as expected.
It's only when I use the two together that I get the bug. The thing I really can't explain is that the bug is only in the output audio -- the printed values are still all fine.
I'm using miniaudio as the audio backend, and it's the only dependency. The code should compile without errors or warnings on Windows, Linux and Mac.
miniaudio is a single-header library; you only need to include miniaudio.h, so it should be easy to replicate.
Here is my code:
/*
compiling on Win10 with GCC:
gcc -g0 test_nodep.c -o test_nodep.exe -Wall -Wextra -Wshadow -Wvla -pedantic-errors -ansi
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <stdint.h>
#define MA_NO_DECODING
#define MA_NO_ENCODING
#define MINIAUDIO_IMPLEMENTATION
#include "miniaudio.h" /* https://github.com/mackron/miniaudio - single header file audio os backend library */
/* global variables */
int32_t DEVICE_FORMAT = ma_format_f32; /* 32-bit float */
int32_t DEVICE_CHANNELS = 1; /* mono */
int32_t DEVICE_SAMPLE_RATE = 48000;
float clock = 0;
float time = 0;
static __inline__ float tik(float interval, float len, float offset){
    return (len<=0)*(fmod(time-offset, interval)==0) +
           (len>0)*((fmod(time-offset, interval)>=0)&&(fmod(time-offset, interval)<=(len)));
}
void data_callback(ma_device* pDevice, void* pOutput, const void* pInput, ma_uint32 frameCount){
    float* Samples = pOutput;
    ma_uint32 SampleIndex;
    /* audio-callback variable definitions */
    float test_saw;
    float test_saw_freq = 1.f;
    float i;
    for(SampleIndex = 0; SampleIndex < frameCount; SampleIndex++){
        test_saw = fmod(clock, (DEVICE_SAMPLE_RATE/test_saw_freq))/(DEVICE_SAMPLE_RATE/test_saw_freq); /* 1Hz rising saw, output range [0..1] */
        test_saw = test_saw * 100.f + 100.f; /* shift range into [100..200] */
        if(tik(.125f,0.f,0.f)){ /* this is to print the test_saw value every 1/8 of a second */
            printf("== test_saw: %.2f", test_saw);
            for(i=0.f;i<test_saw/10.f;i++){
                printf(" ");
            }
            printf("%c\n", 254);
        }
        /* this is the output function, a sinewave, with frequency sweeping continuously from 100Hz to 200Hz */
        /* f(t) = sin(2*PI * frequency * t) */
        /* instead of a fixed frequency, I'm using test_saw, sweeping from 100Hz to 200Hz every second */
        *Samples = (float)sin((double)(time * MA_TAU * test_saw));
        /* using the same function with a fixed frequency works as expected, no problems */
        /* *Samples = (float)sin((double)(time * MA_TAU * 100.f)); */
        clock++;
        clock*=(clock<FLT_MAX); /* continuously rising value, +1 on each sample, zeroes out when float is at its max value, to prevent float overflow */
        time = clock/DEVICE_SAMPLE_RATE; /* same value, in seconds */
        Samples++;
    }
    (void)pDevice;
    (void)pInput;
}
int main(){
    ma_device_config deviceConfig;
    ma_device device;
    /* audio output device configuration */
    deviceConfig = ma_device_config_init(ma_device_type_playback); /* initialize for playback */
    deviceConfig.playback.format = DEVICE_FORMAT;
    deviceConfig.playback.channels = DEVICE_CHANNELS;
    deviceConfig.sampleRate = DEVICE_SAMPLE_RATE;
    deviceConfig.dataCallback = data_callback;
    /* audio output device initialization */
    if(ma_device_init(NULL, &deviceConfig, &device) != MA_SUCCESS){
        printf("Failed to open playback device.\n");
        return -4;
    }
    printf("== Device Name: %s\n", device.playback.name);
    printf("== Sample Rate: %u Hz\n", DEVICE_SAMPLE_RATE);
    if (ma_device_start(&device) != MA_SUCCESS) {
        printf("== Failed to start playback device.\n");
        ma_device_uninit(&device);
        return -5;
    }
    printf("~~~ You should hear sound now ~~~\n");
    printf("== Press Enter to quit...");
    getchar();
    ma_device_uninit(&device); /* turn off sound */
    return 0;
}
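A note on the math in the output function above: with a time-varying frequency, sin(MA_TAU * f(t) * t) does not have instantaneous frequency f(t). The instantaneous frequency is the derivative of the phase divided by MA_TAU, i.e. f(t) + t * f'(t); with f(t) ramping 100 Hz per second, the extra t * f'(t) term grows by 100 Hz every second, which matches the symptom exactly, while the printed test_saw values stay correct. A minimal sketch of the usual fix, accumulating phase per sample instead of recomputing it from absolute time (the phase variable is new; all other names are from the code above):

static double phase = 0.0; /* new global: running phase in radians */

    /* inside the sample loop, replacing the *Samples = ... line: */
    *Samples = (float)sin(phase);
    phase += MA_TAU * test_saw / DEVICE_SAMPLE_RATE; /* advance by the current frequency only */
    if(phase >= MA_TAU) phase -= MA_TAU; /* wrap to preserve float precision */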

Related

FFT with dsPIC33E returns no frequency

I'm working with a dsPIC33EP128GP502 and am trying to run an FFT to measure the dominant frequency on the input. The compiler shows no errors and the ADC itself seems to work (for single values).
I expect as a result some frequency value between 0 Hz and ~96 kHz in the variable peakFrequency. With a noise signal (or no signal at all) the value should be more or less random. With an externally applied single-tone signal I expect to measure the input frequency +/- ~100 Hz. Sadly, my frequency output is always 0.
The test signals are generated by external signal generators, and the ADC works fine if I want to measure single values!
The FFT has to run on the DSP core of the dsPIC33E due to some performance needs.
Does anyone have any experience with the dsPIC33E and an idea where my mistake is?
ADC: TAD -> 629.3 ns, Conversion Trigger -> Clearing sample bit ends sampling and starts conversion, Output Format -> Fractional result, signed, Auto Sampling -> enabled
#include "mcc_generated_files/mcc.h"
#include <xc.h>
#include <dsp.h>
#define FFT_BLOCK_LENGTH 1024
#define LOG2_BLOCK_LENGTH 10
#define AUDIO_FS 192042
int16_t peakFrequencyBin;
uint16_t ix_MicADCbuff;
uint16_t peakFrequency;
fractional fftMaxValue;
fractcomplex twiddleFactors[FFT_BLOCK_LENGTH/2] __attribute__ ((space(xmemory)));
fractcomplex sigCmpx[FFT_BLOCK_LENGTH] __attribute__ ((space(ymemory), aligned(FFT_BLOCK_LENGTH * 2 *2)));
bool timeGetAdcSample = false;
void My_ADC_IRS(void)
{
timeGetAdcSample = true;
}
void readOutput(void)//Sample output
{
for(ix_MicADCbuff=0;ix_MicADCbuff<FFT_BLOCK_LENGTH;ix_MicADCbuff++)
{
ADC1_ChannelSelect(mix_output);
ADC1_SoftwareTriggerEnable();
while(!timeGetAdcSample); //wait for TMR1 interrupt (5.2072 us)
timeGetAdcSample = false;
ADC1_SoftwareTriggerDisable();
while(!ADC1_IsConversionComplete(mix_output));
sigCmpx[ix_MicADCbuff].real = ADC1_Channel0ConversionResultGet();
sigCmpx[ix_MicADCbuff].imag = 0;
}
}
void signalFreq(void)//Detect the dominant frequency
{
readOutput();
FFTComplexIP(LOG2_BLOCK_LENGTH, &sigCmpx[0], &twiddleFactors[0], COEFFS_IN_DATA);/
BitReverseComplex(LOG2_BLOCK_LENGTH, &sigCmpx[0]);
SquareMagnitudeCplx(FFT_BLOCK_LENGTH, &sigCmpx[0], &sigCmpx[0].real);
VectorMax(FFT_BLOCK_LENGTH/2, &sigCmpx[0].real, &peakFrequencyBin);
peakFrequency = peakFrequencyBin*(AUDIO_FS/FFT_BLOCK_LENGTH);
}
int main(void)
{
SYSTEM_Initialize();
TwidFactorInit(LOG2_BLOCK_LENGTH, &twiddleFactors[0], 0);
TMR1_SetInterruptHandler(My_ADC_IRS);
TMR1_Start();
while (1)
{
signalFreq();
UART1_32_Write((uint32_T)peakFrequency); // output via UART
}
return 1;
}
Perhaps anyone can figure out the error/problem in my code!
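One arithmetic note on the conversion step in signalFreq() above: AUDIO_FS / FFT_BLOCK_LENGTH is integer division, so with the values shown each FFT bin spans 192042 / 1024 = 187 Hz (187.5 Hz exactly), which already bounds any correct answer to roughly +/- 94 Hz around the true peak:

/* bin spacing implied by the code above (integer division truncates) */
uint16_t bin_spacing_hz = AUDIO_FS / FFT_BLOCK_LENGTH; /* 192042 / 1024 = 187 Hz */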

Float to string using PIC16F1824

I'm having some trouble with a PIC16F1824.
I want to convert a value retrieved from the ADC and transmit it over the UART.
I've used MCC to generate my code.
When I debug my code step by step, the voltage value is similar to the one I've obtained with my scope.
But when I want to print this value on the serial interface, I get an error with the argument %f (or %.2f) in printf.
C:\Program Files\Microchip\xc8\v2.20\pic\sources\c99\common\nf_fputc.c:16:: warning: (1498) pointer (unknown) in expression may have no targets
C:\Program Files\Microchip\xc8\v2.20\pic\sources\c99\common\doprnt.c:66:: error: (1250) could not find space (80 bytes) for variable _dbuf (908) exit status = 1
Here is my code:
#include "mcc_generated_files/mcc.h"

void main(void) {
    SYSTEM_Initialize();
    uint16_t convertedValue, convertedValue2;
    float voltage, voltage2;
    float temp = 0;
    volatile float sensorRH = 0;
    char buffer[5];

    while (1) {
        // ADC_SelectChannel(channel_AN0);
        // ADC_StartConversion();
        // while (!ADC_IsConversionDone());
        // convertedValue = ADC_GetConversionResult();
        // voltage = convertedValue * 0.0048875;
        // temp = (voltage2 - 0.424) / 0.00625; // LM60

        ADC_SelectChannel(channel_AN2);
        ADC_StartConversion();
        while (!ADC_IsConversionDone());
        convertedValue2 = ADC_GetConversionResult();
        voltage2 = convertedValue2 * 0.0048875; // 5 V supply - 10-bit resolution (5/1023)
        sensorRH = (voltage2 - 0.852) / 0.031483; // HIH-4000

        printf("ADC converted value = %.2f\n", sensorRH);
        sprintf(buffer, "%f", sensorRH);
    }
}
I've also tried to:
include the stdio lib: #include <stdio.h>
change the float value to a volatile float sensorRH;
declare my float outside the main function. This gave me another error:
C:\Program Files\Microchip\xc8\v2.20\pic\sources\c99\common\nf_fputc.c:16:: warning: (1498) pointer (unknown) in expression may have no targets
mcc_generated_files/eusart.c:64:: error: (1250) could not find space (8 bytes) for variable _eusartTxBuffer (908) exit status = 1
try the sprintf function with different buffer sizes (same error as the first one).
I'm using a PIC16F1824 with a PICkit 4 debugger and the XC8 compiler.
Thanks in advance
The error you get means you don't have enough RAM or flash; in other words, you use too much space. A very simple way to reduce space by a small amount is to enable the optimizer. A way to reduce more is to write code that does not require so much space. The printf() family takes a significant amount of space, and so does floating point. If you can avoid printf() and sprintf() and replace the floating-point operations with integer operations, this should be possible. It may give you slightly less precise calculations, but that should not matter since your ADC is not very precise to begin with.
To address the diagnostic messages from the compiler:
C:\Program Files\Microchip\xc8\v2.20\pic\sources\c99\common\nf_fputc.c:16:: warning: (1498) pointer (unknown) in expression may have no targets
C:\Program Files\Microchip\xc8\v2.20\pic\sources\c99\common\doprnt.c:66:: error: (1250) could not find space (80 bytes) for variable _dbuf (908) exit status = 1
The first one, "warning: (1498)", is generated because the XC8 compiler is poor at handling what are sometimes called opaque pointers. But since it refers to Microchip library code that you should not change, and the code is in fact correct, just ignore this warning.
The second one, "error: (1250)", is an error in your code. You have declared more objects than there is available RAM for.
This is code I just created for you that uses only integer math:
#include "mcc_generated_files/mcc.h"
/*
    Main application
*/
void main(void)
{
#define VREF_VALUE (500) // ADC VREF voltage in 1/100 of a volt
    uint32_t convertedValue2;
    uint16_t voltage2;

    // initialize the device
    SYSTEM_Initialize();

    // When using interrupts, you need to set the Global and Peripheral Interrupt Enable bits
    // Use the following macros to:
    // Enable the Global Interrupts
    //INTERRUPT_GlobalInterruptEnable();
    // Enable the Peripheral Interrupts
    //INTERRUPT_PeripheralInterruptEnable();
    // Disable the Global Interrupts
    //INTERRUPT_GlobalInterruptDisable();
    // Disable the Peripheral Interrupts
    //INTERRUPT_PeripheralInterruptDisable();

    while (1)
    {
        // Add your application code
        ADC_SelectChannel(channel_AN2);
        __delay_ms(1000);
        ADC_StartConversion();
        while (!ADC_IsConversionDone());
        convertedValue2 = ADC_GetConversionResult();
        voltage2 = (uint16_t)( (convertedValue2 * VREF_VALUE + 0x8000ul) >> 16 );
        printf("ADC voltage = %1u.%02u\r\n", voltage2/100, voltage2%100);
    }
}
As you have not provided any details on the sensor you are using, I cannot show you the integer math for the scaling and offset specific to your sensor.
The complete MPLAB X v5.40 project can be found on my GitHub repository here.
Finally, I send my ADC values in millivolts over the UART and do the conversion in the PC software.
It's not really clean, but it does the job...
Thanks
int i = 0;
int j = 0;
while (1) {
    ADC_SelectChannel(channel_AN2);
    ADC_StartConversion();
    while (!ADC_IsConversionDone());
    convertedValue = ADC_GetConversionResult();
    voltage = convertedValue * 0.0048875;
    i = voltage * 100;

    __delay_ms(500);

    ADC_SelectChannel(channel_AN3);
    ADC_StartConversion();
    while (!ADC_IsConversionDone());
    convertedValue2 = ADC_GetConversionResult();
    voltage2 = convertedValue2 * 0.0048875;
    j = voltage2 * 100;

    printf("ADC voltage = %1u - %1u\r\n", i, j);
}
Thanks a lot for the answers.
I think I understand the advice: convert the value to an integer and then divide it to get a float value on the serial interface.
I just have a problem with this line:
voltage2 = (uint16_t)( (convertedValue2 * VREF_VALUE + 0x8000ul) >> 16 );
It gives me something wrong.
I'm using the sensors
LM60
HIH-4000
I've made the conversion with this code:
voltage = convertedValue * 0.0048875; // VREF : 5V / 10 bits ADC
temp = (voltage - 0.424) / 0.00625; // LM60
sensorRH = (voltage - 0.852) / 0.031483; // HIH-4000
Thanks for your help
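For completeness, here is a hedged sketch of those two conversions in pure integer math, in the spirit of the answer above. It assumes ADC_GetConversionResult() returns a right-justified 10-bit value (0..1023) with a 5 V reference; if your MCC configuration returns a left-justified 16-bit result, as the earlier (... >> 16) line assumes, the divisor is 65536 instead of 1024. The variable names and the tenths-based scaling are made up for this example:

uint32_t raw = ADC_GetConversionResult();               /* assumed 0..1023, right-justified */
uint16_t mv  = (uint16_t)((raw * 5000ul + 512) / 1024); /* millivolts, rounded */

/* LM60: T = (V - 424 mV) / 6.25 mV per degC, kept in tenths of a degree */
int32_t temp_deci = ((int32_t)mv - 424) * 8 / 5;
/* HIH-4000: RH = (V - 852 mV) / 31.483 mV per %RH, kept in tenths of a percent */
int32_t rh_deci = ((int32_t)mv - 852) * 10000l / 31483l;

/* assumes readings above the sensor offsets, so both values are non-negative */
printf("T = %ld.%01ld C, RH = %ld.%01ld %%\r\n",
       (long)(temp_deci / 10), (long)(temp_deci % 10),
       (long)(rh_deci / 10), (long)(rh_deci % 10));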

How does time.h clock() work under Windows?

I am trying to create a simple queue schedule for an embedded system in C.
The idea is that within a round robin, some functions are called based on the time constraints declared in the Tasks[] array.
#include <time.h>
#include <stdio.h>
#include <windows.h>
#include <stdint.h>

//Constants
#define SYS_TICK_INTERVAL 1000UL
#define INTERVAL_0MS 0
#define INTERVAL_10MS (100000UL / SYS_TICK_INTERVAL)
#define INTERVAL_50MS (500000UL / SYS_TICK_INTERVAL)

//Function calls
void task_1(clock_t tick);
void task_2(clock_t tick);
uint8_t get_NumberOfTasks(void);

//Define the schedule structure
typedef struct
{
    double Interval;
    double LastTick;
    void (*Function)(clock_t tick);
} TaskType;

//Creating the schedule itself
TaskType Tasks[] =
{
    {INTERVAL_10MS, 0, task_1},
    {INTERVAL_50MS, 0, task_2},
};

int main(void)
{
    //Get the number of tasks to be executed
    uint8_t task_number = get_NumberOfTasks();

    //Initializing the clocks
    for(int i = 0; i < task_number; i++)
    {
        clock_t myClock1 = clock();
        Tasks[i].LastTick = myClock1;
        printf("Task %d clock has been set to %f\n", i, myClock1);
    }

    //Round Robin
    while(1)
    {
        //Go through all tasks in the schedule
        for(int i = 0; i < task_number; i++)
        {
            //Check if it is time to execute it
            if((Tasks[i].LastTick - clock()) > Tasks[i].Interval)
            {
                //Execute it
                clock_t myClock2 = clock();
                (*Tasks[i].Function)(myClock2);
                //Update the last tick
                Tasks[i].LastTick = myClock2;
            }
        }
        Sleep(SYS_TICK_INTERVAL);
    }
}

void task_1(clock_t tick)
{
    printf("%f - Hello from task 1\n", tick);
}

void task_2(clock_t tick)
{
    printf("%f - Hello from task 2\n", tick);
}

uint8_t get_NumberOfTasks(void)
{
    return sizeof(Tasks) / sizeof(*Tasks);
}
The code compiles without a single warning, but I guess I don't understand how clock() works.
Here you can see what I get when I run the program:
F:\AVR Microcontroller>timer
Task 0 clock has been set to 0.000000
Task 1 clock has been set to 0.000000
I tried changing Interval and LastTick from float to double just to make sure this was not a precision error, but it still does not work.
%f is not the right format specifier to print myClock1, as clock_t is likely not double; you shouldn't assume that it is. If you want to print myClock1 as a floating-point number, you have to convert it to double manually:
printf("Task %d clock has been set to %f\n", i, (double)myClock1);
Alternatively, use the macro CLOCKS_PER_SEC to turn myClock1 into a number of seconds:
printf("Task %d clock has been set to %f seconds\n", i,
(double)myClock1 / CLOCKS_PER_SEC);
Additionally, your subtraction in the scheduler loop is backwards. Think about it: clock() grows with time, so Tasks[i].LastTick - clock() always yields a negative value. I think you want clock() - Tasks[i].LastTick instead.
The behavior of the clock function depends on the operating system. On Windows it basically runs off the wall clock, while on e.g. Linux it measures the process CPU time.
Also, the result of clock by itself is useless; its only use is in comparisons between two clocks (e.g. clock_end - clock_start).
Finally, the clock_t type (which clock returns) is an integer type; you only get floating-point values if you cast a difference (like the one above) to e.g. double and divide by CLOCKS_PER_SEC. Attempting to print a clock_t using the "%f" format leads to undefined behavior.
Reading a clock reference might help.
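Putting both fixes together, the check inside the round robin could look like this (a sketch reusing the question's declarations, with Interval and LastTick understood as counts of clock ticks):

clock_t now = clock();
/* elapsed ticks since this task last ran (note the order of the operands) */
if (((double)now - Tasks[i].LastTick) > Tasks[i].Interval)
{
    (*Tasks[i].Function)(now);
    Tasks[i].LastTick = (double)now;
}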

Segmentation fault when calling clock()

I am trying to understand the effects of caching programmatically using the following program. I am getting a segfault with this code. I used GDB (compiled with -g -O0) and found that it was segfaulting on
start = clock() (first occurrence)
Am I doing something wrong? The code looks fine to me. Can someone point out the mistake?
#include <stdio.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#define MAX_SIZE (16*1024*1024)

int main()
{
    clock_t start, end;
    double cpu_time;
    int i = 0;
    int arr[MAX_SIZE];

    /* CPU clock ticks count start */
    start = clock();

    /* Loop 1 */
    for (i = 0; i < MAX_SIZE; i++)
        arr[i] *= 3;

    /* CPU clock ticks count stop */
    end = clock();

    cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("CPU time for loop 1 %.6f secs.\n", cpu_time);

    /* CPU clock ticks count start */
    start = clock();

    /* Loop 2 */
    for (i = 0; i < MAX_SIZE; i += 16)
        arr[i] *= 3;

    /* CPU clock ticks count stop */
    end = clock();

    cpu_time = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("CPU time for loop 2 %.6f secs.\n", cpu_time);

    return 0;
}
The array might be too big for the stack. Try making it static instead, so it goes into the global variable space. As an added bonus, static variables are initialized to all zero.
Unlike other kinds of storage, the compiler can check at compile time that resources exist for globals (and the OS can double-check at runtime before the program starts), so you don't need to handle out-of-memory errors. An uninitialized array won't make your executable file bigger.
This is an unfortunate rough edge of the way the stack works. It lives in a fixed-size buffer, set by the program executable's configuration according to the operating system, but its actual size is seldom checked against the available space.
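In code, the suggested change is a single line; the rest of the program stays as in the question:

static int arr[MAX_SIZE]; /* 16M ints = 64 MiB, now in static storage instead of the stack frame */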
Welcome to Stack Overflow land!
Try to change:
int arr[MAX_SIZE];
to:
int *arr = (int*)malloc(MAX_SIZE * sizeof(int));
As Potatoswatter suggested, the array might be too big for the stack, so you could allocate it on the heap rather than on the stack.
More information.

clock_gettime alternative in Mac OS X

When compiling a program I wrote on Mac OS X after installing the necessary libraries through MacPorts, I get this error:
In function 'nanotime':
error: 'CLOCK_REALTIME' undeclared (first use in this function)
error: (Each undeclared identifier is reported only once
error: for each function it appears in.)
It appears that clock_gettime is not implemented in Mac OS X. Is there an alternative means of getting the epoch time in nanoseconds? Unfortunately gettimeofday is in microseconds.
After hours of perusing different answers, blogs, and headers, I found a portable way to get the current time:
#include <time.h>
#include <sys/time.h>
#ifdef __MACH__
#include <mach/clock.h>
#include <mach/mach.h>
#endif
struct timespec ts;
#ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    ts.tv_sec = mts.tv_sec;
    ts.tv_nsec = mts.tv_nsec;
#else
    clock_gettime(CLOCK_REALTIME, &ts);
#endif
or check out this gist: https://gist.github.com/1087739
Hope this saves someone time. Cheers!
None of the solutions above answers the question. Either they don't give you absolute Unix time, or their accuracy is 1 microsecond. The most popular solution, by jbenet, is slow (~6000 ns) and does not count in nanoseconds even though its return type suggests so. Below is a test of the two solutions suggested by jbenet and Dmitri B, plus my take on this. You can run the code without changes.
The 3rd solution does count in nanoseconds and gives you absolute Unix time reasonably fast (~90 ns). So if someone finds it useful, please let us all know here :-). I will stick to the one from Dmitri B (solution #1 in the code); it fits my needs better.
I needed a commercial-quality alternative to clock_gettime() to make pthread_…timed.. calls, and found this discussion very helpful. Thanks, guys.
/*
Ratings of alternatives to clock_gettime() to use with pthread timed waits:
Solution 1 "gettimeofday":
Complexity : simple
Portability : POSIX 1
timespec : easy to convert from timeval to timespec
granularity : 1000 ns,
call : 120 ns,
Rating : the best.
Solution 2 "host_get_clock_service, clock_get_time":
Complexity : simple (error handling?)
Portability : Mac specific (is it always available?)
timespec : yes (struct timespec return)
granularity : 1000 ns (don't be fooled by timespec format)
call time : 6000 ns
Rating : the worst.
Solution 3 "mach_absolute_time + gettimeofday once":
Complexity : simple..average (requires initialisation)
Portability : Mac specific. Always available
timespec : system clock can be converted to timespec without float-math
granularity : 1 ns.
call time : 90 ns unoptimised.
Rating : not bad, but do we really need nanoseconds timeout?
References:
- OS X is UNIX System 3 [U03] certified
http://www.opengroup.org/homepage-items/c987.html
- UNIX System 3 <--> POSIX 1 <--> IEEE Std 1003.1-1988
http://en.wikipedia.org/wiki/POSIX
http://www.unix.org/version3/
- gettimeofday() is mandatory on U03,
clock_..() functions are optional on U03,
clock_..() are part of POSIX Realtime extensions
http://www.unix.org/version3/inttables.pdf
- clock_gettime() is not available on MacMini OS X
(Xcode > Preferences > Downloads > Command Line Tools = Installed)
- OS X recommends to use gettimeofday to calculate values for timespec
https://developer.apple.com/library/mac/documentation/Darwin/Reference/ManPages/man3/pthread_cond_timedwait.3.html
- timeval holds microseconds, timespec - nanoseconds
http://www.gnu.org/software/libc/manual/html_node/Elapsed-Time.html
- microtime() is used by kernel to implement gettimeofday()
http://ftp.tw.freebsd.org/pub/branches/7.0-stable/src/sys/kern/kern_time.c
- mach_absolute_time() is really fast
http://www.opensource.apple.com/source/Libc/Libc-320.1.3/i386/mach/mach_absolute_time.c
- Only 9 decimal digits have meaning when int nanoseconds are converted to double seconds
Tutorial: Performance and Time post uses .12 precision for nanoseconds
http://www.macresearch.org/tutorial_performance_and_time
Example:
Three ways to prepare absolute time 1500 milliseconds in the future to use with pthread timed functions.
Output, N = 3, stock MacMini, OSX 10.7.5, 2.3GHz i5, 2GB 1333MHz DDR3:
inittime.tv_sec = 1390659993
inittime.tv_nsec = 361539000
initclock = 76672695144136
get_abs_future_time_0() : 1390659994.861599000
get_abs_future_time_0() : 1390659994.861599000
get_abs_future_time_0() : 1390659994.861599000
get_abs_future_time_1() : 1390659994.861618000
get_abs_future_time_1() : 1390659994.861634000
get_abs_future_time_1() : 1390659994.861642000
get_abs_future_time_2() : 1390659994.861643671
get_abs_future_time_2() : 1390659994.861643877
get_abs_future_time_2() : 1390659994.861643972
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>       /* gettimeofday */
#include <mach/mach_time.h> /* mach_absolute_time */
#include <mach/mach.h>      /* host_get_clock_service, mach_... */
#include <mach/clock.h>     /* clock_get_time */

#define BILLION 1000000000L
#define MILLION 1000000L

#define NORMALISE_TIMESPEC( ts, uint_milli )          \
    do {                                              \
        ts.tv_sec  += uint_milli / 1000u;             \
        ts.tv_nsec += (uint_milli % 1000u) * MILLION; \
        ts.tv_sec  += ts.tv_nsec / BILLION;           \
        ts.tv_nsec  = ts.tv_nsec % BILLION;           \
    } while (0)

static mach_timebase_info_data_t timebase = { 0, 0 }; /* numer = 0, denom = 0 */
static struct timespec inittime = { 0, 0 }; /* nanoseconds since 1-Jan-1970 to init() */
static uint64_t initclock; /* ticks since boot to init() */

void init()
{
    struct timeval micro; /* microseconds since 1 Jan 1970 */
    if (mach_timebase_info(&timebase) != 0)
        abort(); /* very unlikely error */
    if (gettimeofday(&micro, NULL) != 0)
        abort(); /* very unlikely error */
    initclock = mach_absolute_time();
    inittime.tv_sec = micro.tv_sec;
    inittime.tv_nsec = micro.tv_usec * 1000;
    printf("\tinittime.tv_sec = %ld\n", inittime.tv_sec);
    printf("\tinittime.tv_nsec = %ld\n", inittime.tv_nsec);
    printf("\tinitclock = %ld\n", (long)initclock);
}

/*
 * Get absolute future time for pthread timed calls
 * Solution 1: microseconds granularity
 */
struct timespec get_abs_future_time_coarse(unsigned milli)
{
    struct timespec future;        /* ns since 1 Jan 1970 to 1500 ms in the future */
    struct timeval micro = {0, 0}; /* 1 Jan 1970 */
    (void) gettimeofday(&micro, NULL);
    future.tv_sec = micro.tv_sec;
    future.tv_nsec = micro.tv_usec * 1000;
    NORMALISE_TIMESPEC( future, milli );
    return future;
}

/*
 * Solution 2: via clock service
 */
struct timespec get_abs_future_time_served(unsigned milli)
{
    struct timespec future;
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    future.tv_sec = mts.tv_sec;
    future.tv_nsec = mts.tv_nsec;
    NORMALISE_TIMESPEC( future, milli );
    return future;
}

/*
 * Solution 3: nanosecond granularity
 */
struct timespec get_abs_future_time_fine(unsigned milli)
{
    struct timespec future; /* ns since 1 Jan 1970 to 1500 ms in future */
    uint64_t clock;         /* ticks since init */
    uint64_t nano;          /* nanoseconds since init */
    clock = mach_absolute_time() - initclock;
    nano = clock * (uint64_t)timebase.numer / (uint64_t)timebase.denom;
    future = inittime;
    future.tv_sec += nano / BILLION;
    future.tv_nsec += nano % BILLION;
    NORMALISE_TIMESPEC( future, milli );
    return future;
}

#define N 3
int main()
{
    int i, j;
    struct timespec time[3][N];
    struct timespec (*get_abs_future_time[])(unsigned milli) =
    {
        &get_abs_future_time_coarse,
        &get_abs_future_time_served,
        &get_abs_future_time_fine
    };
    init();
    for (j = 0; j < 3; j++)
        for (i = 0; i < N; i++)
            time[j][i] = get_abs_future_time[j](1500); /* now() + 1500 ms */
    for (j = 0; j < 3; j++)
        for (i = 0; i < N; i++)
            printf("get_abs_future_time_%d() : %10ld.%09ld\n",
                   j, time[j][i].tv_sec, time[j][i].tv_nsec);
    return 0;
}
It seems clock_gettime is indeed not implemented on macOS before Sierra 10.12. You may want to look at this blog entry. The main idea is in the following code snippet:
#include <mach/mach_time.h>
#define ORWL_NANO (+1.0E-9)
#define ORWL_GIGA UINT64_C(1000000000)
static double orwl_timebase = 0.0;
static uint64_t orwl_timestart = 0;
struct timespec orwl_gettime(void) {
    // be more careful in a multithreaded environment
    if (!orwl_timestart) {
        mach_timebase_info_data_t tb = { 0 };
        mach_timebase_info(&tb);
        orwl_timebase = tb.numer;
        orwl_timebase /= tb.denom;
        orwl_timestart = mach_absolute_time();
    }
    struct timespec t;
    double diff = (mach_absolute_time() - orwl_timestart) * orwl_timebase;
    t.tv_sec = diff * ORWL_NANO;
    t.tv_nsec = diff - (t.tv_sec * ORWL_GIGA);
    return t;
}
#if defined(__MACH__) && !defined(CLOCK_REALTIME)
#include <sys/time.h>
#define CLOCK_REALTIME 0
// clock_gettime is not implemented on older versions of OS X (< 10.12).
// If implemented, CLOCK_REALTIME will have already been defined.
int clock_gettime(int clk_id, struct timespec* t) {
    struct timeval now;
    int rv = gettimeofday(&now, NULL);
    (void)clk_id; /* unused; a name is required for the parameter in C */
    if (rv) return rv;
    t->tv_sec = now.tv_sec;
    t->tv_nsec = now.tv_usec * 1000;
    return 0;
}
#endif
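For reference, a small usage sketch of the orwl_gettime() shim above, timing an elapsed interval (long is 64-bit on OS X, so the nanosecond arithmetic does not overflow):

struct timespec t1 = orwl_gettime();
/* ... code being timed ... */
struct timespec t2 = orwl_gettime();
long elapsed_ns = (t2.tv_sec - t1.tv_sec) * 1000000000L +
                  (t2.tv_nsec - t1.tv_nsec);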
Everything you need is described in Technical Q&A QA1398: Mach Absolute Time Units; basically, the function you want is mach_absolute_time.
Here's a slightly earlier version of the sample code from that page that does everything using Mach calls (the current version uses AbsoluteToNanoseconds from CoreServices). In current OS X (i.e., Snow Leopard on x86_64) the absolute time values are actually in nanoseconds and so don't require any conversion at all. So, if you're being good and writing portable code, you'll convert, but if you're just doing something quick and dirty for yourself, you needn't bother.
FWIW, mach_absolute_time is really fast.
uint64_t GetPIDTimeInNanoseconds(void)
{
    uint64_t start;
    uint64_t end;
    uint64_t elapsed;
    uint64_t elapsedNano;
    static mach_timebase_info_data_t sTimebaseInfo;

    // Start the clock.
    start = mach_absolute_time();

    // Call getpid. This will produce inaccurate results because
    // we're only making a single system call. For more accurate
    // results you should call getpid multiple times and average
    // the results.
    (void) getpid();

    // Stop the clock.
    end = mach_absolute_time();

    // Calculate the duration.
    elapsed = end - start;

    // Convert to nanoseconds.
    // If this is the first time we've run, get the timebase.
    // We can use denom == 0 to indicate that sTimebaseInfo is
    // uninitialised because it makes no sense to have a zero
    // denominator in a fraction.
    if ( sTimebaseInfo.denom == 0 ) {
        (void) mach_timebase_info(&sTimebaseInfo);
    }

    // Do the maths. We hope that the multiplication doesn't
    // overflow; the price you pay for working in fixed point.
    elapsedNano = elapsed * sTimebaseInfo.numer / sTimebaseInfo.denom;
    printf("multiplier %u / %u\n", sTimebaseInfo.numer, sTimebaseInfo.denom);
    return elapsedNano;
}
Note that macOS Sierra 10.12 now supports clock_gettime():
#include <stdio.h>
#include <time.h>

int main() {
    struct timespec res;
    struct timespec time;
    clock_getres(CLOCK_REALTIME, &res);
    clock_gettime(CLOCK_REALTIME, &time);
    printf("CLOCK_REALTIME: res.tv_sec=%lu res.tv_nsec=%lu\n", res.tv_sec, res.tv_nsec);
    printf("CLOCK_REALTIME: time.tv_sec=%lu time.tv_nsec=%lu\n", time.tv_sec, time.tv_nsec);
}
It does provide nanoseconds; however, the resolution is 1000, so it is (in)effectively limited to microseconds:
CLOCK_REALTIME: res.tv_sec=0 res.tv_nsec=1000
CLOCK_REALTIME: time.tv_sec=1475279260 time.tv_nsec=525627000
You will need Xcode 8 or later to be able to use this feature. Code compiled to use it will not run on earlier versions of Mac OS X (10.11 or earlier).
Thanks for your posts.
I think you can add the following lines:
#ifdef __MACH__
#include <mach/mach_time.h>
#define CLOCK_REALTIME 0
#define CLOCK_MONOTONIC 0
int clock_gettime(int clk_id, struct timespec *t){
    mach_timebase_info_data_t timebase;
    mach_timebase_info(&timebase);
    uint64_t time;
    time = mach_absolute_time();
    double nseconds = ((double)time * (double)timebase.numer)/((double)timebase.denom);
    double seconds = nseconds / 1e9;
    t->tv_sec = seconds;
    t->tv_nsec = nseconds - (double)t->tv_sec * 1e9; /* sub-second remainder; tv_nsec must stay below 1e9 */
    return 0;
}
#else
#include <time.h>
#endif
Let me know what you get for latency and granularity
Maristic has the best answer here to date. Let me simplify and add a remark. #include and Init():
#include <mach/mach_time.h>

double conversion_factor;

void Init() {
    mach_timebase_info_data_t timebase;
    mach_timebase_info(&timebase);
    conversion_factor = (double)timebase.numer / (double)timebase.denom;
}
Use as:
uint64_t t1, t2;
Init();
t1 = mach_absolute_time();
/* profiled code here */
t2 = mach_absolute_time();
double duration_ns = (double)(t2 - t1) * conversion_factor;
Such a timer has a latency of 65 ns +/- 2 ns (2 GHz CPU). Use it if you need the "time evolution" of a single execution. Otherwise, loop your code 10000 times and profile it even with gettimeofday(), which is portable (POSIX) and has a latency of 100 ns +/- 0.5 ns (though only 1 us granularity).
I tried the version with clock_get_time, and did cache the host_get_clock_service call. It's way slower than gettimeofday; it takes several microseconds per invocation. And, what's worse, the return value has steps of 1000, i.e. it's still microsecond granularity.
I'd advise using gettimeofday and multiplying tv_usec by 1000.
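In code, that advice is just the following (a sketch; the real granularity remains 1 microsecond, as discussed):

#include <sys/time.h>
#include <time.h>

struct timeval tv;
struct timespec ts;
gettimeofday(&tv, NULL);
ts.tv_sec  = tv.tv_sec;
ts.tv_nsec = tv.tv_usec * 1000; /* microseconds -> nanoseconds */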
Based on the open-source mach_absolute_time.c we can see that the line extern mach_port_t clock_port; tells us there's a Mach port already initialized for monotonic time. This clock port can be accessed directly without having to call mach_absolute_time and then convert back to a struct timespec. Bypassing a call to mach_absolute_time should improve performance.
I created a small GitHub repo (PosixMachTiming) with code based on the extern clock_port and a similar thread. PosixMachTiming emulates clock_gettime for CLOCK_REALTIME and CLOCK_MONOTONIC. It also emulates the function clock_nanosleep for absolute monotonic time. Please give it a try and see how the performance compares. Maybe you'd like to create comparative tests or emulate other POSIX clocks/functions?
As of at least as far back as Mountain Lion, mach_absolute_time() returns nanoseconds and not absolute time (which was the number of bus cycles).
The following code on my MacBook Pro (2 GHz Core i7) showed that the time to call mach_absolute_time() averaged 39 ns over 10 runs (min 35, max 45), which is basically the time between the returns of the two calls to mach_absolute_time(), about one invocation:
#include <stdint.h>
#include <mach/mach_time.h>
#include <iostream>
using namespace std;

int main()
{
    uint64_t now, then;
    uint64_t abs;
    then = mach_absolute_time(); // return nanoseconds
    now = mach_absolute_time();
    abs = now - then;
    cout << "nanoseconds = " << abs << endl;
}
void clock_get_uptime(uint64_t *result);
void clock_get_system_microtime(uint32_t *secs, uint32_t *microsecs);
void clock_get_system_nanotime(uint32_t *secs, uint32_t *nanosecs);
void clock_get_calendar_microtime(uint32_t *secs, uint32_t *microsecs);
void clock_get_calendar_nanotime(uint32_t *secs, uint32_t *nanosecs);
For macOS you can find good information on the Apple developer pages:
https://developer.apple.com/library/content/documentation/Darwin/Conceptual/KernelProgramming/services/services.html
I found another portable solution.
Declare in some header file (or even in your source file):
/* If compiled on DARWIN/Apple platforms. */
#ifdef DARWIN
#define CLOCK_REALTIME 0x2d4e1588
#define CLOCK_MONOTONIC 0x0
#endif /* DARWIN */
And then add the function implementation:
#ifdef DARWIN
/*
 * Below we provide an alternative for clock_gettime,
 * which is not implemented in Mac OS X.
 */
static inline int clock_gettime(int clock_id, struct timespec *ts)
{
    struct timeval tv;

    if (clock_id != CLOCK_REALTIME)
    {
        errno = EINVAL;
        return -1;
    }
    if (gettimeofday(&tv, NULL) < 0)
    {
        return -1;
    }
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
    return 0;
}
#endif /* DARWIN */
Don't forget to include <time.h>.
