This question is an elaboration and continuation of my previous question, "On epoll_pwait, POSIX timers and X11 events. Most of X11 events is either delayed or dropped". I work with an XCB window that displays some graphical information. The application has interactive input and had a more or less static display. Now the requirements have changed: I need to add some periodic computations while keeping the interactive input.
The problem with the new requirements is that the computations run at a high rate. With that, I get inconsistent interaction with the XCB window: input may lag behind, or the rate of processing may change.
The setup multiplexes events with epoll_pwait. The events are signals, X11 events and, recently added, timers/timeouts.
What I understand as of now is that I need to separate user interaction from the computations. The problem with my setup is that the rate of X11 events changes in a way I can't explain.
So I decided to separate waiting on X11 events from the rest of the logic. Can you suggest a proper way to do it? Will having the X11 window in a separate thread/process/epoll set help?
And the actual question, as I look at it now, is: what is the frequency of epoll_wait wakeups? I plan to have one epoll_wait in a loop, with maybe some processes to wait on. I understand that epoll_wait will "wake up" at some random points in time.
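For example, here is the kind of separation I have in mind: a worker thread runs the computation and pokes the event loop through an eventfd, so the epoll loop only ever services input and redraws. This is just a sketch with a stand-in for the real work, not code from my actual program:
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>
static int compute_done_fd; /* eventfd: worker -> epoll loop notification */
static void heavy_compute(void){ usleep(10000); } /* stand-in for the real periodic work */
static void *compute_worker(void *arg)
{
    (void)arg;
    for(;;){
        heavy_compute();
        uint64_t one = 1;
        if(write(compute_done_fd, &one, sizeof(one)) != sizeof(one)) /* wake the epoll loop */
            perror("write(eventfd)");
    }
    return NULL;
}
int main(void)
{
    pthread_t tid;
    if((compute_done_fd = eventfd(0, EFD_NONBLOCK)) < 0){
        perror("eventfd");
        return 1;
    }
    pthread_create(&tid, NULL, compute_worker, NULL);
    /* ...add compute_done_fd to the epoll set next to x11_fd, sig_fd and tm_fd;
     * when it becomes readable, read the 8-byte counter and redraw, so the
     * event loop itself never blocks on the computation... */
    pthread_join(tid, NULL);
    return 0;
}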
Update
My setup is close to this one:
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <time.h>
#include <math.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/signalfd.h>
#include <sys/epoll.h>
#include <sys/timerfd.h>
#include <sys/socket.h>
#include <xcb/xcb.h>
static struct timespec tm_p, tm_c, tm_diff;
static unsigned long nevents = 0;
static inline void timespec_diff(struct timespec *a, struct timespec *b, struct timespec *res) {
res->tv_sec = a->tv_sec - b->tv_sec;
res->tv_nsec = a->tv_nsec - b->tv_nsec;
if (res->tv_nsec < 0) {
--res->tv_sec;
res->tv_nsec += 1000000000L;
}
}
/* burn CPU for roughly 1e7 floating-point iterations and report the elapsed time */
static double compute(){
double t = 1.0;
double eps = 1e-7;
double eps_2 = eps / 2.0;
clock_gettime(CLOCK_MONOTONIC, &tm_p);
while(t > eps){
t -= eps;
t += eps;
t -= eps_2 + eps_2;
}
clock_gettime(CLOCK_MONOTONIC, &tm_c);
timespec_diff(&tm_c, &tm_p, &tm_diff);
printf(" compute: %ld %f\n", tm_diff.tv_sec, tm_diff.tv_nsec / 1e9);
return (int)t;
}
/* defines for epoll */
#define MAXEVENTS 64
#define SET_EV(_ev,_fd,_events) do { (_ev).data.fd = (_fd); (_ev).events = (_events); } while (0)
static int xcb_get_atom(xcb_connection_t *c,
const char *name,
xcb_atom_t *atom)
{
xcb_intern_atom_cookie_t cookie;
xcb_generic_error_t *error;
xcb_intern_atom_reply_t *reply;
cookie = xcb_intern_atom(c, 0, strlen(name), name);
reply = xcb_intern_atom_reply(c, cookie, &error);
if(NULL == reply){
free(error);
return -1;
}
*atom = reply->atom;
free(reply);
return 0;
}
static int xcb_change_prop_wm_close(xcb_connection_t *c,
xcb_window_t window,
xcb_atom_t wm_p,
xcb_atom_t atom)
{
xcb_void_cookie_t cookie;
xcb_generic_error_t *error;
xcb_atom_enum_t type = XCB_ATOM_ATOM;
uint8_t format = 32;
uint32_t data_len = 1;
cookie = xcb_change_property_checked(c, /* xcb connection */
XCB_PROP_MODE_REPLACE, /* mode */
window, /* window */
wm_p, /* the property to change */
type, /* type of the property */
format, /* format(bits) */
data_len, /* number of elements(see format) */
&atom /* property data */
);
error = xcb_request_check(c, cookie);
if (error) {
free(error);
return -1;
}
return 0;
}
int main()
{
xcb_connection_t *c = NULL;
xcb_screen_t *screen;
xcb_window_t win;
xcb_atom_t a_wm_p;
xcb_atom_t wm_p_close;
struct epoll_event ep_ev, *ep_evs = NULL;
struct signalfd_siginfo siginf;
sigset_t mask_sigs, mask_osigs;
int sig_fd = -1, x11_fd = -1, ep_fd = -1, tm_fd = -1;
/* set up signals */
if(sigemptyset(&mask_sigs) < 0){
perror(" * sigemptyset(&mask_sigs)");
goto main_terminate;
}
if(sigaddset(&mask_sigs, SIGINT)){ /* these signals will be blocked. the signals will arrive */
perror(" * sigaddset(&mask_sigs, SIGINT)");
goto main_terminate;
}
if(sigaddset(&mask_sigs, SIGQUIT)){ /* to epoll and not to a default signal handler. */
perror(" * sigaddset(&mask_sigs, SIGQUIT)");
goto main_terminate;
}
/* save old sigmask, replace it with new sigmask */
if(sigprocmask(SIG_BLOCK, &mask_sigs, &mask_osigs) < 0){
perror(" * sigprocmask(SIG_BLOCK, &mask_sigs, &mask_osigs)");
goto main_terminate;
}
/* get signal file descriptor */
if((sig_fd = signalfd(-1, &mask_sigs, 0)) < 0){
perror(" * signalfd(-1, &mask_sigs, 0)");
goto main_terminate;
}
/* set signal fd as non-blocking */
{
int on = 1;
if(ioctl(sig_fd, FIONBIO, (char *)&on) < 0){
perror(" * ioctl(sig_fd, FIONBIO)");
goto main_terminate;
}
}
/* Open the connection to the X server */
c = xcb_connect (NULL, NULL);
if(xcb_connection_has_error(c)){ /* xcb_connect never returns NULL, so check for errors */
perror(" * xcb_connect");
goto main_terminate;
}
/* Get the first screen */
screen = xcb_setup_roots_iterator (xcb_get_setup (c)).data;
/* Ask for our window's Id */
win = xcb_generate_id(c);
/* Create the window */
{
unsigned int cw_mask = XCB_CW_BORDER_PIXEL
| XCB_CW_EVENT_MASK
;
/* values must follow in the incresing order of the cw_mask constants */
unsigned int cw_values[] = {screen->white_pixel,
XCB_EVENT_MASK_KEY_PRESS|XCB_EVENT_MASK_KEY_RELEASE
};
xcb_create_window (c, /* Connection */
XCB_COPY_FROM_PARENT, /* depth (same as root)*/
win, /* window Id */
screen->root, /* parent window */
0, 0, /* x, y */
150, 150, /* width, height */
10, /* border_width */
XCB_WINDOW_CLASS_INPUT_OUTPUT, /* class */
screen->root_visual, /* visual */
cw_mask, cw_values); /* masks */
}
/* get x11 connection file descriptor */
x11_fd = xcb_get_file_descriptor(c);
/* atom WM_PROTOCOLS */
if(xcb_get_atom(c, "WM_PROTOCOLS", &a_wm_p) < 0){
fprintf(stderr, " %s:%s:%d\n", __FILE__, __func__, __LINE__);
goto main_terminate;
}
/* atom window close */
if(xcb_get_atom(c, "WM_DELETE_WINDOW", &wm_p_close) < 0){
fprintf(stderr, " %s:%s:%d\n", __FILE__, __func__, __LINE__);
goto main_terminate;
}
{ /* wm prop: intercept close */
if(xcb_change_prop_wm_close(c, win, a_wm_p, wm_p_close) < 0){
fprintf(stderr, " %s:%s:%d\n", __FILE__, __func__, __LINE__);
goto main_terminate;
}
}
/* create epoll set file descriptor */
if((ep_fd = epoll_create(1)) < 0){
perror(" * epoll_create");
goto main_terminate;
}
/* allocate events for epoll queue */
if(NULL == (ep_evs = (struct epoll_event*)calloc(MAXEVENTS,sizeof(ep_ev)))){
perror(" * calloc(MAXEVENTS)");
goto main_terminate;
}
{ /* fd timer */
struct itimerspec ts;
if((tm_fd = timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK)) < 0){
perror(" * timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK)");
goto main_terminate;
}
ts.it_value.tv_sec = 1;
ts.it_value.tv_nsec = 0;
ts.it_interval.tv_sec = 0;
ts.it_interval.tv_nsec = (unsigned long)10e6; /* 10 msec */
if(timerfd_settime(tm_fd, 0, &ts, NULL) < 0){
perror(" * timerfd_settime(tm_fd, 0, &ts, NULL)");
goto main_terminate;
}
}
/* add X11 event */
SET_EV(ep_ev,x11_fd,EPOLLIN);
if(epoll_ctl (ep_fd, EPOLL_CTL_ADD, x11_fd, &ep_ev) < 0){
perror(" * epoll_ctl x11_fd");
goto main_terminate;
}
/* add timer event */
SET_EV(ep_ev,tm_fd,EPOLLIN);
if(epoll_ctl (ep_fd, EPOLL_CTL_ADD, tm_fd, &ep_ev) < 0){
perror(" * epoll_ctl tm_fd");
goto main_terminate;
}
/* add signal event */
SET_EV(ep_ev,sig_fd,EPOLLIN);
if(epoll_ctl (ep_fd, EPOLL_CTL_ADD, sig_fd, &ep_ev) < 0){
perror(" * epoll_ctl sig_fd");
goto main_terminate;
}
/* window title */
const char *title = "epoll_pwait";
xcb_change_property (c,
XCB_PROP_MODE_REPLACE,
win,
XCB_ATOM_WM_NAME,
XCB_ATOM_STRING,
8,
strlen (title),
title );
/* Map the window on the screen */
xcb_map_window (c, win);
/* Make sure commands are sent before we pause, so window is shown */
xcb_flush (c);
while(1){
int n, i, fd, status;
bool f_compute = false;
bool f_exit_sig = false;
bool f_win_close = false;
n = epoll_pwait (ep_fd, ep_evs, MAXEVENTS, -1, &mask_sigs); /* wait, signal safe */
if(n < 0){
fprintf(stderr, " * main(): %s:%s:%d\n", __FILE__, __func__, __LINE__);
status = 1;
goto main_terminate;
}
for(i = 0; i < n; ++i){ /* service epoll events */
fd = ep_evs[i].data.fd;
if(fd == sig_fd){ /* signal */
status = read(fd, &siginf, sizeof(siginf));
if(status != sizeof(siginf)){
fprintf(stderr,"read != sizeof(siginf)");
goto main_terminate;
}
if(siginf.ssi_signo == SIGINT){
printf("got SIGINT\n");
f_exit_sig = true;
}else if(siginf.ssi_signo == SIGQUIT){
printf("got SIGQUIT\n");
f_exit_sig = true;
goto main_terminate;
}else {
printf("got unregistered signal\n");
}
}else if(fd == x11_fd){ /* x11 event */
xcb_generic_event_t *event;
while((event = xcb_poll_for_event(c))){
if (event->response_type == 0){ /* error received */
free(event);
goto main_terminate;
}
switch(event->response_type & ~0x80){
case XCB_CLIENT_MESSAGE: { /* window close */
xcb_client_message_event_t *ce = (xcb_client_message_event_t*)event;
if(ce->data.data32[0] == wm_p_close){ /* window should close */
f_win_close = true;
}
} break;
case XCB_KEY_PRESS: { /* keyboard key press */
printf("XCB_KEY_PRESS\n");
nevents++;
} break;
} /* end switch */
free(event);
} /* end while event loop */
}else if(fd == tm_fd){ /* timer event */
uint64_t overrun;
status = read(fd, &overrun, sizeof(overrun)); /* returns the number of expirations */
if(status == sizeof(overrun)) { /* was: status != EAGAIN, but read() returns -1 and sets errno */
//~ printf(" ! timer overruns: %lu\n", overrun);
}
f_compute = true;
}
} /* finish service epoll events */
if(f_exit_sig){ /* exit signal */
goto main_terminate;
}
if(f_win_close){ /* window close */
goto main_terminate;
}
if(f_compute){ /* do some computations */
compute();
xcb_flush(c);
}
} /* end while(1) */
main_terminate:
if(sig_fd != -1){
close(sig_fd);
}
if(tm_fd != -1){
close(tm_fd);
}
if(ep_fd != -1){
close(ep_fd);
}
if(ep_evs){
free(ep_evs);
}
if(c){ /* xcb_connect() may not have run yet */
xcb_disconnect(c);
}
if (sigprocmask(SIG_SETMASK, &mask_osigs, NULL) < 0){
perror(" * sigprocmask(SIG_SETMASK, &mask_osigs, NULL)");
}
printf("received %lu events\n", nevents);
return 0;
}
With the above, I cannot reproduce the input lag that I have in the more verbose program. I tested the above with xdotool, sent some X11 input events, and my eye can't catch any visible input delay. All events get delivered. For now I cannot post the full code that I have the problem with.
what is the frequency of epoll_wait wakeups?
I'll assume you are talking about the precision of wakeups. This surely depends on kernel details, but let's just make an experiment: we can write a small program that tests how long epoll_wait() takes. It tests both a timeout of zero and a timeout of 1 ms, since a timeout of zero might be treated specially.
Output on my system:
empty: avg 0.015636 μs, min 0.014 μs, max 0.065 μs
sleep 1 ms: avg 1108.55 μs, min 1014.72 μs, max 1577.42 μs
epoll_wait 0: avg 0.761084 μs, min 0.38 μs, max 37.787 μs
epoll_wait 1: avg 1108.97 μs, min 1017.22 μs, max 2602.4 μs
Doing nothing (empty) measures less than a microsecond, so the following results should be somewhat reliable. The minimum time is also not zero, so the clock has enough precision for what we are doing.
Sleeping 1 ms sleeps at least 1.014 ms, but there also was a case of almost 1.6 ms. I guess that means that wakeups are not all that precise.
Using epoll_wait() to do nothing takes less than a microsecond. That is more than doing nothing at all, but it still does basically nothing, so perhaps this really just measures the syscall overhead...?
Sleeping 1 ms with epoll_wait() behaves more or less the same as sleeping for 1 ms with nanosleep().
If you want to improve this experiment, you could actually register some FDs via epoll_ctl(). It might be that the kernel handles "empty" epoll FDs specially.
#include <stdio.h>
#include <sys/epoll.h>
#include <time.h>
#include <unistd.h>
#define MEASUREMENT_COUNT 10000
#define NUM_EVENTS 42
static int epoll_fd;
double diff_micro_sec(struct timespec *a, struct timespec *b) {
double sec = a->tv_sec - b->tv_sec;
double ns = a->tv_nsec - b->tv_nsec;
return sec * 1e6 + ns / 1e3;
}
static void empty(void) {
}
static void sleep_one_ms(void) {
struct timespec spec;
spec.tv_sec = 0;
spec.tv_nsec = 1000 * 1000;
nanosleep(&spec, NULL);
}
static void epoll_wait_0(void) {
struct epoll_event events[NUM_EVENTS];
epoll_wait(epoll_fd, events, NUM_EVENTS, 0);
}
static void epoll_wait_1(void) {
struct epoll_event events[NUM_EVENTS];
epoll_wait(epoll_fd, events, NUM_EVENTS, 1);
}
static void do_it(const char *name, void (*func)(void)) {
double sum, min, max;
struct timespec a, b;
for (int i = 0; i < MEASUREMENT_COUNT; i++) {
double diff;
clock_gettime(CLOCK_MONOTONIC, &a);
func();
clock_gettime(CLOCK_MONOTONIC, &b);
diff = diff_micro_sec(&b, &a);
if (i == 0) {
sum = diff;
min = diff;
max = diff;
} else {
sum += diff;
if (diff < min)
min = diff;
if (diff > max)
max = diff;
}
}
printf("%14s: avg %g μs, min %g μs, max %g μs\n", name, sum / MEASUREMENT_COUNT, min, max);
}
int main() {
do_it("empty", empty);
do_it("sleep 1 ms", sleep_one_ms);
epoll_fd = epoll_create(1);
do_it("epoll_wait 0", epoll_wait_0);
do_it("epoll_wait 1", epoll_wait_1);
close(epoll_fd);
return 0;
}
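As a follow-up to the suggestion above, here is one way the experiment could be extended: register one idle FD before timing, so the kernel has a non-empty interest list to poll. The pipe is my arbitrary choice; any pollable FD would do:
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>
#define NUM_EVENTS 42
int main(void)
{
    int pipe_fds[2];
    struct epoll_event ev, events[NUM_EVENTS];
    int epoll_fd = epoll_create(1);
    if(epoll_fd < 0 || pipe(pipe_fds) < 0){
        perror("setup");
        return 1;
    }
    ev.events = EPOLLIN;
    ev.data.fd = pipe_fds[0]; /* read end never becomes readable: nobody writes */
    epoll_ctl(epoll_fd, EPOLL_CTL_ADD, pipe_fds[0], &ev);
    /* plug this epoll_fd into the do_it() harness above; a single call
     * shown here for illustration: */
    epoll_wait(epoll_fd, events, NUM_EVENTS, 1); /* now polls a non-empty set */
    close(pipe_fds[0]);
    close(pipe_fds[1]);
    close(epoll_fd);
    return 0;
}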
I have the following loop that is supposed to run at a little above 40 Hz (24 ms between each run). It works great for a couple of seconds to minutes, but then goes down to 20 Hz for no apparent (at least not to me) reason.
I have tried running the code on an Onion Omega2 (which runs OpenWrt) and an Ubuntu laptop with the same result (the code went down to 25 Hz on the Omega2, and 20 Hz on my laptop).
How can I force it to continue to run at the right speed? I have checked the output with an oscilloscope, and the send_brk and send_dmx functions seem to run perfectly even when the program is throttled, so I don't think they are the problem.
double t1 = now();
double t2 = now();
while ( 1 ) {
// Attempt receive UDP data
int x = read(sock, &DMX[1], SIZE - 1);
t2 = now();
double dt = t2 - t1;
if(dt < 0.024){
usleep(500);
continue;
}
t1 = t2;
printf("Frame in %f %f/sec\n", dt, 1.0/dt);
if (x < 0) {
if(errno != EAGAIN){
fprintf(stderr, "Error reading from socket %s\n", strerror(errno));
}
}
send_brk();
send_dmx();
}
The functions called are below.
send_brk() sends a serial break that lasts 88 µs:
void send_brk() {
ioctl(fd, TIOCSBRK);
double start = now();
while((now() - start) < 0.000088){
}
ioctl(fd, TIOCCBRK);
}
send_dmx() writes a byte buffer of length 512 to the serial port:
void send_dmx() {
// setmode(TX);
int n = write(fd, DMX, SIZE);
if(n == -1) {
if (errno != EAGAIN) {
fprintf(stderr, "Error writing to serial %s\n", strerror(errno));
exit(1);
}
}
if (n != SIZE) {
printf("couldn't write full frame =(\n");
}
}
and now() returns the current time in seconds as a double:
double now() {
struct timeval tv;
gettimeofday(&tv, NULL);
double time = tv.tv_sec;
time = time + (tv.tv_usec / 1000000.0);
return time;
}
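One idea I have been wondering about (not what the code above does) is pacing the loop against absolute deadlines instead of polling with usleep(500), so a late frame does not push back every following frame. A rough sketch:
#include <time.h>
/* run one frame every 24 ms against absolute deadlines, so jitter in a
 * single iteration does not accumulate into drift */
static void paced_loop(void (*do_frame)(void))
{
    struct timespec next;
    clock_gettime(CLOCK_MONOTONIC, &next);
    for(;;){
        do_frame(); /* the UDP read + send_brk() + send_dmx() would go here */
        next.tv_nsec += 24 * 1000000L; /* advance the deadline by 24 ms */
        if(next.tv_nsec >= 1000000000L){
            next.tv_nsec -= 1000000000L;
            next.tv_sec += 1;
        }
        clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
    }
}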
I have the following code for my educational socket server in C.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/time.h> /* for gettimeofday() */
#include <time.h>
#include <unistd.h>
double get_wall_time()
{
struct timeval time;
if (gettimeofday(&time, NULL)){
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * 0.000001;
}
double get_cpu_time()
{
return (double)clock() / CLOCKS_PER_SEC;
}
int main()
{
double wall = get_wall_time();
double cpu = get_cpu_time();
int sfd = socket(AF_INET, SOCK_STREAM, 0);
struct sockaddr_in own_addr = {0};
own_addr.sin_family = AF_INET;
own_addr.sin_port = htons(5678);
bind(sfd, (struct sockaddr *)&own_addr, sizeof(own_addr));
listen(sfd, 5);
static char message[] = "hello from server\n";
double wall_accept = 0;
double cpu_accept = 0;
int count = 0;
while (1) {
if (count++ == 1000) {
break;
}
double wall_start = get_wall_time();
double cpu_start = get_cpu_time();
int client_sfd = accept(sfd, NULL, NULL);
wall_accept += get_wall_time() - wall_start;
cpu_accept += get_cpu_time() - cpu_start;
send(client_sfd, message, sizeof(message), 0);
close(client_sfd);
}
wall = get_wall_time() - wall;
cpu = get_cpu_time() - cpu;
printf("wall accept: %lf\n", wall_accept);
printf("cpu accept: %lf\n", cpu_accept);
printf("wall: %lf\n", wall);
printf("cpu: %lf\n", cpu);
}
To test I use seq 1000 | time parallel -j 1 -n0 'nc 127.0.0.1 5678' | wc -l, with these results:
wall accept: 6.436480
cpu accept: 0.010000
wall: 6.456266
cpu: 0.020000
For 10000 requests the result is:
wall accept: 55.434541
cpu accept: 0.080000
wall: 55.633679
cpu: 0.260000
Is accept() slow, or am I doing something wrong? Or maybe this is a normal result for a single-threaded implementation?
UPD. I also wrote a server with pthreads that sends the message in a different thread.
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
#include <stdlib.h>
#include <sys/time.h>
double get_wall_time()
{
struct timeval time;
if (gettimeofday(&time, NULL)){
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * 0.000001;
}
double get_cpu_time()
{
return (double)clock() / CLOCKS_PER_SEC;
}
void *send_message(void *pclient_sfd)
{
int client_sfd = *(int *)pclient_sfd;
free(pclient_sfd);
static char message[] = "hello from server\n";
send(client_sfd, message, sizeof(message), 0);
close(client_sfd);
return NULL;
}
int main()
{
double wall = get_wall_time();
double cpu = get_cpu_time();
int sfd = socket(AF_INET, SOCK_STREAM, 0);
struct sockaddr_in own_addr = {0};
own_addr.sin_family = AF_INET;
own_addr.sin_port = htons(5678);
bind(sfd, (struct sockaddr *)&own_addr, sizeof(own_addr));
listen(sfd, 5);
double wall_accept = 0;
double cpu_accept = 0;
int count = 0;
while (1) {
if (count++ == 10000) {
break;
}
int *pclient_sfd = malloc(sizeof(*pclient_sfd));
double wall_start = get_wall_time();
double cpu_start = get_cpu_time();
*pclient_sfd = accept(sfd, NULL, NULL);
wall_accept += get_wall_time() - wall_start;
cpu_accept += get_cpu_time() - cpu_start;
pthread_t tid;
pthread_create(&tid, NULL, send_message, (void *)pclient_sfd);
}
wall = get_wall_time() - wall;
cpu = get_cpu_time() - cpu;
printf("wall accept: %lf\n", wall_accept);
printf("cpu accept: %lf\n", cpu_accept);
printf("wall: %lf\n", wall);
printf("cpu: %lf\n", cpu);
return 0;
}
Then I use seq 10000 | time parallel -j 4 -n0 'nc 127.0.0.1 5678' | wc -l and it takes 58 seconds.
It's the way you're testing it. When you use this
seq 10000 | time parallel -j 4 -n0 'nc 127.0.0.1 5678' | wc -l
That's actually going to impact the test, because you're spawning lots of processes; i.e., you're not actually testing the C application, you're testing the ability to spawn processes.
If we change this to a simple Python script, i.e.:
#!/usr/bin/env python
import socket
TCP_IP = '127.0.0.1'
TCP_PORT = 5678
BUFFER_SIZE = 1024
msg = "1"
for i in range(0, 1000):
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((TCP_IP, TCP_PORT))
    s.send(msg.encode('utf-8'))
    data = s.recv(BUFFER_SIZE)
    s.close()
    print("received data: %s" % data)
and run the test, the results are vastly different:
real 0m0.269s
user 0m0.074s
sys 0m0.114s
If you really want to test this and see how fast it is, you need to use separate machines, and typically you might need to write the client in C as well, or your limiting factor might be the client. The best tool I've seen for this (HTTP-specific), where you might find some good code, is wrk:
https://github.com/wg/wrk
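For reference, a typical invocation looks like this (the canonical example from the wrk README; it benchmarks HTTP endpoints, so the server under test has to speak HTTP):
wrk -t12 -c400 -d30s http://127.0.0.1:8080/index.html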
Take a look at your listen() second argument and try increasing it.
Here is the text from man 2 listen:
The backlog argument defines the maximum length to which the queue of pending connections for sockfd may grow. If a connection request arrives when the queue is full, the client may receive an error with an indication of ECONNREFUSED or, if the underlying protocol supports retransmission, the request may be ignored so that a later reattempt at connection succeeds.
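For example, both servers above call listen(sfd, 5); a larger backlog is a one-line change (SOMAXCONN requests the system ceiling; using it here is my suggestion, not from the original code):
listen(sfd, SOMAXCONN); /* let the kernel queue as many pending connections as it allows */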
Interrupts are handled in kernel mode, but regarding handling interrupts under RT Linux they are saying that:
I need to write code using RTLinux in kernel space, with kernel modules. With QNX, you can't write kernel modules (the kernel is not open) and you have real-time in user-space.
My questions:
How do I write code in kernel space?
What exactly is meant by that?
Where should I modify the kernel?
Please suggest some ideas.
UDP code:
int CreateSocket()
{
socklen_t len;
// Socket creation for UDP
acceptSocket=socket(AF_INET,SOCK_DGRAM,0);
if(acceptSocket==-1)
{
printf("Failure: socket creation is failed, failure code\n");
return 1;
}
else
{
printf("Socket started!\n");
}
memset(&addr, 0, sizeof(addr));
addr.sin_family=AF_INET;
addr.sin_port=htons(port);
addr.sin_addr.s_addr=htonl(INADDR_ANY);
rc=bind(acceptSocket,(struct sockaddr*)&addr,sizeof(addr));
if(rc== -1)
{
printf("Oh dear, something went wrong with bind()! %s\n", strerror(errno));
return 1;
}
else
{
printf("Socket an port %d \n",port);
}
while(rc!=-1)
{
len = sizeof(client);
rc=recvfrom(acceptSocket,buf, 256, 0, (struct sockaddr*) &client, &len);
//I am calculating the time here
InterruptTime = GetTimeStamp();
measurements[17] = InterruptTime;
if(rc==0)
{
printf("Server has no connection..\n");
break;
}
if(rc==-1)
{
printf("Oh dear, something went wrong with read()! %s\n", strerror(errno));
break;
}
XcpIp_RxCallback( (uint16) rc, (uint8*) buf, (uint16) port );
}
close(acceptSocket);
return 1;
}
int main()
{
CreateSocket();
while(1)
{
TASK1(Task2ms_Raster);
TASK2(Task10ms_Raster);
TASK3(Task100ms_Raster);
}
}
Unlike QNX, the Linux kernel is open source software, which means that you can modify and change it in whatever ways you desire. You grab the sources from http://kernel.org (preferably you use Git to clone into a local repository), make your modifications, additions, etc., compile the thing, and boot your computer with it.
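For example, cloning the mainline tree (the usual kernel.org Git URL):
git clone https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git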
Note that Linux since version 2.6 also offers realtime scheduling, which might be sufficient.
Update code example
For the time raster task execution I suggest the following:
#define _POSIX_C_SOURCE 200809L
#define _XOPEN_SOURCE 500
#include <sched.h> /* for sched_setscheduler */
#include <unistd.h> /* for usleep */
#include <time.h> /* for clock_gettime */
#include <string.h> /* for memset */
#define MS_to_US(x) ((x)*1000)
useconds_t delta_t_us(struct timespec const *a, struct timespec const *b)
{
time_t const delta_sec = b->tv_sec - a->tv_sec;
long const delta_nsec = b->tv_nsec - a->tv_nsec;
/* this might actually overflow for long time intervals;
 * should be safe for a delta_t < 2ms though */
return delta_sec * 1000000 + delta_nsec / 1000;
}
void rastertask()
{
struct sched_param sparm;
memset(&sparm, 0, sizeof(sparm));
sparm.sched_priority = 10; /* 1 = lowest, 99 = highest for the realtime policies */
sched_setscheduler(
0 /* pid, 0 ==> this process */,
SCHED_RR /* policy */,
&sparm);
unsigned int n_loop;
for(n_loop=0;;n_loop++) {
struct timespec ts_start, ts_end;
clock_gettime(CLOCK_MONOTONIC, &ts_start);
TASK1(); /* gets called every 2ms */
if( (n_loop % 5) == 0) {
TASK2(); /* gets called every 5 * 2ms = 10ms */
}
if( (n_loop % 50) == 0) {
TASK3(); /* gets called every 50 * 2ms = 100ms */
}
if( (n_loop % 250) == 0 ) {
/* reset loop counter when smallest common
* multiple of timing grid has been reached */
n_loop = 0;
}
clock_gettime(CLOCK_MONOTONIC, &ts_end);
useconds_t const tasks_execution_time = delta_t_us(&ts_start, &ts_end);
if( tasks_execution_time >= MS_to_US(2) ) {
/* report an error that tasks took longer than 2ms to execute */
}
/* wait for 2ms - tasks_execution_time so that tasks get called in
 * a close 2ms timing grid; skip the sleep if the tasks overran the
 * slot, since the unsigned subtraction would underflow */
if( tasks_execution_time < MS_to_US(2) ) {
usleep( MS_to_US(2) - tasks_execution_time );
}
}
}
Please note that the process requires either CAP_SYS_NICE privileges (mandatory prior to Linux 2.6.12) or a sufficient RLIMIT_RTPRIO resource limit to be able to set its priority.
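A small sketch of how to surface that permission problem, since the example above ignores the sched_setscheduler() return value (the error reporting is my addition):
#define _POSIX_C_SOURCE 200809L
#include <errno.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
/* try to switch to SCHED_RR and report why it failed instead of
 * silently running on as SCHED_OTHER */
int set_rt_priority(int prio)
{
    struct sched_param sparm;
    struct rlimit rlim;
    memset(&sparm, 0, sizeof(sparm));
    sparm.sched_priority = prio;
    if(sched_setscheduler(0, SCHED_RR, &sparm) == 0)
        return 0;
    fprintf(stderr, "sched_setscheduler: %s\n", strerror(errno));
    if(getrlimit(RLIMIT_RTPRIO, &rlim) == 0)
        fprintf(stderr, "RLIMIT_RTPRIO soft limit: %llu\n",
                (unsigned long long)rlim.rlim_cur);
    return -1;
}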