libcurl async integration with kevent on macOS Sierra - c

I'm integrating curl into an asynchronous I/O event loop based on kqueue.
libcurl has an excellent API for integrating into the application's event loop.
You provide libcurl with two callbacks: one to set a timer (used to limit request/connect times), and the other to register libcurl's file descriptors for read/write/error events.
The documentation for the callback used to perform FD registration is here: CURLMOPT_SOCKETFUNCTION
The argument that informs the callback of what events libcurl is interested in has four enum values (a sketch of the callback itself follows the list):
CURL_POLL_IN
Wait for incoming data. For the socket to become readable.
CURL_POLL_OUT
Wait for outgoing data. For the socket to become writable.
CURL_POLL_INOUT
Wait for incoming and outgoing data. For the socket to become readable or writable.
CURL_POLL_REMOVE
The specified socket/file descriptor is no longer used by libcurl.
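For context, this is the documented shape of the callback installed via CURLMOPT_SOCKETFUNCTION; the body below is only an illustrative sketch (names and actions are application-specific), with 'what' carrying one of the values above:

#include <curl/curl.h>

/* Sketch of a CURLMOPT_SOCKETFUNCTION callback; the registration calls
 * themselves are application-specific and only hinted at in comments. */
static int socket_callback(CURL *easy, curl_socket_t s, int what,
                           void *userp, void *socketp)
{
    (void)easy; (void)userp; (void)socketp;

    switch (what) {
    case CURL_POLL_IN:     /* register s for read events */           break;
    case CURL_POLL_OUT:    /* register s for write events */          break;
    case CURL_POLL_INOUT:  /* register s for read and write events */ break;
    case CURL_POLL_REMOVE: /* drop all registrations for s */         break;
    }
    return 0;
}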
Although not explicitly documented, libcurl expects that on subsequent calls to the callback the filter state of the event loop is updated to match what it passed, i.e. if on the first call it passed CURL_POLL_IN (EVFILT_READ) and on a subsequent call it passed CURL_POLL_OUT (EVFILT_WRITE), the original EVFILT_READ filter would be removed.
I updated the FD registration code to handle this.
int fr_event_fd_insert(fr_event_list_t *el, int fd,
                       fr_event_fd_handler_t read,
                       fr_event_fd_handler_t write,
                       fr_event_fd_handler_t error,
                       void *ctx)
{
    int filter = 0;
    struct kevent evset[2];
    struct kevent *ev_p = evset;
    fr_event_fd_t *ef, find;

    if (!el) {
        fr_strerror_printf("Invalid argument: NULL event list");
        return -1;
    }

    if (!read && !write) {
        fr_strerror_printf("Invalid arguments: NULL read and write callbacks");
        return -1;
    }

    if (fd < 0) {
        fr_strerror_printf("Invalid arguments: Bad FD %i", fd);
        return -1;
    }

    if (el->exit) {
        fr_strerror_printf("Event loop exiting");
        return -1;
    }

    memset(&find, 0, sizeof(find));

    /*
     *  Get the existing fr_event_fd_t if it exists.
     */
    find.fd = fd;
    ef = rbtree_finddata(el->fds, &find);
    if (!ef) {
        ef = talloc_zero(el, fr_event_fd_t);
        if (!ef) {
            fr_strerror_printf("Out of memory");
            return -1;
        }
        talloc_set_destructor(ef, _fr_event_fd_free);
        el->num_fds++;
        ef->fd = fd;
        rbtree_insert(el->fds, ef);

    /*
     *  Existing filters will be overwritten if there's
     *  a new filter which takes their place.  If there
     *  is no new filter however, we need to delete the
     *  existing one.
     */
    } else {
        if (ef->read && !read) filter |= EVFILT_READ;
        if (ef->write && !write) filter |= EVFILT_WRITE;

        if (filter) {
            EV_SET(ev_p++, ef->fd, filter, EV_DELETE, 0, 0, 0);
            filter = 0;
        }

        /*
         *  I/O handler may delete an event, then
         *  re-add it.  To avoid deleting modified
         *  events we unset the do_delete flag.
         */
        ef->do_delete = false;
    }

    ef->ctx = ctx;

    if (read) {
        ef->read = read;
        filter |= EVFILT_READ;
    }

    if (write) {
        ef->write = write;
        filter |= EVFILT_WRITE;
    }
    ef->error = error;

    EV_SET(ev_p++, fd, filter, EV_ADD | EV_ENABLE, 0, 0, ef);

    if (kevent(el->kq, evset, ev_p - evset, NULL, 0, NULL) < 0) {
        fr_strerror_printf("Failed inserting event for FD %i: %s", fd, fr_syserror(errno));
        talloc_free(ef);
        return -1;
    }

    ef->is_registered = true;

    return 0;
}
Unfortunately, it doesn't work. kevent does not appear to remove the old filters (we continue to receive notifications from them).
What's weirder is that if I apply the two operations in two separate calls, it works perfectly.
    if (filter) {
        EV_SET(&evset[0], ef->fd, filter, EV_DELETE, 0, 0, 0);
        kevent(el->kq, evset, 1, NULL, 0, NULL);
        filter = 0;
    }
Is this a bug in Sierra's kevent implementation, or did I misunderstand how kevent should work?

The problem here is that you can't OR together EVFILT_READ and EVFILT_WRITE. Unlike the EV_* action flags, the EVFILT_* constants are filter identifiers, not bit flags.
When enabling or disabling multiple filters you need to call EV_SET() multiple times, on multiple struct kevent entries.
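To see why, on macOS and the BSDs the filters are defined as small negative integers in <sys/event.h>, so OR-ing them does not produce "both filters":

/* From <sys/event.h> (values as on macOS / the BSDs) */
#define EVFILT_READ   (-1)
#define EVFILT_WRITE  (-2)

/* EVFILT_READ | EVFILT_WRITE == (-1) | (-2) == -1 == EVFILT_READ,
 * so whenever EVFILT_READ is one of the OR'd values the kernel only
 * sees a read filter, and the write filter is never added or deleted. */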
The non-functional code in the example above:
struct kevent evset[2];
struct kevent *ev_p = evset;

if (read) {
    ef->read = read;
    filter |= EVFILT_READ;
}

if (write) {
    ef->write = write;
    filter |= EVFILT_WRITE;
}
ef->error = error;

EV_SET(ev_p++, fd, filter, EV_ADD | EV_ENABLE, 0, 0, ef);

kevent(el->kq, evset, ev_p - evset, NULL, 0, NULL);
becomes:
int count = 0;
struct kevent ev_set[2];

if (read) {
    ef->read = read;
    EV_SET(&ev_set[count++], fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, ef);
}

if (write) {
    ef->write = write;
    EV_SET(&ev_set[count++], fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, ef);
}
ef->error = error;

kevent(el->kq, ev_set, count, NULL, 0, NULL);
After making this change everything worked as expected.
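The delete path in the question's fr_event_fd_insert() needs the same treatment: one EV_SET() per filter being removed. A minimal sketch of the combined fix, reusing the question's ef/el structures and error helpers:

    struct kevent ev_set[4];    /* up to two deletes plus two adds */
    int count = 0;

    /* Remove filters that are no longer wanted */
    if (ef->read && !read)   EV_SET(&ev_set[count++], fd, EVFILT_READ,  EV_DELETE, 0, 0, 0);
    if (ef->write && !write) EV_SET(&ev_set[count++], fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0);

    /* Add (or re-add) the filters that are wanted */
    if (read) {
        ef->read = read;
        EV_SET(&ev_set[count++], fd, EVFILT_READ,  EV_ADD | EV_ENABLE, 0, 0, ef);
    }
    if (write) {
        ef->write = write;
        EV_SET(&ev_set[count++], fd, EVFILT_WRITE, EV_ADD | EV_ENABLE, 0, 0, ef);
    }
    ef->error = error;

    if (kevent(el->kq, ev_set, count, NULL, 0, NULL) < 0) {
        fr_strerror_printf("Failed inserting event for FD %i: %s", fd, fr_syserror(errno));
        return -1;
    }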

Related

issue in C with read after epoll_wait in a pthread

I am working on an MQTT client app in C for an embedded ARM system that must send messages based on GPIO changes.
To do this, I have tried to launch a pthread that does an epoll_wait and a read on /sys/class/gpio/gpio<x>/value to get the value on change.
The first step is the configuration of the GPIO as input, with edge set to both:
root#ad:~# cat /sys/class/gpio/gpio13/direction
in
root#ad:~# cat /sys/class/gpio/gpio13/edge
both
The second step is starting the pthread in the main function. (After starting the pthread, main enters a loop to manage MQTT communication):
// Main
int main(int argc, char *argv[]) {
    Network n;
    MQTTClient c;
    (...)
    toStop = false;
    pthread_create(&thread_id, NULL, detection, NULL);
    while (!toStop)
    {
        MQTTYield(&c, 100);
        (...MQTT stuff...)
    }
    // stop thread if exists
    if (thread_id)
    {
        pthread_cancel(thread_id);
        thread_id = NULL;
    }
    (...)
}
Then the pthread runs the following function:
// detection function call as pthread
void *detection(void *args) {
    char strvalue[1];
    int ret;
    int nn;
    int ep;
    int fd;
    struct epoll_event ev, events;

    ep = epoll_create1(0);
    fd = open("/sys/class/gpio/gpio13/value", O_RDONLY);

    nn = read(fd, &strvalue, 1);
    if (nn > 0) {
        printf("Initial value = %c\n", strvalue[0]);
        lseek(fd, 0, SEEK_SET);
    }

    ev.events = EPOLLIN | EPOLLET; // EPOLLPRI;
    ev.data.fd = fd;
    ret = epoll_ctl(ep, EPOLL_CTL_ADD, fd, &ev);
    printf("ret=%d\n", ret);

    while (1)
    {
        printf("Waiting\n");
        ret = epoll_wait(ep, &events, 1, -1);
        printf("ret=%d\n", ret);
        if (ret > 0) {
            lseek(fd, 0, SEEK_SET);
            printf("fd=%d\n", events.data.fd);
            nn = read(events.data.fd, &strvalue, 1);
            printf("nn=%d\n", nn);
            printf("value = %c\n", strvalue[0]);
        }
    }
}
The problem is that when the GPIO changes, epoll_wait catches it, but the thread stops during the read.
Here is the output:
Initial value = 1
ret=0
Waiting
ret=1
fd=7
nn=1
value = 1
Waiting
ret=-1
Waiting
ret=1
fd=7
If I call the detection function directly in main (so without the pthread), everything works well.
How can I solve this issue?
Specific issue: the thread_id variable name was already used in other C code that was included, but the compiler did not warn about it.
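One common way this can compile without any warning is when both files contain a tentative definition of the same global: with the traditional -fcommon behaviour (the default before GCC 10) they are merged into one shared symbol, so both pieces of code read and write the same thread_id. A hypothetical illustration (file names invented):

/* gpio_thread.c (hypothetical) */
pthread_t thread_id;    /* tentative definition */

/* other_included_code.c (hypothetical) */
pthread_t thread_id;    /* same name, no extern, no initializer: with -fcommon
                         * both refer to one shared symbol and neither the
                         * compiler nor the linker complains */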

Select always returns 0 in an input file

I wrote a function that receives a FILE* and checks if it is ready.
The function:
int ioManager_nextReady(FILE *IFILE) {
    // Setting input file
    int inDescrp = fileno(IFILE ? IFILE : stdin);

    // Setting timer to 0
    struct timeval timeout;
    timeout.tv_sec = timeout.tv_usec = 0;

    // Variables for select
    unsigned short int nfds = 1;
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(inDescrp, &readfds);

    // Run select
    int nReady = select(nfds, &readfds, NULL, NULL, &timeout);
    if (nReady > 0) {
        return inDescrp;
    }
    return -1;
}
I am trying to test this function with check.h.
The tests:
static FILE *tmpIn;

void before(char *line) {
    tmpIn = tmpfile();
    if (line) {
        fprintf(tmpIn, "%s\n", line);
        rewind(tmpIn);
        fflush(tmpIn);
    }
}

void after() { fclose(tmpIn); }

START_TEST(test_ioManager_nextReady_NULL) {
    before(NULL);
    int data;
    data = ioManager_nextReady(tmpIn);
    ck_assert_int_eq(data, -1);
    after();
}
END_TEST

#define LINEIN "Sample input"

START_TEST(test_ioManager_nextReady_text) {
    before(LINEIN);
    int data;
    data = ioManager_nextReady(tmpIn);
    ck_assert_int_ne(data, -1);
    after();
}
END_TEST
The result:
Running suite(s): IOManager
50%: Checks: 2, Failures: 1, Errors: 0
ioManager.test.c:42:F:Smoke:test_ioManager_nextReady_text:0: Assertion 'data != -1' failed: data == -1, -1 == -1
Select is returning 0 after I use rewind and fflush.
When I use read I can retrieve the data:
// Debug
char bff[MAXLINE];
int n = read(inDescrp, bff, MAXLINE);
bff[n] = '\0';
printf("%d\n", inDescrp);
printf("%s\n", bff);
So select is returning 0 even when I can read data.
The problem persists even if I set a non-zero timeout.
Why is this happening?
I need to check if a file is ready to be read.
What is a possible solution?
I can see why you have been led astray by the 'nfds' parameter to select(): it reads and sounds like "number of file descriptors".
It is not that. It should be the value of the highest file descriptor that you care about, plus 1. See (for example) the Linux man page for select().
As an aside, the nfds parameter is an int, so don't use an unsigned short. It will generally "just work", but it is confusing.
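For reference, a corrected version of the question's function would look like the sketch below; the only substantive change is passing inDescrp + 1 as select()'s first argument (and dropping the now-unused nfds variable):

int ioManager_nextReady(FILE *IFILE) {
    // Setting input file
    int inDescrp = fileno(IFILE ? IFILE : stdin);

    // Setting timer to 0
    struct timeval timeout;
    timeout.tv_sec = timeout.tv_usec = 0;

    // Variables for select
    fd_set readfds;
    FD_ZERO(&readfds);
    FD_SET(inDescrp, &readfds);

    // Run select; the first argument is the highest descriptor of interest, plus one
    int nReady = select(inDescrp + 1, &readfds, NULL, NULL, &timeout);
    if (nReady > 0) {
        return inDescrp;
    }
    return -1;
}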

How to port "select" to "poll" for third error data?

I have the following code that uses select:
fd_set fdsu;
FD_ZERO(&fdsu);
FD_SET(fd, &fdsu);

fd_set efds = fdsu;
fd_set dfds = fdsu;

while (1) {
    select(cameraUSBP.fd + 1, NULL, &dfds, &efds, NULL);
    if (FD_ISSET(cameraUSBP.fd, &efds)) {
        errorData();
    }
    if (FD_ISSET(cameraUSBP.fd, &dfds)) {
        writeData();
    }
}
I want to port it to use poll:
struct pollfd pollfds[1];
pollfds[0].fd = fd;
pollfds[0].events = POLLIN;

while (1) {
    poll(pollfds, 1, -1);
    //how to insert writeData() and errorData()
}
I'm confused. How do I insert the writeData and errorData events?
First, your select() version has a number of significant errors.
From the linux manpage:
Note well: Upon return, each of the file descriptor sets is modified in place to indicate which file descriptors are currently "ready". Thus, if using select() within a loop, the sets must be reinitialized before each call.
You're not doing that. You also can't portably assign fd_sets the way you're doing; it works on some implementations and not on others (say, if fd_set is implemented as an array). Nor are you checking whether select() fails.
It should look like
while (1) {
fd_set efds, dfds;
FD_ZERO(&efds);
FD_SET(cameraUSBP.fd, &efds);
FD_ZERO(&dfds);
FD_SET(cameraUSBP.fd, &dfds);
if (select(cameraUSBP.fd + 1, NULL, &dfds, &efds, NULL) < 0) {
reportError(errno);
break; // or exit or whatever
}
if (FD_ISSET(cameraUSBP.fd, &efds)) {
errorData();
}
if (FD_ISSET(cameraUSBP.fd, &dfds)) {
writeData();
}
}
struct pollfd, on the other hand, has separate fields for the events to monitor and the events that happened, so it only has to be initialized once if the descriptors you're monitoring never change. You set events to the things you're interested in using a bitwise OR of flags, and check revents to see what happened by doing a bitwise AND with the relevant flag.
struct pollfd pollfds[1];
pollfds[0].fd = cameraUSBP.fd;
pollfds[0].events = POLLOUT; // since you want to know when it's writable

while (1) {
    if (poll(pollfds, 1, -1) < 0) {
        reportError(errno);
        break;
    }

    if (pollfds[0].revents & POLLERR) { // Doesn't need to be set in events
        errorData();
    }
    if (pollfds[0].revents & POLLOUT) {
        writeData();
    }
}

Why does timerfd_settime fail with EBADF when called with valid fd from timerfd_create?

I have a function that creates a timerfd timer, but sometimes timerfd_settime fails with EBADF (Bad file descriptor). I cannot fathom a scenario where timerfd_create returns a valid file descriptor which then fails when immediately passed to timerfd_settime.
I use this function with an epoll event loop, and sometimes this function will return a valid fd, only to have epoll_ctl fail with EBADF when adding the timer fd. I assume that if I understand why timerfd_settime sometimes fails, it will illuminate the epoll failure as well.
static inline int create_timer(uint32_t interval_ms, uint32_t start_ms)
{
    struct itimerspec its = {{0}};
    int fd = timerfd_create(CLOCK_MONOTONIC, 0);

    if (fd < 0) {
        perror("timerfd_create");
        return -1;
    }

    its.it_interval = timespec_ns((int64_t)(interval_ms) * NSEC_PER_MSEC);
    if (start_ms)
        its.it_value = timespec_ns((int64_t)(start_ms) * NSEC_PER_MSEC);
    else
        its.it_value.tv_nsec = 1;

    if (timerfd_settime(fd, 0, &its, NULL) < 0) {
        perror("timerfd_settime");
        close(fd);
        return -1;
    }
    return fd;
}
It is used in a multi-threaded "curl_multi_socket" application. There are multiple worker threads, each of which frequently needs to download and parse many files. Each thread has its own epoll loop. Inter-thread communication is handled through Unix sockets.
The function is used to set the timeouts for libcurl's timer callback (CURLMOPT_TIMERFUNCTION):
static int fetch_timerfunc(CURLM *curlm, long timeout_ms, void *ctx)
{
    struct fetch *cm = (struct fetch *) ctx;
    struct epoll_event ev = {};

    // Cancel previous timeout, if any
    if (cm->timer_fd > 0) {
        close(cm->timer_fd);
        cm->timer_fd = 0;
    }

    if (timeout_ms < 0) {
        return 0;
    }

    cm->timer_fd = create_timer(0, timeout_ms);
    if (cm->timer_fd < 0) {
        perror("fetch_timerfunc: create_timer");
        return cm->timer_fd;
    }

    ev.events = EPOLLIN;
    ev.data.fd = CURL_SOCKET_TIMEOUT;
    if (epoll_ctl(cm->epoll_fd, EPOLL_CTL_ADD, cm->timer_fd, &ev) < 0) {
        if (!exiting) {
            perror("fetch_timerfunc: epoll_ctl");
        }
    }
    return 0;
}
@KamilCuk is right. A bug elsewhere caused one thread to sometimes close fd 0 many times in quick succession. This meant that timerfd_create sometimes returned 0 as its assigned fd, and that fd was immediately closed by the buggy thread before the call to timerfd_settime.
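A related hardening that would have made the symptom less confusing: since fd 0 is a perfectly valid descriptor once something else has closed it, use -1 rather than 0 as the "no timer" sentinel in fetch_timerfunc. A sketch of the changed check (this assumes cm->timer_fd is initialised to -1 when the fetch object is created):

    // Cancel previous timeout, if any (-1 means "no timer"; fd 0 is a valid descriptor)
    if (cm->timer_fd >= 0) {
        close(cm->timer_fd);
        cm->timer_fd = -1;
    }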

Asynchronous named Windows pipe communication via overlapped IO

I'm using overlapped IO to read and write a single Windows pipe from C code simultaneously. I want to write synchronous functions to read and write data from separate threads. If you are using synchronous IO you cannot read and write the pipe simultaneously. My client and my server use the same write/read functions. From time to time my client sends data that is never received by the server. Does anyone have an idea how this could happen? If I use synchronous IO in the client, everything works as expected.
The server pipe is opened with the following call:
CreateNamedPipe(pipeName, PIPE_ACCESS_DUPLEX | FILE_FLAG_OVERLAPPED, PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT,
                instances, PIPE_BUF_SIZE, PIPE_BUF_SIZE, 0, NULL);
The client pipe is opened this way:
CreateFile(pipeName, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL);
I'm using this read function:
int readBytesPipeCommChannel(PipeCommChannelData* comm, uint32_t* amount)
{
    OVERLAPPED osRead;
    memset(&osRead, 0, sizeof(osRead));
    osRead.hEvent = comm->readAsyncIOEvent;

    int err = 0;
    if (!ReadFile(comm->pipeH, comm->receiveBuffer, sizeof(comm->receiveBuffer), amount, &osRead))
    {
        err = GetLastError();
        if (err == ERROR_IO_PENDING)
        {
            if (WaitForSingleObject(osRead.hEvent, INFINITE))
            {
                GetOverlappedResult(comm->pipeH, &osRead, amount, TRUE);
            }
            else
            {
                CancelIo(comm->pipeH);
                return PIPE_EVENT_ERROR;
            }
        }
        else if (err != ERROR_BROKEN_PIPE)
        {
            return PIPE_READ_ERROR;
        }
    }

    if (err == ERROR_BROKEN_PIPE)
        return PIPE_BROKEN_ERR;

    return PIPE_OK;
}
And last but not least the write function:
int sendBytesPipeCommChannel(PipeCommChannelData* comm, const uint8_t* bytes, uint32_t amount)
{
OVERLAPPED osWrite;
memset(&osWrite, 0, sizeof(osWrite));
osWrite.hEvent = comm->writeAsyncIOEvent;
uint32_t bytesWritten = 0;
for(uint32_t curPos = 0; curPos < amount; curPos += bytesWritten)
{
if(!WriteFile(comm->pipeH, &bytes[curPos], (amount - curPos), &bytesWritten, &osWrite))
{
if (GetLastError() != ERROR_IO_PENDING)
return PIPE_WRITE_ERR;
if(!WaitForSingleObject(osWrite.hEvent, INFINITE))
{
CancelIo(comm->pipeH);
return PIPE_EVENT_ERROR;
}
}
}
return PIPE_OK;
}
The documentation for ReadFile says:
If hFile was opened with FILE_FLAG_OVERLAPPED, the following conditions are in effect:
The lpNumberOfBytesRead parameter should be set to NULL. Use the GetOverlappedResult function to get the actual number of bytes read.
This applies even if the operation completes synchronously.
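Applied to readBytesPipeCommChannel(), that means passing NULL as lpNumberOfBytesRead and always taking the byte count from GetOverlappedResult(). A sketch of the reworked read path, keeping the question's error codes:

    DWORD bytesRead = 0;
    if (!ReadFile(comm->pipeH, comm->receiveBuffer, sizeof(comm->receiveBuffer),
                  NULL /* per the overlapped-I/O rule quoted above */, &osRead))
    {
        DWORD err = GetLastError();
        if (err == ERROR_BROKEN_PIPE)
            return PIPE_BROKEN_ERR;
        if (err != ERROR_IO_PENDING)
            return PIPE_READ_ERROR;
        if (WaitForSingleObject(osRead.hEvent, INFINITE) != WAIT_OBJECT_0)
        {
            CancelIo(comm->pipeH);
            return PIPE_EVENT_ERROR;
        }
    }
    // The byte count comes from GetOverlappedResult even when ReadFile
    // returned TRUE immediately.
    if (!GetOverlappedResult(comm->pipeH, &osRead, &bytesRead, TRUE))
        return PIPE_READ_ERROR;
    *amount = bytesRead;
    return PIPE_OK;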
