I was trying to create an application that multicasts my webcam feed over my LAN to a specific multicast address, using sendto() to send the raw frame buffer. The application I am trying to build is pretty much the same as the one on this site
http://nashruddin.com/Streaming_OpenCV_Videos_Over_the_Network
and uses the same architecture.
Only instead of a TCP socket I use SOCK_DGRAM. The problem is that when I call sendto() from a different thread it tends to fail: it returns -1 and errno is set to 90 (EMSGSIZE), which means the packet is too large to be sent over the network.
But this happens even if I try to send a simple string (like "hello") to the same multicast address. Everything works fine if the application is single-threaded, that is, if I capture the image and multicast it all in the same thread. This is the code:
#include <netinet/in.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h> /* memset() */
#include <pthread.h>
#include "cv.h"
#include "highgui.h"
#define PORT 12345
#define GROUP "225.0.0.37"
CvCapture* capture;
IplImage* img0;
IplImage* img1;
int is_data_ready = 0;
int serversock, clientsock;
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
void* streamServer(void* arg);
void quit(char* msg, int retval);
int main(int argc, char** argv)
{
pthread_t thread_s;
int key = 0;
if (argc == 2) {
capture = cvCaptureFromFile(argv[1]);
} else {
capture = cvCaptureFromCAM(0);
}
if (!capture) {
quit("cvCapture failed", 1);
}
img0 = cvQueryFrame(capture);
img1 = cvCreateImage(cvGetSize(img0), IPL_DEPTH_8U, 1);
cvZero(img1);
cvNamedWindow("stream_server", CV_WINDOW_AUTOSIZE);
/* print the width and height of the frame, needed by the client */
fprintf(stdout, "width: %d\nheight: %d\n\n", img0->width, img0->height);
fprintf(stdout, "Press 'q' to quit.\n\n");
/* run the streaming server as a separate thread */
if (pthread_create(&thread_s, NULL, streamServer, NULL)) {
quit("pthread_create failed.", 1);
}
while(key != 'q') {
/* get a frame from camera */
img0 = cvQueryFrame(capture);
if (!img0) break;
img0->origin = 0;
cvFlip(img0, img0, -1);
/**
* convert to grayscale
* note that the grayscaled image is the image to be sent to the client
* so we enclose it with pthread_mutex_lock to make it thread safe
*/
pthread_mutex_lock(&mutex);
cvCvtColor(img0, img1, CV_BGR2GRAY);
is_data_ready = 1;
pthread_mutex_unlock(&mutex);
/* also display the video here on server */
cvShowImage("stream_server", img0);
key = cvWaitKey(30);
}
/* user has pressed 'q', terminate the streaming server */
if (pthread_cancel(thread_s)) {
quit("pthread_cancel failed.", 1);
}
/* free memory */
cvDestroyWindow("stream_server");
quit(NULL, 0);
}
/**
* This is the streaming server, run as a separate thread
* This function waits for a client to connect, and send the grayscaled images
*/
void* streamServer(void* arg)
{
struct sockaddr_in server;
/* make this thread cancellable using pthread_cancel() */
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
/* open socket */
if ((serversock = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
quit("socket() failed", 1);
}
memset(&server,0,sizeof(server));
server.sin_family = AF_INET;
server.sin_port = htons(PORT);
server.sin_addr.s_addr = inet_addr(GROUP);
int opt = 1;
//if(setsockopt(serversock,SOL_SOCKET,SO_BROADCAST,&opt,sizeof(int))==-1){
// quit("setsockopt failed",0);
//}
// /* setup server's IP and port */
// memset(&server, 0, sizeof(server));
// server.sin_family = AF_INET;
// server.sin_port = htons(PORT);
// server.sin_addr.s_addr = INADDR_ANY;
//
// /* bind the socket */
// if (bind(serversock, (const void*)&server, sizeof(server)) == -1) {
// quit("bind() failed", 1);
// }
//
// /* wait for connection */
// if (listen(serversock, 10) == -1) {
// quit("listen() failed.", 1);
// }
//
// /* accept a client */
// if ((clientsock = accept(serversock, NULL, NULL)) == -1) {
// quit("accept() failed", 1);
// }
/* the size of the data to be sent */
int imgsize = img1->imageSize;
int bytes=0, i;
/* start sending images */
while(1)
{
/* send the grayscaled frame, thread safe */
pthread_mutex_lock(&mutex);
if (is_data_ready) {
// bytes = send(clientsock, img1->imageData, imgsize, 0);
is_data_ready = 0;
if((bytes = sendto(serversock,img1->imageData,imgsize,0,(struct sockaddr*)&server,sizeof(server)))==-1){
quit("sendto FAILED",1);
}
}
pthread_mutex_unlock(&mutex);
// /* if something went wrong, restart the connection */
// if (bytes != imgsize) {
// fprintf(stderr, "Connection closed.\n");
// close(clientsock);
//
// if ((clientsock = accept(serversock, NULL, NULL)) == -1) {
// quit("accept() failed", 1);
// }
// }
/* have we terminated yet? */
pthread_testcancel();
/* no, take a rest for a while */
usleep(1000);
}
}
/**
* this function provides a way to exit nicely from the system
*/
void quit(char* msg, int retval)
{
if (retval == 0) {
fprintf(stdout, "%s\n", (msg == NULL ? "" : msg));
} else {
fprintf(stderr, "%s\n", (msg == NULL ? "" : msg));
}
if (clientsock) close(clientsock);
if (serversock) close(serversock);
if (capture) cvReleaseCapture(&capture);
if (img1) cvReleaseImage(&img1);
pthread_mutex_destroy(&mutex);
exit(retval);
}
In the sendto() call, you reference imgsize which is initialized to img1->imageSize.
But I don't see where img1->imageSize is set and it appears that imgsize is never updated.
So first check that the imgsize value being passed to sendto() is correct.
Then check that it is not too large:
UDP/IP datagrams have a hard payload limit of 65,507 bytes. However, an IPv4 host is only required to accept datagrams of up to 576 bytes, which leaves 548 bytes of payload after 28 bytes of UDP/IP headers. Most networks have an MTU of 1500, giving you a nominal payload of 1472 bytes.
Most networks allow you to exceed the MTU by breaking the datagram into IP fragments, which the receiving OS must reassemble. This is invisible to your application: recvfrom() either gets the whole reassembled packet or it gets nothing. But the odds of getting nothing go up with fragmentation, because the loss of any fragment causes the entire packet to be lost. In addition, some routers and operating systems have obscure security rules which will block some UDP patterns or fragments of certain sizes.
Finally, any given network may enforce a maximum datagram size even with fragmentation, and this is often much less than 65,507 bytes.
Since you are dealing with a specific network, you will need to experiment to see how big you can reliably go.
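For example, a quick sanity check before the sendto() call (a sketch; 65,507 bytes is the hard UDP/IPv4 ceiling, and 1,472 bytes assumes a typical 1500-byte Ethernet MTU):
#define UDP_MAX_PAYLOAD 65507 /* 65,535 minus 8 bytes UDP and 20 bytes IP header */
#define ETH_SAFE_PAYLOAD 1472 /* 1500-byte MTU minus 28 bytes of UDP/IP headers */

if (imgsize > UDP_MAX_PAYLOAD) {
    fprintf(stderr, "frame is %d bytes: can never fit in one datagram\n", imgsize);
} else if (imgsize > ETH_SAFE_PAYLOAD) {
    fprintf(stderr, "frame is %d bytes: will be sent as IP fragments\n", imgsize);
}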
UDP/IP at Wikipedia
IPv4 at Wikipedia
Are you absolutely sure that you don't try to send more than the UDP limit, which is around 65,500 bytes? From my experience you shouldn't even send more than the Ethernet frame limit, which is around 1,500 bytes, for the best UDP reliability.
I think that right now you are trying to send much more data than that, as a stream. UDP isn't a stream protocol and you can't replace TCP with it just by swapping the socket type. It is of course possible to use UDP to send a video stream to a multicast group, but you need some protocol on top of UDP that handles UDP's message size limit. In the real world, the RTP protocol on top of UDP is used for this kind of task.
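As a rough illustration of what such a protocol has to do (a hand-rolled sketch, not the actual RTP wire format): split each frame into datagrams that fit the MTU and prepend a small header so the receiver can reassemble frames and detect loss.
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <arpa/inet.h>

#define CHUNK_PAYLOAD 1400 /* stays under a 1500-byte MTU */

struct chunk_hdr { /* all fields in network byte order */
    uint32_t frame_id; /* which frame this chunk belongs to */
    uint16_t idx;      /* chunk index within the frame */
    uint16_t total;    /* number of chunks in the frame */
};

static void send_frame(int sock, const struct sockaddr *to, socklen_t tolen,
                       const char *data, int size, uint32_t frame_id)
{
    char pkt[sizeof(struct chunk_hdr) + CHUNK_PAYLOAD];
    uint16_t total = (size + CHUNK_PAYLOAD - 1) / CHUNK_PAYLOAD;

    for (uint16_t i = 0; i < total; i++) {
        int n = (i == total - 1) ? size - i * CHUNK_PAYLOAD : CHUNK_PAYLOAD;
        struct chunk_hdr h = { htonl(frame_id), htons(i), htons(total) };
        memcpy(pkt, &h, sizeof h);
        memcpy(pkt + sizeof h, data + i * CHUNK_PAYLOAD, n);
        /* per-chunk send errors are ignored in this sketch */
        sendto(sock, pkt, sizeof h + n, 0, to, tolen);
    }
}
The receiver buffers chunks per frame_id and only hands a frame to the decoder once all of its chunks have arrived; a missing chunk means the whole frame is dropped. This per-frame bookkeeping (plus timestamps and sequence numbers) is exactly what RTP standardizes.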
I am comparing AF-XDP sockets vs Linux sockets in terms of how many packets they can process without packet loss (packet loss is defined as: the RTP sequence number of the current packet is not equal to the RTP sequence number of the previous packet + 1).
I noticed that my AF-XDP socket program (I can't determine whether the problem is in the kernel program or the user-space program) loses around ~25 packets per second at around 390,000 packets per second, whereas an equivalent program with generic Linux sockets doesn't lose any packets.
I implemented a so-called distributor-program which loads the XDP-kernel program once, sets up a generic Linux socket, and calls setsockopt(IP_ADD_MEMBERSHIP) on this generic socket for every multicast address I pass to the program via the command line.
After this, the distributor loads the file descriptor of a BPF_MAP_TYPE_HASH placed in the XDP-kernel program and inserts routes for the traffic, in case a single AF-XDP socket needs to share its umem later on.
The XDP-kernel program then checks for each IPv4/UDP packet if there is an entry in that hash-map. This basically looks like this:
const struct pckt_idntfy_raw raw = {
.src_ip = 0, /* not used at the moment */
.dst_ip = iph->daddr,
.dst_port = udh->dest,
.pad = 0
};
const int *idx = bpf_map_lookup_elem(&xdp_packet_mapping, &raw);
if(idx != NULL) {
if (bpf_map_lookup_elem(&xsks_map, idx)) {
bpf_printk("Found socket # index: %d!\n", *idx);
return bpf_redirect_map(&xsks_map, *idx, 0);
} else {
bpf_printk("Didn't find connected socket for index %d!\n", *idx);
}
}
In case idx exists this means that there is a socket sitting behind that index in the BPF_MAP_TYPE_XSKMAP.
After doing all that the distributor spawns a new process via fork() passing all multicast-addresses (including destination port) which should be processed by that process (one process handles one RX-Queue). In case there are not enough RX-Queues, some processes may receive multiple multicast-addresses. This then means that they are going to use SHARED UMEM.
I based my AF-XDP user-space program on this example code: https://github.com/torvalds/linux/blob/master/samples/bpf/xdpsock_user.c
I am using the same xsk_configure_umem, xsk_populate_fill_ring and xsk_configure_socket functions.
Because I figured I don't need the lowest possible latency for this application, I send the process to sleep for a specified time (around 1 - 2ms), after which it loops through every AF-XDP socket (most of the time it is only one socket) and processes every received packet for that socket, verifying that no packets have been missed:
while(!global_exit) {
nanosleep(&spec, &remaining);
for(int i = 0; i < cfg.ip_addrs_len; i++) {
struct xsk_socket_info *socket = xsk_sockets[i];
if(atomic_exchange(&socket->stats_sync.lock, 1) == 0) {
handle_receive_packets(socket);
atomic_fetch_xor(&socket->stats_sync.lock, 1); /* release socket-lock */
}
}
}
In my opinion there is nothing too fancy about this, but somehow I lose ~25 packets per second at around 390,000 packets per second, even though my UMEM is close to 1 GB of RAM.
In comparison, my generic linux socket program looks like this (in short):
int fd = socket(AF_INET, SOCK_RAW, IPPROTO_UDP);
/* setting some socket options */
struct sockaddr_in sin;
memset(&sin, 0, sizeof(struct sockaddr_in));
sin.sin_family = AF_INET;
sin.sin_port = cfg->ip_addrs[0]->pckt.dst_port;
inet_aton(cfg->ip_addrs[0]->pckt.dst_ip, &sin.sin_addr);
if(bind(fd, (struct sockaddr*)&sin, sizeof(struct sockaddr)) < 0) {
fprintf(stderr, "Error on binding socket: %s\n", strerror(errno));
return - 1;
}
ioctl(fd, SIOCGIFADDR, &intf);
The distributor-program creates a new process for every given multicast-ip in case generic linux sockets are used (because there are no sophisticated methods such as SHARED-UMEM in generic sockets I don't bother with multiple multicast-streams per process).
Later on I of course join the multicast membership:
struct ip_mreqn mreq;
memset(&mreq, 0, sizeof(struct ip_mreqn));
const char *multicast_ip = cfg->ip_addrs[0]->pckt.dst_ip;
if(inet_pton(AF_INET, multicast_ip, &mreq.imr_multiaddr.s_addr)) {
/* Local interface address */
memcpy(&mreq.imr_address, &cfg->ifaddr, sizeof(struct in_addr));
mreq.imr_ifindex = cfg->ifindex;
if(setsockopt(igmp_socket_fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(struct ip_mreqn)) < 0) {
fprintf(stderr, "Failed to set `IP_ADD_MEMBERSHIP`: %s\n", strerror(errno));
return;
} else {
printf("Successfully added Membership for IP: %s\n", multicast_ip);
}
}
and start processing packets (not sleeping but in a busy-loop like fashion):
void read_packets_recvmsg_with_latency(struct config *cfg, struct statistic *st, void *buff, const int igmp_socket_fd) {
char ctrl[CMSG_SPACE(sizeof(struct timeval))];
struct msghdr msg;
struct iovec iov;
msg.msg_control = (char*)ctrl;
msg.msg_controllen = sizeof(ctrl);
msg.msg_name = &cfg->ifaddr;
msg.msg_namelen = sizeof(cfg->ifaddr);
msg.msg_iov = &iov;
msg.msg_iovlen = 1;
iov.iov_base = buff;
iov.iov_len = BUFFER_SIZE;
struct timeval time_user, time_kernel;
struct cmsghdr *cmsg = (struct cmsghdr*)&ctrl;
const int64_t read_bytes = recvmsg(igmp_socket_fd, &msg, 0);
if(read_bytes == -1) {
return;
}
gettimeofday(&time_user, NULL);
if(cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_TIMESTAMP) {
memcpy(&time_kernel, CMSG_DATA(cmsg), sizeof(struct timeval));
}
if(verify_rtp(cfg, st, read_bytes, buff)) {
const double timediff = (time_user.tv_sec - time_kernel.tv_sec) * 1000000 + (time_user.tv_usec - time_kernel.tv_usec);
if(timediff > st->stats.latency_us) {
st->stats.latency_us = timediff;
}
}
}
int main(...) {
....
while(!is_global_exit) {
read_packets_recvmsg_with_latency(&cfg, &st, buffer, igmp_socket_fd);
}
}
That's pretty much it.
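(verify_rtp() itself is not shown; in essence it performs the sequence check from the definition at the top. A stripped-down sketch, assuming a plain RTP header with the 16-bit big-endian sequence number at byte offset 2:)
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

static uint16_t last_seq; /* per-stream state; a single stream is assumed */

static int rtp_in_sequence(const uint8_t *pkt, int64_t len)
{
    uint16_t seq;
    if (len < 12) /* shorter than a minimal RTP header */
        return 0;
    memcpy(&seq, pkt + 2, sizeof(seq)); /* memcpy avoids unaligned access */
    seq = ntohs(seq);
    int ok = (seq == (uint16_t)(last_seq + 1)); /* wraps at 65535 */
    last_seq = seq;
    return ok;
}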
Please note that in the described use case where I start to lose packets I don't use SHARED UMEM; it's just a single RX-Queue receiving a multicast stream. When I process a smaller multicast stream of around 150,000 pps, the AF-XDP solution doesn't lose any packets. But it is also the other way around: at around 520,000 pps on the same RX-Queue (using SHARED UMEM) I get a loss of 12,000 pps.
Any ideas what I am missing?
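One thing worth checking when chasing drops like this (a diagnostic sketch, not part of the program above): the kernel keeps per-socket counters for AF_XDP sockets that can be read with the XDP_STATISTICS socket option, which at least shows whether the kernel itself is discarding packets on the socket.
#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <stdio.h>

/* xsk_fd is the fd of the AF_XDP socket, e.g. from xsk_socket__fd() */
static void print_xdp_stats(int xsk_fd)
{
    struct xdp_statistics stats;
    socklen_t optlen = sizeof(stats);

    if (getsockopt(xsk_fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen) == 0) {
        printf("rx_dropped: %llu rx_invalid_descs: %llu tx_invalid_descs: %llu\n",
               (unsigned long long)stats.rx_dropped,
               (unsigned long long)stats.rx_invalid_descs,
               (unsigned long long)stats.tx_invalid_descs);
    }
}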
This question is similar to How to call a function on a thread's creation and exit? but more specific. In another multi-process shared-memory project I used a combination of an __attribute__((constructor))-labeled library init routine, lazy initialisation for each thread, and robust futexes to make sure resources weren't leaked in the shared memory even if a sysadmin chose to SIGKILL one of the processes using it. However, futexes within the APIs are way too heavyweight for my current project, and even the few instructions needed to deke around some lazy initialisation are something I'd rather avoid. The library APIs will literally be called several trillion times over a few hundred threads across several processes (each API is only a couple hundred instructions.)
I am guessing the answer is no, but since I spent a couple hours looking for and not finding a definitive answer I thought I'd ask it here, then the next person looking for a simple answer will be able to find it more quickly.
My goal is pretty simple: perform some per-thread initialisation as threads are created in multiple processes asynchronously, and robustly perform some cleanup at some point when threads are destroyed asynchronously. Doesn't have to be immediately, it just has to happen eventually.
Some hypothetical ideas to engage critical thinking: a hypothetical pthread_atclone() called from an __attribute__((constructor)) labeled library init func would satisfy the first condition. And an extension to futex()es to add a semop-like operation with a per-thread futex_adj value that, if non-zero in do_exit(), causes FUTEX_OWNER_DIED to be set for the futex "semaphore" allowing cleanup the next time the futex is touched.
Well, first, you should document that library users should not asynchronously terminate threads in such a way that they don't explicitly release resources belonging to your library (closing a handle, whatever). TBH, terminating threads at all before process termination is a bad idea.
It's more difficult to detect if a whole process is SIGKILLed while it's using your lib. My current best guess is that all processes wishing to use your library have to log in first so that their pid can be added to a container. Using a thread started at your lib initialization, poll for pids that have disappeared with kill(pid,0) and take any appropriate cleanup. It's not very satisfactory, (I hate polling), but I don't see any alternatives that are not grossly messy:(
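A minimal sketch of that polling thread (pid_list management, locking, and cleanup_for() are left as assumptions):
#include <errno.h>
#include <signal.h>
#include <unistd.h>

static pid_t pid_list[64]; /* hypothetical registry of logged-in pids */
static size_t npids;
extern void cleanup_for(pid_t pid); /* whatever your lib needs to reclaim */

static void *reaper_poll(void *arg)
{
    (void)arg;
    for (;;) {
        for (size_t i = 0; i < npids; i++) {
            /* kill() with signal 0 delivers nothing; it only reports
             * ESRCH once the process is gone. */
            if (kill(pid_list[i], 0) == -1 && errno == ESRCH)
                cleanup_for(pid_list[i]);
        }
        sleep(1); /* the polling interval I hate */
    }
    return (void *)0;
}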
After research and experimentation I've come up with what seems to be current "best practice" as far as I can tell. If anyone knows any better, please comment!
For the first part, per-thread initialisation, I was not able to come up with any alternative to straightforward lazy initialisation. However, I did decide that it's slightly more efficient to move the branch to the caller so that pipelining in the new stack frame isn't immediately confronted with an effectively unnecessary branch. So instead of this:
__thread int tInf = 0;
void
threadDoSomething(void *data)
{
if (!tInf) {
_threadInitInfo(&tInf);
}
/*l
* do Something.
*/
}
This:
__thread int tInf = 0;
#define threadDoSomething(data) (((!tInf)?_threadInitInfo(&tInf):0), \
_threadDoSomething((data)))
void
_threadDoSomething(void *data)
{
/*l
* do Something.
*/
}
Comments on the (admittedly slight) usefulness of this welcome!
For the second part, robustly performing some cleanup when threads die no matter how asynchronously, I was not able to find any solution better than to have a reaping process epoll_wait() on a file descriptor for the read end of an open pipe, passed to it via an SCM_RIGHTS control message in a sendmsg() call on an abstract UNIX domain socket address. Sounds complex, but it's not that bad. Here's the client side:
/*m
* Client that registers a thread with a server who will do cleanup of a
* shared interprocess object even if the thread dies asynchronously.
*/
#include <sys/socket.h> // socket(), bind(), recvmsg()
#include <sys/syscall.h> // syscall()
#include <sys/un.h> // sockaddr_un
#include <stdint.h> // uint64_t
#include <fcntl.h> // O_CLOEXEC()
#include <malloc.h> // malloc()
#include <stdlib.h> // random()
#include <unistd.h> // close(), usleep()
#include <pthread.h> // pthread_create()
#include <tsteplsrv.h> // Our API.
char iovBuf[] = "SP1"; // 3 char buf to send client type
__thread pid_t cliTid = 0; // per-thread copy of self's Thread ID
/*f
* initClient() is called when we realise we need to lazily initialise
* our thread based on cliTid being zero.
*/
void *
initClient(void *ptr)
{
struct sockaddr_un svAddr;
struct msghdr msg;
struct iovec io;
struct cmsghdr *ctrMsg;
uint64_t ltid; // local 8-byte copy of the tid
int pfds[2], // two fds of our pipe
sfd; // socket fd
/*s
* This union is necessary to ensure that the buffer is aligned such that
* we can read cmsg_{len,level,type} from the cmsghdr without causing an
* alignment fault (SIGBUS.)
*/
union {
struct cmsghdr hdr;
char buf[CMSG_SPACE(sizeof(int))];
} ctrBuf;
pfds[0] = pfds[1] = sfd = -1;
/*l
* Get our Thread ID.
*/
ltid = (uint64_t)(cliTid = syscall(SYS_gettid));
/*l
* Set up an abstract unix domain socket address.
*/
svAddr.sun_family = AF_UNIX;
svAddr.sun_path[0] = '\0';
strcpy(&svAddr.sun_path[1], EPLS_SRV_ADDR);
/*l
* Set up a socket datagram send buffer.
*/
io.iov_base = iovBuf;
io.iov_len = sizeof(iovBuf);
msg.msg_iov = &io;
msg.msg_iovlen = 1;
msg.msg_control = ctrBuf.buf;
msg.msg_controllen = sizeof(ctrBuf);
msg.msg_name = (struct sockaddr *)&svAddr,
msg.msg_namelen = (&svAddr.sun_path[0] - (char *)&svAddr)
+ 1
+ sizeof(EPLS_SRV_ADDR);
/*l
* Set up the control message header to indicate we are sharing a file
* descriptor.
*/
ctrMsg = CMSG_FIRSTHDR(&msg);
ctrMsg->cmsg_len = CMSG_LEN(sizeof(int));
ctrMsg->cmsg_level = SOL_SOCKET;
ctrMsg->cmsg_type = SCM_RIGHTS;
/*l
* Create file descriptors with pipe().
*/
if (-1 == pipe(pfds)) {
printErrMsg("TID: %d pipe() failed", cliTid);
} else {
/*l
* Write our tid to the pipe.
*/
memmove(CMSG_DATA(ctrMsg), &pfds[0], sizeof(int));
if (-1 == write(pfds[1], &ltid, sizeof(uint64_t))) {
printErrMsg("TID: %d write() failed", cliTid);
} else if (-1 == (sfd = socket(AF_UNIX, SOCK_DGRAM, 0))) {
printErrMsg("TID: %d socket() failed", cliTid);
} else if (-1 == sendmsg(sfd, &msg, 0)) {
printErrMsg("TID: %d sendmsg() failed", cliTid);
} else {
printVerbMsg("TID: %d sent write fd %d to server kept read fd %d",
cliTid,
pfds[0],
pfds[1]);
/*l
* Close the read end of the pipe, the server has it now.
*/
close(pfds[0]);
pfds[0] = -1;
}
}
if (-1 != pfds[1]) close(pfds[1]);
if (-1 != pfds[0]) close(pfds[0]);
if (-1 != sfd) close(sfd);
return (void *)0;
}
And the reaper's code:
/*m
* Abstract datagram socket listening for FD's from clients.
*/
#include <sys/socket.h> // socket(), bind(), recvmsg()
#include <sys/epoll.h> // epoll_{create,wait}()
#include <sys/un.h> // sockaddr_un
#include <malloc.h> // malloc()
#include <unistd.h> // close()
#include <tsteplsrv.h> // Our API.
/*s
* socket datagram structs for receiving structured messages used to transfer
* fds from our clients.
*/
struct msghdr msg = { 0 };
struct iovec io = { 0 };
char iovBuf[EPLS_MSG_LEN]; // 3 char buf to receive client type
/*s
* This union is necessary to ensure that the buffer is aligned such that
* we can read cmsg_{len,level,type} from the cmsghdr without causing an
* alignment fault (SIGBUS.)
*/
union {
struct cmsghdr hdr;
char buf[CMSG_SPACE(sizeof(int))];
} ctrBuf;
typedef struct _tidFd_t {
struct _tidFd_t *next;
pid_t tid;
int fd;
} tidFd_t;
tidFd_t *tidFdLst = (tidFd_t *)0;
/*f
* Perform some handshaking with a new client and add the file descriptor
* it shared with us to the epoll set.
*/
static void
welcomeClient(int efd, int cfd)
{
uint64_t tid;
tidFd_t *tfd;
struct epoll_event epEv;
tfd = (tidFd_t *)-1;
/*l
* The fd is a pipe and should be readable, and should contain the
* tid of the client.
*/
if (-1 != read(cfd, &tid, sizeof(tid)) && (tfd = malloc(sizeof(*tfd)))) {
tfd->fd = cfd;
tfd->tid = (pid_t)tid;
tfd->next = tidFdLst;
/*l
* Single threaded process, no race condition here.
*/
tidFdLst = tfd;
/*l
* Add the fd to the epoll() set so that we will be woken up with
* an error if the thread dies.
*/
epEv.events = EPOLLIN;
epEv.data.fd = cfd;
if (-1 == epoll_ctl(efd, EPOLL_CTL_ADD, cfd, &epEv)) {
printErrMsg("TID: %ld Could not register fd %d with epoll set",
tid,
cfd);
} else {
printVerbMsg("TID: %ld Registered fd %d with epoll set", tid, cfd);
}
/*l
* Couldn't allocate memory for the new client.
*/
} else if (!tfd) {
printErrMsg("Could not allocate memory for new client");
/*l
* Could not read from the eventfd() file descriptor.
*/
} else {
printErrMsg("Could not read from client file descriptor");
}
}
/*f
* Perform some handshaking with a new client and add the file descriptor
* it shared with us to the epoll set.
*/
static void
processClientEvent(int efd, struct epoll_event *epEv)
{
tidFd_t *tfd, **bLnk;
/*l
* Walk the list of per-tid fd structs.
*/
for (bLnk = &tidFdLst; (tfd = *bLnk); bLnk = &tfd->next)
if (tfd->fd == epEv->data.fd)
break;
if (!tfd) {
printErrMsg("client file descriptor %d not found on the tfd list!",
epEv->data.fd);
/*l
* If we received an EPOLLHUP on the fd, cleanup.
*/
} else if (epEv->events & EPOLLHUP) {
/*l
* Try to remove the tid's pipe fd from the epoll set.
*/
if (-1 == epoll_ctl(efd, EPOLL_CTL_DEL, epEv->data.fd, epEv)) {
printErrMsg("couldn't delete epoll for tid %d", tfd->tid);
/*l
* Do tid cleanup here.
*/
} else {
printVerbMsg("TID: %d closing fd: %d", tfd->tid, epEv->data.fd);
close(epEv->data.fd);
/*l
* Remove the per-tid struct from the list and free it.
*/
*bLnk = tfd->next;
free(tfd);
}
} else {
printVerbMsg("TID: %d Received unexpected epoll event %d",
tfd->tid,
epEv->events);
}
}
/*f
* Create and listen on a datagram socket for eventfd() file descriptors
* from clients.
*/
int
main(int argc, char *argv[])
{
struct sockaddr_un svAddr;
struct cmsghdr *ctrMsg;
struct epoll_event *epEv,
epEvs[EPLS_MAX_EPEVS];
int sfd, efd, cfd, nfds;
sfd = efd = -1;
/*l
* Set up an abstract unix domain socket address.
*/
svAddr.sun_family = AF_UNIX;
svAddr.sun_path[0] = '\0';
strcpy(&svAddr.sun_path[1], EPLS_SRV_ADDR);
/*l
* Set up a socket datagram receive buffer.
*/
io.iov_base = iovBuf; // 3-char buffer to ID client type
io.iov_len = sizeof(iovBuf);
msg.msg_name = (char *)0; // No need for the client addr
msg.msg_namelen = 0;
msg.msg_iov = &io; // single IO vector in the S/G array
msg.msg_iovlen = 1;
msg.msg_control = ctrBuf.buf; // Control message buffer
msg.msg_controllen = sizeof(ctrBuf);
/*l
* Set up an epoll event.
*/
epEv = &epEvs[0];
epEv->events = EPOLLIN;
/*l
* Create a socket to receive datagrams on and register the socket
* with our epoll event.
*/
if (-1 == (epEv->data.fd = sfd = socket(AF_UNIX, SOCK_DGRAM, 0))) {
printErrMsg("socket creation failed");
/*l
* Bind to the abstract address. The pointer math is to portably
* handle weird structure packing _just_in_case_.
*/
} else if (-1 == bind(sfd,
(struct sockaddr *)&svAddr,
(&svAddr.sun_path[0] - (char *)&svAddr)
+ 1
+ sizeof(EPLS_SRV_ADDR))) {
printErrMsg("could not bind address: %s", &svAddr.sun_path[1]);
/*l
* Create an epoll interface. Set CLOEXEC for tidiness in case a thread
* in the server fork()s and exec()s.
*/
} else if (-1 == (efd = epoll_create1(EPOLL_CLOEXEC))) {
printErrMsg("could not create epoll instance");
/*l
* Add our socket fd to the epoll instance.
*/
} else if (-1 == epoll_ctl(efd, EPOLL_CTL_ADD, sfd, epEv)) {
printErrMsg("could not add socket to epoll instance");
/*l
* Loop receiving events on our epoll instance.
*/
} else {
printVerbMsg("server listening on abstract address: %s",
&svAddr.sun_path[1]);
/*l
* Loop forever listening for events on the fds we are interested
* in.
*/
while (-1 != (nfds = epoll_wait(efd, epEvs, EPLS_MAX_EPEVS, -1))) {
/*l
* For each fd with an event, figure out what's up!
*/
do {
/*l
* Transform nfds from a count to an index.
*/
--nfds;
/*l
* If the fd with an event is the listening socket a client
* is trying to send us their eventfd() file descriptor.
*/
if (sfd == epEvs[nfds].data.fd) {
if (EPOLLIN != epEvs[nfds].events) {
printErrMsg("unexpected condition on socket: %d",
epEvs[nfds].events);
nfds = -1;
break;
}
/*l
* Reset the sizes of the receive buffers to their
* actual value; on return they will be set to the
* read value.
*/
io.iov_len = sizeof(iovBuf);
msg.msg_controllen = sizeof(ctrBuf);
/*l
* Receive the waiting message.
*/
if (-1 == recvmsg(sfd, &msg, MSG_CMSG_CLOEXEC)) {
printVerbMsg("failed datagram read on socket");
/*l
* Verify that the message's control buffer contains
* a file descriptor.
*/
} else if ( NULL != (ctrMsg = CMSG_FIRSTHDR(&msg))
&& CMSG_LEN(sizeof(int)) == ctrMsg->cmsg_len
&& SOL_SOCKET == ctrMsg->cmsg_level
&& SCM_RIGHTS == ctrMsg->cmsg_type) {
/*l
* Unpack the file descriptor.
*/
memmove(&cfd, CMSG_DATA(ctrMsg), sizeof(cfd));
printVerbMsg("Received fd %d from client type %c%c%c",
cfd,
((char *)msg.msg_iov->iov_base)[0],
((char *)msg.msg_iov->iov_base)[1],
((char *)msg.msg_iov->iov_base)[2]);
/*l
* Process the incoming file descriptor and add
* it to the epoll() list.
*/
welcomeClient(efd, cfd);
/*l
* Note but ignore incorrectly formed datagrams.
*/
} else {
printVerbMsg("could not extract file descriptor "
"from client's datagram");
}
/*l
* The epoll() event is on one of the file descriptors
* shared with a client, process it.
*/
} else {
processClientEvent(efd, &epEvs[nfds]);
}
} while (nfds);
/*l
* If something happened to our socket break the epoll_wait()
* loop.
*/
if (nfds)
break;
}
}
/*l
* An error occurred, cleanup.
*/
if (-1 != efd)
close(efd);
if (-1 != sfd)
close(sfd);
return -1;
}
At first I tried using eventfd() rather than pipe(), but eventfd file descriptors represent objects, not connections, so closing the fd in the client code did not produce an EPOLLHUP in the reaper. If anyone knows of a better alternative to pipe() for this, let me know!
For completeness here's the #defines used to construct the abstract address:
/*d
* server abstract address.
*/
#define EPLS_SRV_NAM "_abssSrv"
#define EPLS_SRV_VER "0.0.1"
#define EPLS_SRV_ADDR EPLS_SRV_NAM "." EPLS_SRV_VER
#define EPLS_MSG_LEN 3
#define EPLS_MAX_EPEVS 32
That's it, hope this is useful for someone.
I can't figure this out.
When I run my code ... I see data from all Ethernet types and from all interfaces, even though I bind successfully.
After a couple of minutes running ... it fixes itself.
Then I see data only from the particular interface, and only if the EtherType matches.
The objective is to cycle through all interfaces looking for a particular MAC address.
When the correct response is returned ... we drop out of the for loop with all things configured as necessary.
// Copyright (c) 2017 Keith M. Bradley
//
//
// History:
// 13 May 2017 Keith M. Bradley Creation
// all rights reserved.
//
/* ----------------------- Standard includes --------------------------------*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <pthread.h>
#include <unistd.h>
#include <ctype.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <net/if.h>
#include <net/ethernet.h>
#include <netpacket/packet.h>
#include <netdb.h>
#define SIGNAL_THREAD_KILL 0xFF
#define SIGNAL_THREAD_RESET 0xFE
// Ethernet II protocol to use (0x88b5 ... experimental #1).
#define eType 0x88b5
#define msg_Hello "MikiePLC"
#define msg_Reply "IOM_1.0"
#define msg_Ack "ackMikiePLC"
void* PLCThread(void* arg)
{
// get our pointer to the PLC struct
PLC *myPLC = arg;
// get and save our thread ID
myPLC->tid = pthread_self();
// thread index number?
//------------------------------------------------------------------------------------------------------------------
// locals
uint8_t i; // used as an index or loop counts.
uint8_t j; // used as 2nd index or loop counts.
int rtn; // temp store or function return values.
//------------------------------------------------------------------------------------------------------------------
// create Ethernet buffers and variables.
char* outBuff = NULL; // character buffer for sending out on Ethernet.
size_t outBuffSz = 1540;
char* inBuff = NULL; // character buffer for receiving in on Ethernet.
size_t inBuffSz = 1540;
int fd; // file descriptor for socket.
int flags; // socket flags used by fcntl().
struct
ifreq ifr; // used to get and set interface parameters.
struct
sockaddr_ll IOM_sa_flt; // socket address struct, used to filter received Ethernet frames from the remote IO module ... used by bind().
struct
sockaddr_ll IOM_sa_rcv; // socket address struct, used to store addr details of received frame ... used by recvfrom().
socklen_t IOM_sa_len; // IOM_sa_rcv length.
fd_set myfds; // used by select().
struct
timeval rcv_tm_out; // time out for select() to declare communications failed.
//------------------------------------------------------------------------------------------------------------------
// initialize Ethernet buffers and variables.
// allocate memory for the Ethernet sending message buffer.
outBuff = malloc(outBuffSz);
if (outBuff == NULL)
printf("\nNATIVE-PLCThread: Could not allocate outBuff memory.");
memset(outBuff, '\0', outBuffSz);
// allocate memory for the Ethernet received message buffer.
inBuff = malloc(inBuffSz);
if (inBuff == NULL)
printf("\nNATIVE-PLCThread: Could not allocate inBuff memory.");
// clear the sockaddr_ll structs.
// (send was already cleared ... it is inside the PLC typedef).
memset(&IOM_sa_rcv, 0, sizeof(IOM_sa_rcv));
memset(&IOM_sa_flt, 0, sizeof(IOM_sa_flt));
// set receiving sockaddr_ll struct size.
IOM_sa_len = sizeof(IOM_sa_rcv);
// setup the sending, receiving, and filtering sockaddr_ll's.
myPLC->IOM_sa_snd.sll_family = AF_PACKET;
myPLC->IOM_sa_snd.sll_protocol = htons(eType);
IOM_sa_rcv.sll_family = AF_PACKET;
IOM_sa_rcv.sll_protocol = htons(eType);
IOM_sa_flt.sll_family = AF_PACKET;
IOM_sa_flt.sll_protocol = htons(eType);
//------------------------------------------------------------------------------------------------------------------
// open our socket in dgram mode and setup the socket's features.
fd = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
if (fd == -1)
{
fprintf(stderr, "%s\n", strerror(errno));
printf("\nNATIVE-PLCThread: socket() failed !! - ");
}
// get the socket file descriptor flags.
flags = fcntl(fd, F_GETFL, 0);
// if successful, set to non-blocking.
if (flags != -1)
fcntl(fd, F_SETFL, flags | O_NONBLOCK);
if (fd != -1) // valid socket file descriptor means ok to proceed with IOM_Addr_search.
{
// IOM_MAC_search
// if MAC_Addr is configured,
// loop to find which interface has the IOM (I/O Module).
//
// begin for loop ----------------------------------------------------------------------------------------------
for (i = 1; 1; i++)
{
// we need to test for thread kill signal.
if((myPLC->ThreadCtrl == SIGNAL_THREAD_KILL) || (myPLC->ThreadCtrl == SIGNAL_THREAD_RESET)) break;
// if the user cleared the MAC addr while we were searching ... give up and run the engine.
if (myPLC->MAC_is_Valid != 0xa5) break;
// clear the ifreq struct.
memset(&ifr, 0, sizeof(ifr));
// i is our 'for' loop counter and our current interface index.
ifr.ifr_ifindex = i;
// does the interface exist?
if (ioctl(fd, SIOCGIFNAME, &ifr) == -1)
{
// if not, we ran past top of network interfaces.
printf("\nNATIVE-PLCThread: IOM_MAC_search MAC address not found after searching all interfaces !!!\n");
printf("\n_________________________________________________________________________________________\n");
sleep(10);
i = 0;
continue;
}
// don't mess with loopback interface.
if (strcmp(ifr.ifr_name,"lo") == 0) continue;
// store the ifname using the pointer.
strncpy (myPLC->ifName, ifr.ifr_name, sizeof(ifr.ifr_name) - 1);
myPLC->ifName[IFNAMSIZ - 1] = '\0';
// update the interface index in all sockaddr structs.
myPLC->IOM_sa_snd.sll_ifindex = i;
IOM_sa_rcv.sll_ifindex = i;
IOM_sa_flt.sll_ifindex = i;
// is the interface up?
ioctl(fd, SIOCGIFFLAGS, &ifr);
if ((ifr.ifr_flags & IFF_UP) == 0)
{
printf("\nNATIVE-PLCThread: IOM_Addr_search interface %s (index %d) is down.\n", myPLC->ifName, i);
continue;
}
// bind it.
if (bind(fd, (struct sockaddr*)&IOM_sa_flt, sizeof(IOM_sa_flt)) == -1)
{
fprintf(stderr, "%s\n", strerror(errno));
printf("\nNATIVE-PLCThread: IOM_Addr_search bind() failed !!!\n");
continue;
}
// pause and flush? (didn't help at all)
sleep(2);
recvfrom(fd, inBuff, inBuffSz, 0, (struct sockaddr *)&IOM_sa_rcv, &IOM_sa_len);
// fill outBuff with the hello message.
strcpy(outBuff, msg_Hello);
// send hello msg to the IOM with configured IOM_MAC_address.
if (sendto(fd, outBuff, sizeof(msg_Hello), 0, (struct sockaddr *)&(myPLC->IOM_sa_snd), sizeof (myPLC->IOM_sa_snd)) == -1)
{
fprintf(stderr, "%s\n", strerror(errno));
printf("\nNATIVE-PLCThread: IOM_Addr_search sendto() failed on interface %s (index %d) !!!\n", myPLC->ifName, i);
continue;
}
// setup for the select() time out loop.
rcv_tm_out.tv_sec = 0;
rcv_tm_out.tv_usec = 50000;
// begin while loop ------------------------------------------------------------------------------------------
//
// select() time out loop.
// wait for valid response from IOM_MAC_address (discard any ETHERNET 2 messages from other MAC's).
//
while ((rcv_tm_out.tv_sec != 0) || (rcv_tm_out.tv_usec != 0))
{
// create the file descriptor set for use by select().
FD_ZERO(&myfds);
FD_SET(fd, &myfds);
// select() to sleep until a received frame is ready, or the maximum length of time it would take to get a response is exceeded.
rtn = select(fd + 1, &myfds, NULL, NULL, &rcv_tm_out);
if (rtn < 0)
{
fprintf(stderr, "%s\n", strerror(errno));
printf("\nNATIVE-PLCThread: IOM_Addr_search select() returned <0 on interface %s (index %d).\n", myPLC->ifName, i);
break;
}
// did we time out? ... then goto the next interface to search.
else if (rtn == 0)
{
printf("\nNATIVE-PLCThread: IOM_Addr_search select() timed out (returned 0) on interface %s (index %d).\n", myPLC->ifName, i);
break;
}
else // select() returned > 0.
{
if (FD_ISSET(fd, &myfds))
{
// our socket is ready for reading ... 1st clear the buffer and the sock addr.
memset(inBuff, '\0', inBuffSz);
for (j = 0; j < 6; j++)
IOM_sa_rcv.sll_addr[j] = 0;
rtn = recvfrom(fd, inBuff, inBuffSz, 0, (struct sockaddr *)&IOM_sa_rcv, &IOM_sa_len);
if(rtn < 0)
{
if (errno == EAGAIN)
printf("\nNATIVE-PLCThread: IOM_Addr_search recvfrom() returned EAGAIN.\n");
else if (errno == EWOULDBLOCK)
printf("\nNATIVE-PLCThread: IOM_Addr_search recvfrom() returned EWOULDBLOCK.\n");
else
{
fprintf(stderr, "%s\n", strerror(errno));
printf("\nNATIVE-PLCThread: IOM_Addr_search recvfrom() returned unrecoverable error.\n");
}
break;
}
else if (rtn == 0)
printf("\nNATIVE-PLCThread: IOM_Addr_search a_file_descriptor_is_set yet recvfrom() returned zero.\n");
else // recvfrom() returned > 0.
{
printf("\nNATIVE-PLCThread: IOM_Addr_search recvfrom() returned %d bytes on %s (index %d) MAC %02x:%02x:%02x:%02x:%02x:%02x rcv_tm_out.tv_sec = %d.%d\n",
rtn,
myPLC->ifName,
i,
IOM_sa_rcv.sll_addr[0],
IOM_sa_rcv.sll_addr[1],
IOM_sa_rcv.sll_addr[2],
IOM_sa_rcv.sll_addr[3],
IOM_sa_rcv.sll_addr[4],
IOM_sa_rcv.sll_addr[5],
(int)rcv_tm_out.tv_sec,
(int)rcv_tm_out.tv_usec);
// check the IOM_sa_rcv.MAC_Addr ... is it who we want to talk to? ... if not discard.
for (j = 0; j < 6; ++j)
if ((myPLC->IOM_sa_snd.sll_addr[j]) == (IOM_sa_rcv.sll_addr[j])) continue;
// MAC addr matches?
if (j > 50) // set to 50 to debug ... should be 5.
{
printf("\nMAC Addr from our IOM.\n");
// parse the received response to our hello msg.
if (strcmp(inBuff, msg_Reply) == 0)
{
// fill outBuff with the Ack message.
strcpy(outBuff, msg_Ack);
// send ack message to the IOM with configured IOM_MAC_address.
if (sendto(fd, outBuff, sizeof("ackMikiePLC"), 0, (struct sockaddr *)&(myPLC->IOM_sa_snd), sizeof (myPLC->IOM_sa_snd)) == -1)
{
fprintf(stderr, "%s\n", strerror(errno));
printf("\nNATIVE-PLCThread: IOM_Addr_search sendto() failed on interface %s (index %d) !!!\n", myPLC->ifName, i);
continue;
}
else
{
// declare ComStatus ok.
myPLC->ComStatus = 0xa5;
break; // we have a winner !!!
}
}
else
{
// declare ComStatus still NOT ok.
myPLC->ComStatus = 0x5a;
continue;
}
}
else
{
printf("\nMAC Addr from a stranger (discarded)!!!\n");
break;
}
}// END recvfrom() returned > 0.
}// END if (FD_ISSET(fd, &myfds))
else printf("\nNATIVE-PLCThread: IOM_Addr_search select() returned > 0 yet our only file descriptor was not set !!!\n");
}// END select() returned > 0.
}// END while loop -------------------------------------------------------------------------------------------
if (myPLC->ComStatus == 0xa5) break; // search is done ... break out of for loop.
}// END for loop -----------------------------------------------------------------------------------------------
}// END "valid socket fd means ok to proceed" ----------------------------------------------------------------------
else printf("\nNATIVE-PLCThread: IOM_Addr_search socket() previously failed ... search cannot proceed.\n");
// MAIN ENGINE LOOP !!!---------------------------------------------------------------------------------------------
//
// Loop for the life of this Sedona PLC object (unless Enable is false).
//
while((myPLC->ThreadCtrl != SIGNAL_THREAD_KILL) && (myPLC->ThreadCtrl != SIGNAL_THREAD_RESET))
{
}
CleanExit: //--------------------------------------------------------------------------------------------------------
close(fd);
free(outBuff);
free(inBuff);
free(myPLC);
pthread_exit(NULL);
}
Here is a print example when it starts:
NATIVE-PLCThread: IOM_Addr_search recvfrom() returned 104 bytes on eth0 (index 2) MAC 00:1e:c9:7d:c4:36 rcv_tm_out.tv_sec = 0.49997
MAC Addr from a stranger !!!
NATIVE-PLCThread: IOM_Addr_search recvfrom() returned 152 bytes on enp1s0 (index 3) MAC 00:1e:c9:7d:c4:36 rcv_tm_out.tv_sec = 0.49998
MAC Addr from a stranger !!!
NATIVE-PLCThread: IOM_MAC_search MAC address not found after searching all interfaces !!!
I should see "select() timed out" on eth0 since there is nothing responding with Ether type 0x88b5.
I think I see the problem.
I created the socket with ETH_P_ALL.
I assumed I could be more specific in the bind(), as the docs say we can.
A preliminary test has so far not reproduced the issue.
I have seen many sources that say one can do what I originally did ... so this may be a bug in Linux or the driver?
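If this is indeed the cause, the change amounts to the following (a sketch against the code above; packet(7) says the protocol passed to socket() filters from the moment the socket exists, while bind() only narrows the interface):
// request only our EtherType from the start, instead of ETH_P_ALL,
// so frames of other types are never queued between socket() and bind().
fd = socket(AF_PACKET, SOCK_DGRAM, htons(eType));

// bind() then merely narrows reception to one interface; the protocol
// filter has been in force since the socket was created.
IOM_sa_flt.sll_family = AF_PACKET;
IOM_sa_flt.sll_protocol = htons(eType);
IOM_sa_flt.sll_ifindex = i; // interface currently being probed
bind(fd, (struct sockaddr*)&IOM_sa_flt, sizeof(IOM_sa_flt));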
I recently did some testing with kernel events and I came up with the following:
Does it make sense to use a kernel event for accepting sockets? My testing showed that I was only able to handle one accept at a time, even if the eventlist array is bigger. (That makes sense to me, because .ident == sockfd is only true for one socket.)
I thought the use of kevent is mainly to read from multiple sockets at once. Is that true?
Is this how a TCP server is done with a kqueue implementation?
Listening Thread (without kqueue)
Accepts new connections and adds FD to a worker kqueue.
QUESTION: Is this even possible? My testing showed yes, but is it guaranteed that the worker thread will be aware of the changes and is kevent really thread safe?
Worker thread (with kqueue)
Waits on reads on file descriptors added from the listening thread.
QUESTION: How many sockets at once would make sense to check for updates?
Thanks
This is not really an answer, but I made a little kqueue server program that demonstrates the problem:
#include <stdio.h> // fprintf
#include <sys/event.h> // kqueue
#include <netdb.h> // addrinfo
#include <arpa/inet.h> // AF_INET
#include <sys/socket.h> // socket
#include <assert.h> // assert
#include <string.h> // bzero
#include <stdbool.h> // bool
#include <unistd.h> // close
int main(int argc, const char * argv[])
{
/* Initialize server socket */
struct addrinfo hints, *res;
int sockfd;
bzero(&hints, sizeof(hints));
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
assert(getaddrinfo("localhost", "9090", &hints, &res) == 0);
sockfd = socket(AF_INET, SOCK_STREAM, res->ai_protocol);
assert(sockfd > 0);
{
unsigned opt = 1;
assert(setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt)) == 0);
#ifdef SO_REUSEPORT
assert(setsockopt(sockfd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt)) == 0);
#endif
}
assert(bind(sockfd, res->ai_addr, res->ai_addrlen) == 0);
freeaddrinfo(res);
/* Start to listen */
(void)listen(sockfd, 5);
{
/* kevent set */
struct kevent kevSet;
/* events */
struct kevent events[20];
/* nevents */
int nevents;
/* kq */
int kq;
/* buffer */
char buf[20];
/* length */
ssize_t readlen;
kevSet.data = 5; // backlog is set to 5
kevSet.fflags = 0;
kevSet.filter = EVFILT_READ;
kevSet.flags = EV_ADD;
kevSet.ident = sockfd;
kevSet.udata = NULL;
assert((kq = kqueue()) > 0);
/* Update kqueue */
assert(kevent(kq, &kevSet, 1, NULL, 0, NULL) == 0);
/* Enter loop */
while (true) {
/* Wait for events to happen */
nevents = kevent(kq, NULL, 0, events, 20, NULL);
assert(nevents >= 0);
fprintf(stderr, "Got %d events to handle...\n", nevents);
for (int i = 0; i < nevents; ++i) {
struct kevent event = events[i];
int clientfd = (int)event.ident;
/* Handle disconnect */
if (event.flags & EV_EOF) {
/* Simply close socket */
close(clientfd);
fprintf(stderr, "A client has left the server...\n");
} else if (clientfd == sockfd) {
int nclientfd = accept(sockfd, NULL, NULL);
assert(nclientfd > 0);
/* Add to event list */
kevSet.data = 0;
kevSet.fflags = 0;
kevSet.filter = EVFILT_READ;
kevSet.flags = EV_ADD;
kevSet.ident = nclientfd;
kevSet.udata = NULL;
assert(kevent(kq, &kevSet, 1, NULL, 0, NULL) == 0);
fprintf(stderr, "A new client connected to the server...\n");
(void)write(nclientfd, "Welcome to this server!\n", 24);
} else if (event.filter == EVFILT_READ) {
/* sleep for "processing" time */
readlen = read(clientfd, buf, sizeof(buf) - 1);
if (readlen > 0)
buf[readlen - 1] = 0; /* chop the trailing newline */
else
buf[0] = 0;
fprintf(stderr, "bytes %zu are available to read... %s \n", (size_t)event.data, buf);
sleep(4);
} else {
fprintf(stderr, "unknown event: %8.8X\n", event.flags);
}
}
}
}
return 0;
}
Every time a client sends something, the server experiences a "lag" of 4 seconds. (I exaggerated a bit, but it's quite reasonable for testing.) So how do I get around that problem? I see worker threads (a pool) with their own kqueue as a possible solution; then no connection lag would occur. (Each worker thread reads a certain "range" of file descriptors.)
Normally, you use kqueue as an alternative to threads. If you're going to use threads, you can just set up a listening thread and a worker threadpool with one thread per accepted connection. That's a much simpler programming model.
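A sketch of that simpler model (assuming a ready listening socket sockfd; each accepted connection gets its own detached thread):
#include <pthread.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>

static void *conn_handler(void *arg)
{
    int fd = (int)(intptr_t)arg;
    /* read/write the protocol exchange here, then clean up */
    close(fd);
    return NULL;
}

static void listen_loop(int sockfd)
{
    for (;;) {
        int cfd = accept(sockfd, NULL, NULL);
        if (cfd < 0)
            continue;
        pthread_t t;
        /* the fd is smuggled through the void* thread argument */
        if (pthread_create(&t, NULL, conn_handler, (void *)(intptr_t)cfd) == 0)
            pthread_detach(t);
        else
            close(cfd);
    }
}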
In an event-driven framework, you would put both the listening socket and all the accepted sockets into the kqueue, and then handle events as they occur. When you accept a socket, you add it to the kqueue, and when a socket handler finishes its work, it could remove the socket from the kqueue. (The latter is not normally necessary because closing a fd automatically removes any associated events from any kqueue.)
Note that every event registered with a kqueue has a void* userdata, which can be used to identify the desired action when the event fires. So it's not necessary that every event queue have a unique event handler; in fact, it is common to have a variety of handlers. (For example, you might also want to handle a control channel set up through a named pipe.)
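For instance (a sketch; udata is a void * on FreeBSD and macOS, and casting a function pointer through it is not strictly ISO C but works on the kqueue platforms):
#include <sys/event.h>

typedef void (*ev_handler)(int fd, struct kevent *ev);

/* at registration time, stash the handler in udata */
static void register_read(int kq, int fd, ev_handler h)
{
    struct kevent change;
    EV_SET(&change, fd, EVFILT_READ, EV_ADD, 0, 0, (void *)h);
    kevent(kq, &change, 1, NULL, 0, NULL);
}

/* in the event loop, dispatch without any per-fd comparisons */
static void dispatch(struct kevent *ev)
{
    ev_handler h = (ev_handler)ev->udata;
    h((int)ev->ident, ev);
}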
Hybrid event/thread models are certainly possible; otherwise, you cannot take advantage of multicore CPUs. One possible strategy is to use the event queue as a dispatcher in a producer-consumer model. The queue handler would directly handle events on the listening socket, accepting the connection and adding the accepted fd into the event queue. When a client connection event occurs, the event would be posted into the workqueue for later handling. It's also possible to have multiple workqueues, one per thread, and have the accepter guess which workqueue a new connection should be placed in, presumably on the basis of that thread's current load.
I'm writing a web server in C (which I suck with) using Pthreads (which I suck with even more) and I'm stuck at this point. The model for the server is boss-worker so the boss thread instantiates all worker threads at the beginning of the program. There is a global queue that stores the socket of the incoming connection(s). The boss thread is the one that adds all items (sockets) to the queue as the connections are accepted. All of the worker threads then wait for an item to be added to a global queue in order for them to take up the processing.
The server works fine as long as I connect to it fewer times than the number of worker threads that the server has. Because of that, I think that either something is wrong with my mutexes (maybe the signals are getting lost?) or the threads are being disabled after they run once (which would explain why, if there are 8 threads, it can only parse the first 8 HTTP requests).
Here is my global queue variable.
int queue[QUEUE_SIZE];
This is the main thread. It creates a queue struct (defined elsewhere) with methods enqueue, dequeue, empty, etc. When the server accepts a connection, it enqueues the socket that the incoming connection is on. The worker threads which were dispatched at the beginning are constantly checking this queue to see if any jobs have been added, and if there are jobs, then they dequeue the socket, connect to that port, and read/parse/write the incoming http request.
int main(int argc, char* argv[])
{
int hSocket, hServerSocket; /* handle to socket */
struct hostent* pHostInfo; /* holds info about a machine */
struct sockaddr_in Address; /* Internet socket address stuct */
int nAddressSize = sizeof(struct sockaddr_in);
int nHostPort;
int numThreads;
int i;
init(&head,&tail);
//**********************************************
//ALL OF THIS JUST SETS UP SERVER (ADDR STRUCT,PORT,HOST INFO, ETC)
if(argc < 3) {
printf("\nserver-usage port-num num-thread\n");
return 0;
}
else {
nHostPort=atoi(argv[1]);
numThreads=atoi(argv[2]);
}
printf("\nStarting server");
printf("\nMaking socket");
/* make a socket */
hServerSocket=socket(AF_INET,SOCK_STREAM,0);
if(hServerSocket == SOCKET_ERROR)
{
printf("\nCould not make a socket\n");
return 0;
}
/* fill address struct */
Address.sin_addr.s_addr = INADDR_ANY;
Address.sin_port = htons(nHostPort);
Address.sin_family = AF_INET;
printf("\nBinding to port %d\n",nHostPort);
/* bind to a port */
if(bind(hServerSocket,(struct sockaddr*)&Address,sizeof(Address)) == SOCKET_ERROR) {
printf("\nCould not connect to host\n");
return 0;
}
/* get port number */
getsockname(hServerSocket, (struct sockaddr *) &Address,(socklen_t *)&nAddressSize);
printf("Opened socket as fd (%d) on port (%d) for stream i/o\n",hServerSocket, ntohs(Address.sin_port));
printf("Server\n\
sin_family = %d\n\
sin_addr.s_addr = %d\n\
sin_port = %d\n"
, Address.sin_family
, Address.sin_addr.s_addr
, ntohs(Address.sin_port)
);
//Up to this point is boring server set up stuff. I need help below this.
//**********************************************
//instantiate all threads
pthread_t tid[numThreads];
for(i = 0; i < numThreads; i++) {
pthread_create(&tid[i],NULL,worker,NULL);
}
printf("\nMaking a listen queue of %d elements",QUEUE_SIZE);
/* establish listen queue */
if(listen(hServerSocket,QUEUE_SIZE) == SOCKET_ERROR) {
printf("\nCould not listen\n");
return 0;
}
while(1) {
pthread_mutex_lock(&mtx);
printf("\nWaiting for a connection");
while(!empty(head,tail)) {
pthread_cond_wait (&cond2, &mtx);
}
/* get the connected socket */
hSocket = accept(hServerSocket,(struct sockaddr*)&Address,(socklen_t *)&nAddressSize);
printf("\nGot a connection");
enqueue(queue,&tail,hSocket);
pthread_mutex_unlock(&mtx);
pthread_cond_signal(&cond); // wake worker thread
}
}
Here is the worker thread. It should always be running, checking for new requests (by seeing if the queue is not empty). At the end of this method, it should defer back to the boss thread to wait for the next time it is needed.
void *worker(void *threadarg) {
pthread_mutex_lock(&mtx);
while(empty(head,tail)) {
pthread_cond_wait(&cond, &mtx);
}
int hSocket = dequeue(queue,&head);
unsigned nSendAmount, nRecvAmount;
char line[BUFFER_SIZE];
nRecvAmount = read(hSocket,line,sizeof line);
printf("\nReceived %s from client\n",line);
//***********************************************
//DO ALL HTTP PARSING (Removed for the sake of space; I can add it back if needed)
//***********************************************
nSendAmount = write(hSocket,allText,sizeof(allText));
if(nSendAmount != -1) {
totalBytesSent = totalBytesSent + nSendAmount;
}
printf("\nSending result: \"%s\" back to client\n",allText);
printf("\nClosing the socket");
/* close socket */
if(close(hSocket) == SOCKET_ERROR) {
printf("\nCould not close socket\n");
return 0;
}
pthread_mutex_unlock(&mtx);
pthread_cond_signal(&cond2);
}
Any help would be greatly appreciated. I can post more of the code if anyone needs it, just let me know. I'm not the best with OS stuff, especially in C, but I know the basics of mutexes, cond. variables, semaphores, etc. Like I said, I'll take all the help I can get. (Also, I'm not sure if I posted the code exactly right since this is my first question. Let me know if I should change the formatting at all to make it more readable.)
Thanks!
Time for a workers' revolution.
The work threads seem to be missing a while(true) loop. After the HTTP exchange and closing the socket, they should be looping back to wait on the queue for more sockets/requests.
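A sketch of the reshaped worker, reusing the question's globals (it also releases the mutex before the blocking socket I/O, which the original holds across the whole exchange):
void *worker(void *threadarg) {
    while (1) { /* the missing loop: back to the queue after each request */
        pthread_mutex_lock(&mtx);
        while (empty(head, tail))
            pthread_cond_wait(&cond, &mtx);
        int hSocket = dequeue(queue, &head);
        pthread_mutex_unlock(&mtx); /* don't hold the lock during I/O */
        pthread_cond_signal(&cond2); /* let the boss accept again */

        /* ... read/parse/write the HTTP exchange as before ... */

        close(hSocket);
    }
    return NULL; /* not reached */
}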