I have a program running on a QEMU VM. The program running inside this VM gets notified by a program on the host via interrupts, using QEMU ivshmem. The program on the host creates an eventfd and sends this file descriptor to QEMU when the VM starts. The program in the guest then opens a VFIO group device and sets an interrupt request fd on this device. We can then add the interrupt fd to epoll and call epoll_wait to wait for notifications from the host.
The thing is that I want a 1-1 match between the number of times the host writes to the eventfd and the number of events signaled by epoll_wait. For this I decided to use EFD_SEMAPHORE for the eventfds on both the host and the guest. From my understanding, every time I write an 8-byte integer with value 1, the eventfd counter is incremented by 1, and every time the eventfd is read, the counter is decremented by 1 (unlike a regular eventfd, where each read clears the whole counter). For some reason I am not getting the desired behaviour, so I was wondering whether eventfds with the EFD_SEMAPHORE flag are not properly supported by VFIO or QEMU's ivshmem.
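As a point of reference, the semantics I am relying on can be checked with a plain eventfd in isolation (no QEMU or VFIO involved); a minimal sketch:
#include <sys/eventfd.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Three writes of 1 should yield exactly three reads of 1, then EAGAIN */
int main(void)
{
    uint64_t val = 1;
    int i, fd = eventfd(0, EFD_SEMAPHORE | EFD_NONBLOCK);
    if (fd < 0)
        return 1;
    for (i = 0; i < 3; i++)
        write(fd, &val, sizeof(val));
    /* each read returns 1 and decrements the counter by one */
    while (read(fd, &val, sizeof(val)) == (ssize_t) sizeof(val))
        printf("read %llu\n", (unsigned long long) val);
    if (errno == EAGAIN)
        printf("counter drained\n");
    close(fd);
    return 0;
}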
Below is a simplified version of the parts I think are relevant and how I set up the notification system. I hope the code is not too verbose; I tried to cut out the irrelevant parts (there is a lot of other code in between that has no particular bearing on the problem), but I'm not 100% sure what might be relevant.
Code the host uses to signal the guest
int ivshmem_uxsocket_send_int(int fd, int64_t i)
{
int n;
struct iovec iov = {
.iov_base = &i,
.iov_len = sizeof(i),
};
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_control = NULL,
.msg_controllen = 0,
.msg_flags = 0,
};
if ((n = sendmsg(fd, &msg, 0)) != sizeof(int64_t))
{
return -1;
}
return n;
}
int ivshmem_uxsocket_sendfd(int uxfd, int fd, int64_t i)
{
int n;
struct cmsghdr *chdr;
/* Need to pass at least one byte of data to send control data */
struct iovec iov = {
.iov_base = &i,
.iov_len = sizeof(i),
};
/* Allocate a char array but use a union to ensure that it
is aligned properly */
union {
char buf[CMSG_SPACE(sizeof(fd))];
struct cmsghdr align;
} cmsg;
memset(&cmsg, 0, sizeof(cmsg));
/* Add control data (file descriptor) to msg */
struct msghdr msg = {
.msg_name = NULL,
.msg_namelen = 0,
.msg_iov = &iov,
.msg_iovlen = 1,
.msg_control = &cmsg,
.msg_controllen = sizeof(cmsg),
.msg_flags = 0,
};
/* Set message header to describe ancillary data */
chdr = CMSG_FIRSTHDR(&msg);
chdr->cmsg_level = SOL_SOCKET;
chdr->cmsg_type = SCM_RIGHTS;
chdr->cmsg_len = CMSG_LEN(sizeof(int));
memcpy(CMSG_DATA(chdr), &fd, sizeof(fd));
if ((n = sendmsg(uxfd, &msg, 0)) != sizeof(i))
{
return -1;
}
return n;
}
/* SETUP IVSHMEM WITH QEMU AND PASS THE EVENTFD USED TO
NOTIFY THE GUEST */
int ivshmem_uxsocket_accept()
{
int ret;
int cfd, ifd, nfd;
int64_t version = IVSHMEM_PROTOCOL_VERSION;
uint64_t hostid = HOST_PEERID;
int vmid = 0;
/* Accept connection from qemu ivshmem */
if ((cfd = accept(uxfd, NULL, NULL)) < 0)
{
return -1;
}
/* Send protocol version as required by qemu ivshmem */
ret = ivshmem_uxsocket_send_int(cfd, version);
if (ret < 0)
{
return -1;
}
/* Send vm id to qemu */
ret = ivshmem_uxsocket_send_int(cfd, vmid);
if (ret < 0)
{
return -1;
}
/* Send shared memory fd to qemu */
ret = ivshmem_uxsocket_sendfd(cfd, shm_fd, -1);
if (ret < 0)
{
return -1;
}
/* Eventfd used by guest to notify host */
if ((nfd = eventfd(0, EFD_SEMAPHORE | EFD_NONBLOCK)) < 0)
{
return -1;
}
/* Ivshmem protocol requires to send host id
with the notify fd */
ret = ivshmem_uxsocket_sendfd(cfd, nfd, hostid);
if (ret < 0)
{
return -1;
}
/* THIS IS THE EVENTFD OF INTEREST TO US: USED BY HOST
TO NOTIFY GUEST */
if ((ifd = eventfd(0, EFD_SEMAPHORE | EFD_NONBLOCK)) < 0)
{
return -1;
}
ret = ivshmem_uxsocket_sendfd(cfd, ifd, vmid);
if (ret < 0)
{
return -1;
}
if (epoll_ctl(epfd, EPOLL_CTL_ADD, cfd, &ev) < 0)
{
return -1;
}
return 0;
}
/* NOW EVERY TIME WE WANT TO NOTIFY THE GUEST
WE CALL THE FOLLOWING FUNCTION */
int notify_guest(int fd)
{
ssize_t ret;
uint64_t buf = 1;
ret = write(fd, &buf, sizeof(uint64_t));
/* ret must be signed: with an unsigned type, -1 from write() would
   convert to a huge value and the error would not be caught */
if (ret != (ssize_t) sizeof(uint64_t))
{
return -1;
}
return 0;
}
Code the guest uses to receive notifications from the host
/* THIS FUNCTION SETS THE IRQ THAT RECEIVES THE
NOTIFICATIONS FROM THE HOST */
int vfio_set_irq(int dev)
{
int fd;
struct vfio_irq_set *irq_set;
char buf[sizeof(struct vfio_irq_set) + sizeof(int)];
if ((fd = eventfd(0, EFD_SEMAPHORE | EFD_NONBLOCK)) < 0)
{
return -1;
}
irq_set = (struct vfio_irq_set *) buf;
irq_set->argsz = sizeof(buf);
irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
irq_set->index = 2;
irq_set->start = 0;
irq_set->count = 1;
memcpy(&irq_set->data, &fd, sizeof(int));
if (ioctl(dev, VFIO_DEVICE_SET_IRQS, irq_set) < 0)
{
return -1;
}
return fd;
}
/* The guest sets up the ivshmem region from QEMU and sets the
interrupt request. */
int vfio_init()
{
int cont, group, dev, irq_fd;
struct epoll_event ev;
struct vfio_group_status g_status = { .argsz = sizeof(g_status) };
struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
/* Create vfio container */
if ((cont = open("/dev/vfio/vfio", O_RDWR)) < 0)
{
return -1;
}
/* Check API version of container */
if (ioctl(cont, VFIO_GET_API_VERSION) != VFIO_API_VERSION)
{
return -1;
}
if (!ioctl(cont, VFIO_CHECK_EXTENSION, VFIO_NOIOMMU_IOMMU))
{
return -1;
}
/* Open the vfio group */
if((group = open(VFIO_GROUP, O_RDWR)) < 0)
{
return -1;
}
/* Test if group is viable and available */
ioctl(group, VFIO_GROUP_GET_STATUS, &g_status);
if (!(g_status.flags & VFIO_GROUP_FLAGS_VIABLE))
{
return -1;
}
/* Add group to container */
if (ioctl(group, VFIO_GROUP_SET_CONTAINER, &cont) < 0)
{
return -1;
}
/* Enable desired IOMMU model */
if (ioctl(cont, VFIO_SET_IOMMU, VFIO_NOIOMMU_IOMMU) < 0)
{
return -1;
}
/* Get file descriptor for device */
if ((dev = ioctl(group, VFIO_GROUP_GET_DEVICE_FD, VFIO_PCI_DEV)) < 0)
{
return -1;
}
/* Get device info */
if (ioctl(dev, VFIO_DEVICE_GET_INFO, &device_info) < 0)
{
return -1;
}
/* Set interrupt request fd */
if ((irq_fd = vfio_set_irq(dev)) < 0)
{
return -1;
}
/* Add interrupt request fd to interest list */
if (vfio_subscribe_irq() < 0)
{
return -1;
}
/* Do other shm setup stuff not related to the interrupt
request */
ev.events = EPOLLIN;
/* epoll_data is a union, so ptr and fd overlap; set only the fd member */
ev.data.fd = irq_fd;
if (epoll_ctl(epfd, EPOLL_CTL_ADD, irq_fd, &ev) != 0)
{
return -1;
}
return 0;
}
int ivshmem_drain_evfd(int fd)
{
ssize_t ret;
uint64_t buf;
ret = read(fd, &buf, sizeof(uint64_t));
/* an eventfd read returns 8 bytes on success and fails with EAGAIN when
   the counter is zero; it never returns 0 */
if (ret != (ssize_t) sizeof(uint64_t))
{
return -1;
}
return ret;
}
/* I should get every notification from the host here,
but it seems that not all notifications are going
through. The number of calls to notify_guest does not
match the number of events received from epoll_wait
here */
int notify_poll()
{
int i, n;
struct epoll_event evs[32];
n = epoll_wait(epfd, evs, 32, 0);
for (i = 0; i < n; i++)
{
if (evs[i].events & EPOLLIN)
{
/* Drain evfd */
ivshmem_drain_evfd(irq_fd);
/* Handle notification ... */
handle();
}
}
return n;
}
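For completeness, here is what I would fall back to if single reads per event turn out to be the problem: drain the eventfd in a loop and count the decrements, so that nothing is lost even if epoll coalesces several increments into one EPOLLIN event. This is a hypothetical variant, not what I currently run:
/* Hypothetical variant of ivshmem_drain_evfd: drain the eventfd completely
   and report how many notifications were pending. With EFD_SEMAPHORE each
   successful 8-byte read decrements the counter by exactly one, so the
   number of reads equals the number of host writes. */
int ivshmem_count_evfd(int fd)
{
    uint64_t buf;
    int count = 0;
    while (read(fd, &buf, sizeof(buf)) == (ssize_t) sizeof(buf))
        count++;
    /* fd is EFD_NONBLOCK, so the loop ends with errno == EAGAIN
       once the counter reaches zero */
    return count;
}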
I'm trying to send a message from kernel to user space using generic netlink and libnl. The part of my code that does this is implemented as follows:
int my_callback(struct sk_buff *skb, struct genl_info *info)
{
struct sk_buff *obuff;
void *msg_head;
if ((obuff = genlmsg_new(0, GFP_KERNEL)) == NULL) { // I've tried changing the len to NLMSG_GOODSIZE, but it didn't work
pr_err("Failed to allocate message for a reply\n");
return 0;
}
if ((msg_head = genlmsg_put_reply(obuff, info, &lunatik_family, 0, LIST_STATES)) == NULL) {
pr_err("Failed to put generic netlink header\n");
return 0;
}
// I've tried adding genlmsg_end(obuff, msg_head); here, but that didn't work either
if (genlmsg_reply(obuff, info) < 0) {
pr_err("Failed to send message to user space\n");
return 0;
}
pr_info("Message sent to user-space\n");
return 0;
}
P.S.: LIST_STATES is an enum member with the value 3.
And my user space code is basically:
static int req_handler(struct nl_msg *msg, void *arg)
{
struct nlmsghdr *nlhdr;
struct genlmsghdr *genlhdr;
nlhdr = nlmsg_hdr(msg);
genlhdr = genlmsg_hdr(nlhdr);
printf("Received a message from kernel: %d\n", genlhdr->cmd);
return NL_OK;
}
int socket_init(struct nl_sock *sock)
{
int err = -1;
if ((sock = nl_socket_alloc()) == NULL)
return err;
if ((err = genl_connect(sock)))
return err;
if ((err = genl_ctrl_resolve(sock, LUNATIK_FAMILY)) < 0)
return err;
// I've tried to use NL_CB_VALID, but when I use it I receive no message at all
nl_socket_modify_cb(sock, NL_CB_MSG_IN, NL_CB_CUSTOM, req_handler, NULL);
return 0;
}
My output on dmesg is:
Message sent to user-space
And my output on user space is:
Received a message from kernel: 0
I should receive 3 instead of 0. I noticed that I'm receiving only the ACK message, not the message I'm sending. I would like to know why this is happening and what I'm doing wrong.
The result of genl_ctrl_resolve() is twofold:
If < 0, it's an error code.
If >= 0, it's the family identification number.
You're throwing away your family identification number. Instead of
if ((err = genl_ctrl_resolve(sock, LUNATIK_FAMILY)) < 0)
return err;
, do
if ((lunatik_family = genl_ctrl_resolve(sock, LUNATIK_FAMILY)) < 0)
return lunatik_family;
Later, when you're setting up the Netlink Header, make sure to use it:
if (!genlmsg_put(..., ..., ..., lunatik_family, ..., ..., LIST_STATES, ...))
/* handle error */
And one more thing: nl_socket_modify_cb() also returns an error code. Instead of
nl_socket_modify_cb(sock, NL_CB_MSG_IN, NL_CB_CUSTOM, req_handler, NULL);
return 0;
do
return nl_socket_modify_cb(sock, NL_CB_MSG_IN, NL_CB_CUSTOM, req_handler, NULL);
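Putting both fixes together, a corrected socket_init might look like the sketch below. Two assumptions here: lunatik_family is a global int holding the resolved id, and the parameter is changed to struct nl_sock ** because the original function only assigns to its local copy of sock, so the caller never sees the allocated socket.
static int lunatik_family; /* resolved family id, reused later in genlmsg_put() */

int socket_init(struct nl_sock **sock)
{
    int err;
    if ((*sock = nl_socket_alloc()) == NULL)
        return -1;
    if ((err = genl_connect(*sock)))
        return err;
    /* keep the id instead of discarding it */
    if ((lunatik_family = genl_ctrl_resolve(*sock, LUNATIK_FAMILY)) < 0)
        return lunatik_family;
    /* nl_socket_modify_cb() returns an error code, so propagate it */
    return nl_socket_modify_cb(*sock, NL_CB_MSG_IN, NL_CB_CUSTOM, req_handler, NULL);
}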
I'm writing a program in C and it needs to look up the IP & MAC address of an interface. I'm using IOCTL calls. Until recently I was using a custom data structure to store these addresses, and everything was working perfectly.
Then I moved to using standard structs like struct in_addr and struct ether_addr.
On my OS (Arch Linux) it is still working properly, so that's good. But usually I run my program in a virtualized environment (Slitaz Linux) in VirtualBox; I do this so I can run a virtualized network with GNS3. Since these modifications, it has been impossible to get the IOCTL call working properly in Slitaz. When I call
printf("%s\n",strerror(errno));
I just get
No Such Device
If it weren't working properly on both systems, I would search more in-depth, but here I'm completely lost: it works fine on Arch but not on Slitaz, and it WORKED in Slitaz BEFORE these changes. I can still use the older version (2 branches in git, one for the old version and one for the current).
Thank you for your help.
Nikko
Here is the relevant part of the code (I show only the MAC case since the IP problem is the same):
/*
* Return a ifreq structure for this interface
* */
struct ifreq
get_ifreq ( const char * interface )
{
struct ifreq ifr;
size_t if_len;
if_len = strlen(interface);
if (if_len >= sizeof(ifr.ifr_name)){
fprintf(stderr,"Interface name too long to open descriptor.\nAbort.");
exit(EXIT_FAILURE);
}
strncpy(ifr.ifr_name,interface,if_len);
return ifr;
}
int
get_mac_address(const char * interface, struct ether_addr * ether) {
int fd ;
struct ifreq ifr = get_ifreq(interface);
if((fd = get_socketudp()) == -1) {
fprintf(stderr,"Unable to get mac address.\n");
return -1;
};
if(ioctl(fd,SIOCGIFHWADDR,&ifr) == -1) {
fprintf(stderr,"%s\n",strerror(fd));
fprintf(stderr,"Error while operating IOCTL (MAC resolving).\n");
close(fd);
return -1;
}
close(fd);
memcpy(ether,&ifr.ifr_hwaddr.sa_data,ETH_ALEN);
return 0;
}
And in the main.c, where I call this function :
char * interface = NULL;
/*----------------------------------------------------------------------
* Our OWN mac address & ip address
*-----------------------------------------------------------------------*/
struct ether_addr mac;
struct in_addr ip;
int
main ( int argc, char *argv[] )
{
char * operation = NULL;
char * hostA = NULL;
char * hostB = NULL;
int c = 0;
if (argc < 2) {
usage();
exit(EXIT_FAILURE);
}
while((c = getopt(argc,argv,"m:i:a:b:f:l:")) != -1){
switch(c){
case 'm':
operation = optarg;
if (strncmp(operation,"mitm",4) != 0 &&
strncmp(operation,"flood",5) != 0) {
fprintf(stderr,"Operation %s is unknown.Abort\n",operation);
abort();
}
break;
case 'i':
interface = optarg;
break;
case '?':
fprintf(stderr,"Option %c requires an argument",optopt);
abort();
}
}
/* Check options consistency */
if(operation == NULL) {
fprintf(stderr,"No Operations given. Abort.\n");
exit(EXIT_FAILURE);
} else if (interface == NULL) {
fprintf(stderr,"No interface given. Abort.\n");
exit(EXIT_FAILURE);
}
/* Store our own mac address */
if (get_mac_address(interface,&mac) == -1) {
fprintf(stderr,"Abort.\n");
exit(EXIT_FAILURE);
}
SOLUTION
Thanks to the answer, I changed my get_ifreq method to:
struct ifreq
get_ifreq ( const char * interface )
{
struct ifreq ifr;
size_t if_len;
memset(ifr.ifr_name,0x00,IFNAMSIZ);
if_len = strlen(interface);
if (if_len >= IFNAMSIZ){
fprintf(stderr,"Interface name too long to open descriptor.\nAbort.");
exit(EXIT_FAILURE);
}
strncpy(ifr.ifr_name,interface,if_len);
return ifr;
}
It seems likely that there is additional garbage in the ifreq structure that you're not clearing out here:
struct ifreq ifr;
if_len = strlen(interface);
strncpy(ifr.ifr_name,interface,if_len);
You declare the struct ifreq on the stack but don't initialize it, so the bytes in that structure are potentially random garbage. You then copy exactly if_len bytes into it, but what about the bytes immediately following those? Assuming if_len is less than IFNAMSIZ, how will the kernel know to stop at if_len when interpreting the interface name?
I would clear the structure prior to the strncpy.
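An equivalent minimal fix is to zero the whole structure at declaration, which also clears the other ifreq members, not just the name field:
struct ifreq ifr;
memset(&ifr, 0, sizeof(ifr)); /* no garbage anywhere in the struct */
strncpy(ifr.ifr_name, interface, if_len);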
I'm trying to debug code that uses the libevent library. With that library, event_new is supposed to register an event callback (event_cb), but somehow, after I dispatch the event base, the callback is never called. This problem only happens on HP-UX Itanium; the code works on HP-UX PA-RISC, Red Hat, AIX, and Solaris. Is there anything specific that needs to be set?
This is part of the code
int ttypread (int fd, Header *h, char **buf)
{
int c,k;
struct user_data user_data;
struct bufferevent *in_buffer;
struct event_config *evconfig;
log_debug("inside ttypread");
in_buffer = NULL;
user_data.fd = fd;
user_data.h = h;
user_data.buf = buf;
log_debug("from user_data, fd = %d",user_data.fd); //the log_debug is a debugging function for me to check the value sent by the system. I use it to compare between each platform
log_debug("from user_data, buf = %s",user_data.buf);
log_debug("from user_data, h.len = %d",user_data.h->len);
log_debug("from user_data, h.type = %d",user_data.h->type);
evconfig = event_config_new();
if (evconfig == NULL) {
log_error("event_config_new failed");
return -1;
}
if (event_config_require_features(evconfig, EV_FEATURE_FDS)!=0) {
log_error("event_config_require_features failed");
return -1;
}
base = event_base_new_with_config(evconfig);
if (!base) {
log_error("ttypread:event_base_new failed");
return -1;
}
const char* method; //these 3 lines are the new line edited
method = event_base_get_method(base);
log_debug("ttyread is using method = %s",method);
ev = event_new(base, fd, EV_READ|EV_PERSIST, ttypread_event_cb, &user_data);
c = event_add(ev, NULL);
log_debug("ttypread passed event_add with c value is %d",c);
in_buffer = bufferevent_socket_new(base, STDIN_FILENO, BEV_OPT_CLOSE_ON_FREE);
log_debug("ttypread passed bufferevent_socket_new");
if(in_buffer == NULL){
log_debug("problem with bufferevent_socket_new");
}
bufferevent_setcb(in_buffer, in_read_cb, NULL, in_event_cb, NULL);
bufferevent_disable(in_buffer, EV_WRITE);
bufferevent_enable(in_buffer, EV_READ);
k =event_base_dispatch(base);
log_debug("event_base have been dispatched"); //when looking at the debugging file, the other plaform will go to ttypread_event_cb function. But for hpux itanium, it stays here.
if (k == 0){
log_debug("event_base_dispatch returned 0");
} else if (k == -1){
log_debug("event_base_dispatch returned -1");
} else {
log_debug("event_base_dispatch returned 1");
}
event_free(ev); /* free the event before freeing its base */
event_base_free(base);
log_debug("finish ttypread");
log_debug("ttypread_ret will return [%d]",ttypread_ret);
return ttypread_ret;
}
void ttypread_event_cb(evutil_socket_t fd, short events, void *arg)
{
int nread;
struct timeval t;
struct user_data *user_data;
user_data = (struct user_data*)arg;
nread = 0;
log_debug("inside ttypread_event_cb");
if (events & EV_READ) {
log_debug("got events & EV_READ");
nread = ttyread(fd, user_data->h, user_data->buf);
if (nread == -1) {
ttypread_ret = -1;
event_del(ev);
event_base_loopexit(base, NULL);
} else if (nread == 0) {
if (access(input_filename, F_OK)!=0) {
log_debug("cannot access [%s]",input_filename);
tcsetattr(0, TCSANOW, &old); /* Return terminal state */
exit(EXIT_SUCCESS);
}
t.tv_sec = 0;
t.tv_usec = 250000;
select(0, 0, 0, 0, &t);
} else {
ttypread_ret = 1;
event_del(ev);
event_base_loopexit(base, NULL);
}
}
else if (events & EV_WRITE) {
log_debug("got events & EV_WRITE");
}
}
Not sure if this helps, but here is some info on the HP-UX Itanium box:
uname -a = HP-UX hpux-ita B.11.23 U ia64
If you need any additional info or other function declarations, just leave a comment and I will edit the question.
EDIT: I've added a function inside ttypread. Somehow on HP-UX Itanium it returns devpoll while the other platforms return poll. I'm not sure if this is the problem, but if it is, is there any way for me to change it?
After checking the result from event_base_get_method, I found out that only my HP-UX Itanium box used the devpoll method. This is how I solved it:
char string[8] = "devpoll";
struct user_data user_data;
struct bufferevent *in_buffer;
struct event_config *evconfig;
const char *method;
const char *devpoll;
devpoll = string;
in_buffer = NULL;
user_data.fd = fd;
user_data.h = h;
user_data.buf = buf;
evconfig = event_config_new();
if (evconfig == NULL) {
log_error("event_config_new failed");
return -1;
}
if (event_config_require_features(evconfig, EV_FEATURE_FDS)!=0) {
log_error("event_config_require_features failed");
return -1;
}
if (event_config_avoid_method(evconfig,devpoll) != 0)
{
log_error("Failed to ignore devpoll method");
}
This forces libevent to avoid the devpoll method and use poll instead.
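As an aside, the temporary string and devpoll variables aren't needed; passing the string literal directly does the same thing:
if (event_config_avoid_method(evconfig, "devpoll") != 0)
{
log_error("Failed to ignore devpoll method");
}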
I'm trying to implement a web prefetching system. The purpose of a system like this is to “predict” future requests and prefetch them.
The system builds a predictive model from web navigation logs (Squid access.log files). The model is a dependency graph, where a node representing URL A has an arc to a node representing URL B if URL B has been requested immediately after URL A.
Once the model is built, the system receives queries of URLs requested by users, and make “predictions” based on the graph. Predictions are resources (URLs) very likely to be requested in the future. So, based on predictions, the system prefetches these resources to store them in cache prior to users' requests.
I'm using the following testing scenario:
A process simulates multiple clients, requesting the URLs in a file using libcurl. The process runs on a different PC from the prefetching system; the PCs are connected directly via an Ethernet cable.
Requests made by the client simulator are always the same URLs at the same relative times from the first request. All requests go to port 3128 (the prefetch PC's Squid listen port; port 80 is DNATed to port 3128 on the client).
The prefetching system runs in a CentOS 6.3 box, kernel 2.6.32-71.el6.i686, 2 core Intel Pentium 4 3.00GHz processor, 4 GB RAM.
The prefetching system is one process with multiple threads. The main thread creates the predictive model and generates predictions based on queries. A “listener” thread reads URLs requested by users and prefetches predicted URLs using libcurl. “Listening” means reading from a named pipe (called url_fifo) URLs captured live on an interface using tshark:
stdbuf -o0 tshark -i eth1 tcp port 3128 and "tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420" -T fields -e http.request.full_uri >> url_fifo
Every 10 minutes (one cycle) the model is updated based on the requests from the last cycle. The client tells the system when a cycle ends, and the model is then updated. Once the model is updated, the system tells the client to start requesting URLs from the next cycle.
Here is the situation: sometimes reading from the named pipe freezes. No URLs are read from the pipe even though tshark keeps capturing URLs and redirecting them to the named pipe. After an hour (or a couple of hours) all the “buffered” URLs are read in less than 10 minutes, and after that, reading from the pipe proceeds normally again. This doesn't happen every time (it freezes on about 50% of runs).
It seems that there is a buffering issue, since tshark keeps capturing URLs and all requests are correctly logged in Squid's access.log.
In the beginning, I ran tshark with the -l option, so that its output becomes line buffered. Then I started using stdbuf -o0 (no buffering). Anyway the situation still happens.
In the system code, I also tried opening and reading the named pipe as a stream (FILE *) and setting the stream to unbuffered or line buffered (using setvbuf()). The situation still happened.
In some cycles requests are faster than in others. Still, it doesn't seem to be a fast-producer/slow-consumer issue, since in many repetitions of the test all URLs are correctly read and processed without any freezes.
Is there something I'm missing related to named pipes and buffering? I'd really appreciate some guidance.
Assume networking (interfaces, routing, iptables, squid) is ok. I've not had any issues related to it.
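To isolate the problem, a minimal standalone reader can be run against the same fifo (blocking open, plain read loop, no threads) and its output timing compared with tshark's. This is a sketch; the fifo path is an assumption:
#include <fcntl.h>
#include <unistd.h>

/* Prints everything immediately, so any stall seen here is in the
   pipe/producer, not in the main program */
int main(void)
{
    char buf[512];
    ssize_t n;
    int fd = open("url_fifo", O_RDONLY); /* blocks until a writer opens */
    if (fd < 0)
        return 1;
    while ((n = read(fd, buf, sizeof(buf))) > 0)
        write(STDOUT_FILENO, buf, n); /* unbuffered passthrough */
    close(fd);
    return 0;
}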
Code (assume necessary header files are included):
functions.c
#define BUFLEN 512
#define QUEUE_LEN 64
#define THREADS_LEN 2
pthread_mutex_t model_lock;
pthread_cond_t model_cond, listen_cond;
pthread_t queries_thread, listen_thread;
short int model_is_updating, model_can_update, program_shutdown;
/* Program execution statistics */
Status * program_status;
/* Thread pool */
threadpool_t *pool;
/* program execution */
int
run(void)
{
Graph_Adj_List * gr = NULL; /* Graph as an adjacency list */
char ** reports = NULL;
unsigned report_counter = 0;
/* Init program status */
program_status = status_init();
/* Load list of custom web navigation reports to be used to build the initial
* version of the predictive model */
reports = file_load_reports(program_config.reports_file);
if (!reports)
return 0;
/* Init mutex and cond */
pthread_mutex_init(&model_lock, NULL);
pthread_cond_init(&model_cond, NULL);
pthread_cond_init(&listen_cond, NULL);
/* Lock */
pthread_mutex_lock (&model_lock);
/* Start first cycle */
status_start_cycle(program_status);
/* Create initial version of the predictive model */
gr = create_model_from_files(reports, &report_counter, program_config.reports_limit);
if (!gr)
{
/* Unlock */
pthread_mutex_unlock (&model_lock);
return 0;
}
/* Unlock */
pthread_mutex_unlock (&model_lock);
/* Start threads */
if (pthread_create(&queries_thread, NULL, fifo_predictions_threaded, (void *)gr) ||
pthread_create(&listen_thread, NULL, listen_end_of_cycle, NULL))
program_shutdown = 1;
/* main loop */
while(!program_shutdown)
{
/* lock */
pthread_mutex_lock (&model_lock);
/* wait for clients' announcement of the end of requests from current cycle */
while (!model_can_update)
pthread_cond_wait(&model_cond, &model_lock);
/* set updating flag */
model_is_updating = 1;
/* Update predictive model, based on Squid's access.log from (about to finish)
* current cycle */
adj_list_update_access(gr, program_config.access_file);
/* Save statistics related to the current cycle and finish it */
status_finish_cycle(program_status);
/* Check if last custom report has been read */
if (!reports[report_counter])
{
program_shutdown = 1;
pthread_mutex_unlock (&model_lock);
break;
}
/* Start a new cycle */
status_start_cycle(program_status);
/* Read a new custom report and update the predictive model */
update_model(gr, reports[report_counter]);
report_counter++;
/* Updating is done */
model_is_updating = 0;
/* Model can't be updated until client announces the end of the cycle
* that has just started */
model_can_update = 0;
/* Tell client to start sending requests from the new cycle */
if (!signal_start_cycle())
{
program_shutdown = 1;
pthread_mutex_unlock (&model_lock);
break;
}
/* Signal listener thread that a new cycle has begun */
pthread_cond_signal(&listen_cond);
/* Unlock */
pthread_mutex_unlock (&model_lock);
}
/* Finish threads */
pthread_cancel(listen_thread);
pthread_cancel(queries_thread);
pthread_join(listen_thread, NULL);
pthread_join(queries_thread, NULL);
/* Free memory */
adj_list_free_all2(&gr);
file_reports_free_all(&reports);
pthread_cond_destroy(&model_cond);
pthread_cond_destroy(&listen_cond);
pthread_mutex_destroy(&model_lock);
status_free(&program_status);
return 1;
}
void *
fifo_predictions_threaded(void * data)
{
Graph_Adj_List * gr = (Graph_Adj_List *) data;
/* Set thread cancel type */
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
while (!program_shutdown)
{
pthread_mutex_lock(&model_lock);
/* Pause reading from named pipe while the model is being updated */
while(model_is_updating)
pthread_cond_wait(&listen_cond, &model_lock);
pthread_mutex_unlock(&model_lock);
/* Read URLs from named pipe */
fifo_predictions(gr, program_config.fifo);
}
pthread_exit(NULL);
return NULL;
}
int
fifo_predictions(Graph_Adj_List * gr, const u8 * fifo)
{
u8 cad[BUFLEN] = { '\0' };
u8 * ini = NULL, * fin = NULL, * fullurl = NULL;
int i, fifo_descriptor, read_urls = 0, fullurl_len = 0, incomplete_url = 1;
FILE * fifo_file = NULL;
/* Open fifo in blocking mode */
fifo_descriptor = open(CHAR_CAST fifo, O_RDONLY);
/* Open fifo as a stream */
// fifo_file = fopen(fifo, "r");
// if (!fifo_file)
if (fifo_descriptor == -1)
return 0;
/* If fifo is opened as a stream, set it line buffered */
// setlinebuf(fifo_file);
do
{
if ((i = read(fifo_descriptor, cad, BUFLEN - 1)) == -1)
// if ( fgets(cad, BUFLEN-1, fifo_file) == NULL)
ERROR(__FILE__, __FUNCTION__, __LINE__, "Fifo read error");
else
{
// i = strlen(cad);
cad[i] = '\0';
read_urls = 0;
if (i > 0)
{
int j = 0;
for (j = 0, ini = cad, fin = NULL ; cad[j] != '\0'; j++)
{
if (cad[j] == '\n')
{
/* Save URL */
fin = &cad[j];
ini = (*ini == '\n' ? ini + 1 : ini);
/* Check if string is a continuation of the previously read URL */
read_urls = fin - ini;
read_urls = read_urls >= 0 ? read_urls : 0;
/* Save URL in fullurl string */
fullurl = realloc(fullurl, fullurl_len + read_urls + 1);
memcpy(&fullurl[fullurl_len], ini, read_urls);
fullurl[fullurl_len + read_urls] = '\0';
ini = fin;
incomplete_url = fullurl_len = 0;
/* Ask the model for predictions and fetch them */
fetch_url_predictions2(gr, fullurl);
u8_free(&fullurl);
} else
incomplete_url = 1;
}
if (incomplete_url)
{
ini = (*ini == '\n' ? ini + 1 : ini);
read_urls = &cad[j] - ini;
read_urls = read_urls >= 0 ? read_urls : 0;
fullurl = realloc(fullurl, fullurl_len + read_urls);
memcpy(&fullurl[fullurl_len], ini, read_urls);
fullurl_len += read_urls;
}
}
}
} while (i > 0);
close(fifo_descriptor);
// fclose (fifo_file);
return 1;
}
int
fetch_url_predictions2(Graph_Adj_List * gr, u8 * in_url)
{
String * string_url = NULL;
Headnode * head = NULL;
LinkedList * list = NULL;
LinkedListElem * elem = NULL;
/* Use custom string type */
string_url = string_create_no_len(in_url);
if (!string_url)
return 0;
pthread_mutex_lock(&model_lock);
/* Get URL node */
head = adj_list_get_node(gr, string_url);
if (head)
{
/* Get predictions (URLs) as a linked list */
list = adj_list_predictions_to_list(head);
if (!list)
{
string_free_all(&string_url);
return 0;
}
pthread_mutex_unlock(&model_lock);
/* Callback fetches URLs */
list->callback = &curl_callback_void;
if (!pool)
pool = threadpool_create(THREADS_LEN, QUEUE_LEN, 0);
/* Load URLs to be fetched to threadpool's task queue */
for (elem = list->first; elem; elem = elem->next)
{
CallbackArg arg;
arg.data = arg.copy(elem->data);
threadpool_add_copy_arg(pool, list->callback, &arg, 1, sizeof(arg), 0);
}
linked_list_free_all(&list);
}
pthread_mutex_unlock(&model_lock);
string_free_all(&string_url);
return 1;
}
fetch.c
void
curl_callback_void(void * data)
{
CallbackArg * arg = (CallbackArg *) data;
char * url = (char *) arg->data;
fetch_url(url);
}
static size_t
write_data(void *buffer, size_t size, size_t nmemb, void *userp)
{
return size * nmemb;
}
int
fetch_url(char * url)
{
CURL *curl;
CURLcode res;
struct timeval time;
char * time_string = NULL;
curl = curl_easy_init();
if (curl)
{
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, &write_data);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, NULL);
curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1);
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1);
curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1);
curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 15);
curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10);
/* Perform the request, res will get the return code */
res = curl_easy_perform(curl);
gettimeofday(&time, NULL);
time_string = timeval_to_str(&time);
/* Check for errors */
if (res != CURLE_OK)
{
fprintf(stderr, "\ntime %s curl_easy_perform() (url %s) failed: %s\n",
time_string, url, curl_easy_strerror(res));
}
else
{
fprintf(stderr, "\ntime %s curl_easy_perform() (url %s) fetched ok\n",
time_string, url);
}
fflush(stderr);
free (time_string);
curl_easy_cleanup(curl);
}
return 0;
}
network.c
/*
* Code based on Beej's Networking Guide
*/
#define MSG_LEN 5
#define QUEUE_SIZE 5
extern pthread_mutex_t model_lock;
extern pthread_cond_t model_cond;
extern short int model_can_update, program_shutdown;
extern Config program_config;
// get sockaddr, IPv4 or IPv6:
static void *
get_in_addr(struct sockaddr *sa) {
if (sa->sa_family == AF_INET) {
return &(((struct sockaddr_in*) sa)->sin_addr);
}
return &(((struct sockaddr_in6*) sa)->sin6_addr);
}
void *
listen_end_of_cycle(void * data)
{
int sockfd, new_fd; // listen on sock_fd, new connection on new_fd
struct addrinfo hints, *servinfo, *p;
struct sockaddr_storage their_addr; // connector's address information
socklen_t sin_size;
int yes = 1;
char s[INET_ADDRSTRLEN], msg[MSG_LEN], *str = NULL;
int rv;
int read_bytes;
struct timeval actual_time;
/* Set thread cancel type */
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL );
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE; // use my IP
if ((rv = getaddrinfo(NULL, program_config.listen_port, &hints, &servinfo))
!= 0) {
fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rv));
return "error";
}
// loop through all the results and bind to the first we can
for (p = servinfo; p != NULL ; p = p->ai_next) {
if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol))
== -1) {
perror("server: socket");
continue;
}
if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int))
== -1) {
perror("setsockopt");
return "error";
}
if (bind(sockfd, p->ai_addr, p->ai_addrlen) == -1) {
close(sockfd);
perror("server: bind");
continue;
}
break;
}
if (p == NULL ) {
fprintf(stderr, "server: failed to bind\n");
return "error";
}
freeaddrinfo(servinfo); // all done with this structure
if (listen(sockfd, QUEUE_SIZE) == -1) {
perror("listen");
return "error";
}
while (!program_shutdown)
{
sin_size = sizeof their_addr;
new_fd = accept(sockfd, (struct sockaddr *) &their_addr, &sin_size);
if (new_fd == -1) {
perror("accept");
continue;
}
inet_ntop(their_addr.ss_family,
get_in_addr((struct sockaddr *) &their_addr), s, sizeof s);
if ((read_bytes = recv(new_fd, msg, MSG_LEN - 1, 0)) == -1) {
perror("recv");
close(new_fd); /* don't leak the accepted fd on the error path */
continue;
}
close(new_fd);
msg[read_bytes] = '\0';
/* Check received message */
if (strcmp(msg, "DONE")) {
perror("Not valid message");
continue;
}
printf("\ngot \"DONE\" from %s\n", s);
fflush(stdout);
/* Lock */
pthread_mutex_lock(&model_lock);
/* Flag used by main thread to allow model update */
model_can_update = 1;
/* Signal model can be updated */
pthread_cond_signal(&model_cond);
/* Unlock */
pthread_mutex_unlock(&model_lock);
}
close(sockfd);
pthread_exit(NULL);
return "ok";
}
int signal_start_cycle(void) {
int sockfd;
struct addrinfo hints, *servinfo, *p;
int rv;
char s[INET6_ADDRSTRLEN], *str = NULL;
struct timeval actual_time, aux_time;
struct timeval connect_timeout = { 15, 0 }, max_connect_time = { 0, 0 };
short int connected = 0;
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
if ((rv = getaddrinfo(program_config.client_ip, program_config.client_port,
&hints, &servinfo)) != 0) {
fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rv));
return 0;
}
gettimeofday(&aux_time, NULL);
timeval_add(aux_time, connect_timeout, &max_connect_time);
/* Try several times to connect to the remote side */
do {
// loop through all the results and connect to the first we can
for (p = servinfo; p != NULL ; p = p->ai_next) {
if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol))
== -1) {
perror("client: socket");
continue;
}
gettimeofday(&actual_time, NULL )
printf("\ntrying to connect %s\n", program_config.client_ip);
fflush(stdout);
if (connect(sockfd, p->ai_addr, p->ai_addrlen) == -1) {
close(sockfd);
perror("client: connect");
continue;
}
connected = 1;
break;
}
} while (!connected && !timeval_greater_than(actual_time, max_connect_time));
if (p == NULL ) {
fprintf(stderr, "client: failed to connect\n");
return 0;
}
inet_ntop(p->ai_family, get_in_addr((struct sockaddr *) p->ai_addr), s,
sizeof s);
printf("\nMAIN THREAD: connecting to %s\n", s);
fflush(stdout);
freeaddrinfo(servinfo); // all done with this structure
if (send(sockfd, "DONE", 4, 0) == -1)
{
perror("send");
return 0;
}
printf("\nsent \"DONE\" to %s\n", s);
fflush(stdout);
close(sockfd);
return 1;
}