Serving large files (>2GB) with libevent on 32-bit system - c

Preamble: a lightweight HTTP server written in C, based on libevent v2 (evhttp); Linux, ARM, glibc 2.3.4.
I'm trying to serve big files (over 2 GB) using evbuffer_add_file() on a 32-bit system.
libevent was compiled with the -D_FILE_OFFSET_BITS=64 flag. Here is the simplified code:
int fd = -1;
if ((fd = open(path, O_RDONLY)) < 0) {
    // error handling
}
struct stat st;
if (fstat(fd, &st) < 0) {
    // error handling
}
struct evbuffer *buffer = evbuffer_new();
evbuffer_set_flags(buffer, EVBUFFER_FLAG_DRAINS_TO_FD); // force using system's sendfile
evbuffer_add_file(buffer, fd, 0, st.st_size);
evhttp_send_reply(req, 200, NULL, buffer);
evbuffer_free(buffer);
st.st_size has the correct value, in this case 4913809524, but the Content-Length response header comes out as 618842228, which is exactly 4913809524 modulo 2^32, i.e. the size truncated to 32 bits. Even if I set the Content-Length header to the proper value myself, the transfer still stops at 618842228 bytes...
Am I missing something or doing something wrong? Is this possible at all?
Thanks in advance.

As I said in a comment, the problem is apparently not in libevent itself but in the system's sendfile implementation. So, with a little workaround, I found a way to solve this using the evhttp_send_reply_(start|chunk|end) family of functions:
struct chunk_req_state {
    struct evhttp_request *req;
    int fd;
    long chunksize;
    off_t filesize;
    off_t offset;
};
static void
chunked_trickle_cb(evutil_socket_t fd, short events, void *arg)
{
    struct evbuffer *evb = evbuffer_new();
    struct chunk_req_state *state = arg;
    struct timeval when = { 0, 0 };
    ev_ssize_t read;
    if (lseek(state->fd, state->offset, SEEK_SET) == -1) {
        evbuffer_free(evb);
        close(state->fd);
        free(state);
        return;
    }
    read = evbuffer_read(evb, state->fd, (ev_ssize_t) state->chunksize);
    if (read == -1) {
        evbuffer_free(evb);
        evhttp_send_reply_end(state->req);
        close(state->fd);
        free(state);
        return;
    }
    evhttp_send_reply_chunk(state->req, evb);
    evbuffer_free(evb);
    state->offset += read;
    if (state->offset < state->filesize) {
        // there's more data to send
        event_base_once(ebase, -1, EV_TIMEOUT, chunked_trickle_cb, state, &when);
    } else {
        // reached the end
        evhttp_send_reply_end(state->req);
        close(state->fd);
        free(state);
    }
}
int fd = -1;
if ((fd = open(path, O_RDONLY)) < 0) {
    // error handling
}
struct stat st;
if (fstat(fd, &st) < 0) {
    // error handling
}
struct timeval when = { 0, 0 };
struct chunk_req_state *state = malloc(sizeof(struct chunk_req_state));
memset(state, 0, sizeof(struct chunk_req_state));
state->req = req;
state->fd = fd;
state->chunksize = 10*1024*1024;
state->filesize = st.st_size;
state->offset = 0;
// set Content-Length explicitly to prevent chunked transfer encoding
char length[32];
snprintf(length, sizeof(length), "%lld", (long long) st.st_size);
evhttp_add_header(evhttp_request_get_output_headers(req), "Content-Length", length);
evhttp_send_reply_start(req, 200, NULL);
event_base_once(ebase, -1, EV_TIMEOUT, chunked_trickle_cb, state, &when);
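One practical follow-up (my own sketch, not part of the original workaround; it reuses the chunk_req_state, req and state names from above): if the client disconnects in the middle of the transfer, the state struct and the file descriptor would otherwise leak, so a connection close callback can release them. In a complete implementation the two cleanup paths must agree on who frees state, e.g. chunked_trickle_cb could clear the close callback with evhttp_connection_set_closecb(conn, NULL, NULL) before it frees.
static void
chunked_close_cb(struct evhttp_connection *conn, void *arg)
{
    // the client went away before the whole file was sent: release our state
    struct chunk_req_state *state = arg;
    close(state->fd);
    free(state);
}
// registered right after evhttp_send_reply_start(req, 200, NULL):
evhttp_connection_set_closecb(evhttp_request_get_connection(req),
                              chunked_close_cb, state);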

Related

Why isn't the Kernel receiving my generic netlink messages?

I'm trying to send nested attributes from user space to the kernel using generic netlink. The function nl_send_auto() returns 52 (which should be the number of bytes sent to the kernel), but the kernel isn't receiving the messages. Is there some problem with my approach? Here is the code that I wrote in user space:
int err = -1;
struct nl_msg *msg;
struct nlattr *attr;
struct nl_sock *sock;
int family;
int send = 0;
if ((sock = nl_socket_alloc()) == NULL)
    return err;
if ((err = genl_connect(sock)))
    return err;
if ((family = genl_ctrl_resolve(sock, FAMILY)) < 0)
    return family;
if ((msg = nlmsg_alloc()) == NULL)
    return err;
if ((genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, FAMILY, 0,
        NLM_F_REQUEST, CREATE_STATE, 1)) == NULL)
    return err;
if (!(attr = nla_nest_start(msg, KLUA_NL_STATE))){
    nla_nest_cancel(msg, attr);
    return err;
}
if ((ret = nla_put_string(msg, STATE_NAME, cmd->name)) ||
    (ret = nla_put_u32(msg, MAX_ALLOC, cmd->maxalloc)) ||
    (ret = nla_put_u32(msg, CURR_ALLOC, cmd->curralloc))
)
    return err;
nla_nest_end(msg, attr);
if ((send = nl_send_auto(ctrl->sock, msg)) < 0)
    return send;
printf("All done sended %d bytes\n", send);
nlmsg_free(msg);
This code prints 52, which is the number of bytes sent to the kernel.
The FAMILY macro is defined as (both in kernel and user space):
#define FAMILY "family"
My netlink attributes are (both for kernel and user space):
enum {
    KLUA_NL_STATE,
    STATE_NAME,
    MAX_ALLOC,
    CURR_ALLOC,
    ATTR_COUNT,
#define ATTR_MAX (ATTR_COUNT - 1)
};
My enum for operation is:
enum {
    CREATE_STATE = 16,
};
And my kernel code is:
struct nla_policy lunatik_policy[ATTR_MAX] = {
    [KLUA_NL_STATE] = { .type = NLA_NESTED },
};
static int klua_create_state(struct sk_buff *buff, struct genl_info *info);
static const struct genl_ops l_ops[] = {
    {
        .cmd = CREATE_STATE,
        .doit = klua_create_state,
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,2,0)
        /* Before kernel 5.2.0, each operation has its own policy */
        .policy = lunatik_policy
#endif
    },
};
#define KLUA_NL_STATE_ATTRS_COUNT 3
struct genl_family lunatik_family = {
    .name = FAMILY,
    .version = 1,
    .maxattr = ATTR_MAX,
    .netnsok = true,
    .policy = lunatik_policy,
    .module = THIS_MODULE,
    .ops = l_ops,
    .n_ops = ARRAY_SIZE(l_ops),
};
static int klua_create_state(struct sk_buff *buff, struct genl_info *info)
{
    pr_info("I received the message\n");
    return 0;
}
This code doesn't print anything on dmesg, and I would like to know why.
Your actual problems
During the Linux 5.2 refactoring, the semantics of the NLA_F_NESTED flag changed somewhat. It appears you now need to include it whenever you call nla_nest_start():
if (!(attr = nla_nest_start(msg, KLUA_NL_STATE))){
    ...
}
Should be
if (!(attr = nla_nest_start(msg, NLA_F_NESTED | KLUA_NL_STATE))){
    ...
}
Yes, I'm well aware the libnl library should obviously do this for you, and will perhaps do so in the future, but unfortunately this is where we are now.
Also:
enum {
    KLUA_NL_STATE,
    ...
};
Attribute zero is always reserved. You need to change that into
enum {
    KLUA_NL_STATE = 1,
    ...
};
Just FYI: operation zero is also reserved, so it's fortunate that you chose 16. But do keep it in mind in the future.
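For reference (my addition, not part of the original answer): netlink code usually reserves index zero with an explicit *_UNSPEC entry in both enums, which makes the reservation hard to forget. The *_UNSPEC names below are made up for illustration:
enum {
    KLUA_NL_ATTR_UNSPEC,    /* hypothetical name: attribute 0 stays reserved */
    KLUA_NL_STATE,
    STATE_NAME,
    MAX_ALLOC,
    CURR_ALLOC,
    ATTR_COUNT,
#define ATTR_MAX (ATTR_COUNT - 1)
};
enum {
    KLUA_NL_CMD_UNSPEC,     /* hypothetical name: command 0 stays reserved */
    CREATE_STATE = 16,      /* any non-zero value is fine; the question uses 16 */
};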
Syntactic issues
These are probably just copy-paste errors, but I'm including them anyway for the benefit of other people landing on this page looking for examples.
if ((genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, FAMILY, 0,
        NLM_F_REQUEST, CREATE_STATE, 1)) == NULL)
    return err;
Should be
if ((genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
        NLM_F_REQUEST, CREATE_STATE, 1)) == NULL)
    return err;
Also:
if ((ret = nla_put_string(msg, STATE_NAME, cmd->name)) ||
    (ret = nla_put_u32(msg, MAX_ALLOC, cmd->maxalloc)) ||
    (ret = nla_put_u32(msg, CURR_ALLOC, cmd->curralloc))
)
    return err;
Should be
if ((err = nla_put_string(msg, STATE_NAME, cmd->name)) ||
    (err = nla_put_u32(msg, MAX_ALLOC, cmd->maxalloc)) ||
    (err = nla_put_u32(msg, CURR_ALLOC, cmd->curralloc))
)
    return err;
Also:
if ((send = nl_send_auto(ctrl->sock, msg)) < 0)
    return send;
Should be
if ((send = nl_send_auto(sock, msg)) < 0)
    return send;
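Putting all of the fixes together, a corrected user-space routine could look roughly like this. This is my own reconstruction, not code from the original post: FAMILY, CREATE_STATE and the attribute enum (with attribute numbering starting at 1) are assumed to be defined as discussed above, and error handling is kept minimal:
#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
static int send_create_state(const char *name, uint32_t maxalloc, uint32_t curralloc)
{
    struct nl_sock *sock;
    struct nl_msg *msg = NULL;
    struct nlattr *attr;
    int family, err;
    if ((sock = nl_socket_alloc()) == NULL)
        return -1;
    if ((err = genl_connect(sock)) < 0)
        goto out;
    if ((family = genl_ctrl_resolve(sock, FAMILY)) < 0) {
        err = family;
        goto out;
    }
    err = -1;
    if ((msg = nlmsg_alloc()) == NULL)
        goto out;
    if (genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
                    NLM_F_REQUEST, CREATE_STATE, 1) == NULL)
        goto out;
    /* NLA_F_NESTED must be set explicitly for kernels >= 5.2 */
    if (!(attr = nla_nest_start(msg, NLA_F_NESTED | KLUA_NL_STATE)))
        goto out;
    if ((err = nla_put_string(msg, STATE_NAME, name)) ||
        (err = nla_put_u32(msg, MAX_ALLOC, maxalloc)) ||
        (err = nla_put_u32(msg, CURR_ALLOC, curralloc)))
        goto out;
    nla_nest_end(msg, attr);
    if ((err = nl_send_auto(sock, msg)) < 0)
        goto out;
    err = 0;                  /* message handed over to the kernel */
out:
    if (msg)
        nlmsg_free(msg);
    nl_socket_free(sock);
    return err;
}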

C: Remaining bytes on AF_UNIX socket

I'm having problems with AF_UNIX socket communication: after writing a data buffer there seem to be some leftover bytes to read, and I do not know where they come from.
I am writing a multithreaded server program in C that communicates with clients through AF_UNIX sockets; it implements a simple chatroom. Among other things, the server must support file transfers between clients and the server, and I ran into problems when sending a fairly large file (269 KB) from the server to a client. (With smaller files I have no problems.)
For the file transfer I use mmap(), which returns a pointer to a mapping of the file I want to send; then I use write() to write that data to the socket connected to the client that must receive the file.
After the write() call I check that the returned value equals the file size (always true).
The client, after receiving the file, checks the size of the data read (always correct) and then waits for other messages by calling a blocking read(). This is where I hit the error: the client reads something that should not be there, as if something were left to read on the socket.
I've been debugging this part (both server and client) for two days and I have not yet been able to find the origin of the problem.
I am sure that no other thread writes to the same socket at the same time.
Does anyone have an idea of what the cause of this error might be?
I'll post some useful code, following a normal sequence of operations:
First of all, the message structures:
struct message_hdr
{
    op_t op;
    char sender[MAX_NAME_LENGTH+1];
};
struct message_data_hdr{
    char receiver[MAX_NAME_LENGTH+1];
    unsigned int len;
};
struct message_data
{
    message_data_hdr_t hdr;
    char *buf;
};
struct message
{
    message_hdr_t hdr;
    message_data_t data;
};
A server->client file transfer starts with the server sending a message_hdr_t to a client that is waiting on a read() (the client expects to receive only a message_hdr_t).
int sendHeader(long fd, message_hdr_t* hdr)
{
    if(hdr == NULL || fd < 0) {errno = EINVAL; return -1;}
    int test;
    struct iovec iov;
    iov.iov_base = hdr;
    iov.iov_len = sizeof(message_hdr_t);
    test = writev(fd, &iov, 1);
    return test;
}
The client understands from the operation code (message.hdr.op) that it is a file-type message and begins to wait for the file, so the server sends it:
int sendData(long fd, message_data_t *msg)
{
    if(msg == NULL || fd < 0) {errno = EINVAL; return -1;}
    int test;
    struct iovec iov;
    iov.iov_base = &(msg->hdr);
    iov.iov_len = sizeof(message_data_hdr_t);
    test = writev(fd, &(iov), 1);
    if(test == -1){return -1;}
    if (msg->hdr.len != 0)
    {
        test = write(fd, msg->buf, msg->hdr.len);
        if(test <= 0)
            return -1;
    }
    return test;
}
And the client reads it:
int readData(long fd, message_data_t *data)
{
    if(data == NULL || fd < 0) {errno = EINVAL; return -1;}
    int test;
    struct iovec iov;
    iov.iov_base = &(data->hdr);
    iov.iov_len = sizeof(message_data_hdr_t);
    test = readv(fd, &iov, 1);
    if(test <= 0){return -1;}
    if(data->hdr.len != 0)
    {
        data->buf = malloc(data->hdr.len);
        if(data->buf == NULL){return -1;}
        test = read(fd, data->buf, data->hdr.len);
        if((unsigned int)test != data->hdr.len)
            return -1;
    }
    return test;
}
At this point the client has received the file, and it goes back to waiting for new messages:
int readMsg(long fd, message_t *msg)
{
    if(msg == NULL || fd < 0) {errno = EINVAL; return -1;}
    int test;
    test = readHeader(fd, &(msg->hdr));
    if(test == -1 || test == 0){return -1;}
    test += readData(fd, &(msg->data));
    return test;
}
This is the point where the client should simply wait, because there are no incoming messages; instead it reads something that I do not know where it comes from.
When I print this unwanted message with GDB, it shows:
{hdr = {op = 512,
    sender = "\000\000\020G\032\324\t\000\000\n\000\000\000\000\030\021B\bC\n\000\000\v\000\000\000\000\021D\v\222\000"},
 data = {hdr = {receiver = "\000\000\000\000\021E\022C\n\000\000\b\v\000\000\000\000\021F\020I\n\000\000\020\000\006\b\002\n\000\000\006",
    len = 131072},
  buf = 0x7ffff7f2f010 ""}}
Of course this is meaningless.
I hope this description is useful.
Thank you all in advance.
OK, I solved my issue.
As noted in the comments, the problem was due to the missing check for partial reads and writes.
Now the function readData() looks like this:
int readData(long fd, message_data_t *data)
{
    if(data == NULL || fd < 0) {errno = EINVAL; return -1;}
    int test;
    char* ph;
    unsigned int rd = 0;
    struct iovec iov;
    iov.iov_base = &(data->hdr);
    iov.iov_len = sizeof(message_data_hdr_t);
    test = readv(fd, &iov, 1);
    if(test <= 0){return -1;}
    if(data->hdr.len != 0)
    {
        data->buf = malloc(data->hdr.len);
        if(data->buf == NULL){return -1;}
        ph = data->buf;
        while (rd < data->hdr.len)
        {
            test = read(fd, ph, data->hdr.len - rd);
            if(test == -1)
                return -1;
            else if(test == 0)
            {
                errno = ENOENT;
                return -1;
            }
            rd += test;
            ph += test;
        }
    }
    return rd;
}
and sendData():
int sendData(long fd, message_data_t *msg)
{
    if(msg == NULL || fd < 0) {errno = EINVAL; return -1;}
    int test;
    char* ph;
    unsigned int wr = 0;
    struct iovec iov;
    iov.iov_base = &(msg->hdr);
    iov.iov_len = sizeof(message_data_hdr_t);
    test = writev(fd, &(iov), 1);
    if(test == -1){return -1;}
    if(msg->hdr.len != 0)
    {
        ph = msg->buf;
        while (wr < msg->hdr.len)
        {
            test = write(fd, ph, msg->hdr.len - wr);
            if(test == -1)
                return -1;
            else if(test == 0)
            {
                errno = ENOENT;
                return -1;
            }
            wr += test;
            ph += test;
        }
    }
    return test;
}
With this change I no longer see the error.
Thanks for the help!

Read() stuck on trying to read file

I have a checkpoint file that stores a server state. This state represents serialized commands that pass through my network.
I'm trying to read the file, but it gets stuck in the read while loop.
My read function:
struct message_t *pmanager_readop(int fd){
    if (fd < 0) return NULL;
    // Variables
    char *buffer = NULL;
    int result, msg_size;
    struct message_t *msg;
    // Check if file has data
    lseek(fd, 0, SEEK_END);
    int size_ckp = lseek(fd, 0, SEEK_CUR);
    if (size_ckp <= 0)
        return NULL;
    // Read message size
    result = read_all(fd, (char *) &msg_size, 4);
    if (result < 0) {
        return NULL;
    }
    msg_size = ntohl(msg_size);
    // ............
My read_all() function:
int read_all(int sock, char *buf, int len){
    int bufsize = len;
    while(len > 0){
        int res = read(sock, buf, len);
        if(res < 0){
            if (errno == EINTR) continue;
            return res;
        }
        buf += res;
        len -= res;
    }
    return bufsize;
}
I use this same function to read data from my server/client connection with the same serialization and format but with a socket descriptor, and it works perfectly.
You ought to handle the case where read() returns 0, which tells you that the other side shut down the connection (when reading from a socket descriptor) or that end-of-file was reached (when reading from a file descriptor).
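A minimal sketch of what that could look like, adapted from the read_all() above (my variant, not the poster's final code): stop on a zero return and report the number of bytes actually read, so the caller can detect a short read:
#include <errno.h>
#include <unistd.h>
int read_all(int fd, char *buf, int len)
{
    int total = 0;
    while (total < len) {
        int res = read(fd, buf + total, len - total);
        if (res < 0) {
            if (errno == EINTR) continue;   /* interrupted: retry */
            return res;                     /* real error */
        }
        if (res == 0)                       /* EOF (file) or peer shutdown (socket) */
            break;
        total += res;
    }
    return total;   /* caller compares against len to detect a short read */
}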

Slow network connection speed with Berkeley sockets

My friend and I have written a small download manager in C that splits the target file into several parts and downloads each part in its own POSIX thread. Everything seems to work fine, except that it is very slow compared to other download managers like wget (which, as far as I know, does not split the file into chunks). In every thread, we use a simple loop to download each part from a socket:
while ((nrecv = recv(sockfd, downbuf, sizeof(downbuf), 0)) > 0)
{
    if ((nwrite = write(fd, downbuf, nrecv)) != nrecv)
        die("write");
    totalrw += nwrite;
}
/* ... */
I've tried several different sizes for "downbuf", like 2014, 2048, 4096 and 8192, but with not much difference. It takes almost 45 seconds to download a 270 MB file, while wget downloads the same file in just 5 seconds. Both the server and the client are on the same host. Why is the difference so vast? Could you please tell me what trick wget uses?
This is how I make the request to the server:
sockfd = make_conn(website);
hdr.rq_buf = headerbuf;      /* buffer to save response header */
hdr.rq_bufsize = sizeof(headerbuf);
hdr.rq_host = website;
hdr.rq_fpath = filepath;     /* target file */
hdr.rq_flags = S_HEADFLAG;   /* use head method at this moment
                                to get the total file size */
error = headerinit(hdr);
if (error)
{
    die("headerinit()");
}
send(sockfd, headerbuf, strlen(headerbuf), 0); /* send the initial request */
recv(sockfd, respbuf, sizeof(respbuf), 0);
if (-1 == response_proc(respbuf, strlen(respbuf), &resp))
{
    myperror("response_proc()");
    exit(EXIT_FAILURE);
} /* process the header */
size_t sz = (size_t)strtol(resp.rs_content_length, NULL, 10);
divide(sz, chunks, numcons); /* divide the file into several parts */
for (int i = 0; i < numcons; i++)
{
    /* populate data needed for threads */
    args[i].t_hdr.rq_offset.c_start = chunks[i].c_start; /* where to start */
    args[i].t_hdr.rq_offset.c_end = chunks[i].c_end;     /* download up to this point */
    args[i].t_hdr.rq_host = strdup(website);
    args[i].t_hdr.rq_fpath = strdup(filepath);
    snprintf(args[i].t_fname, BUFSIZ, "%sp%i", outfile, i);
    args[i].t_order = i;
}
for (i = 0; i < numcons; i++)
{
    if (0 != pthread_create(&threads[i], NULL, thread_main,
                            &args[i]))
    {
        die("pthread_create()");
    }
}
for (i = 0; i < numcons; i++)
{
    if (0 != pthread_join(threads[i], &thread_status))
    {
        die("pthread_join()");
    }
}
http_request_header_t is defined as:
typedef struct {
    void *rq_buf;
    size_t rq_bufsize;
    char *rq_host;
    char *rq_fpath;
    chunk_t rq_offset;
    int rq_flags;
} http_request_header_t;
and http_response_header_t is defined as:
typedef struct {
#ifdef WITH_EXTRA_HEADERS
    char *rs_version;
#endif
    char *rs_status;
    char *rs_date;
    char *rs_server;
    char *rs_last_modified;
    char *rs_accept_ranges;
    char *rs_content_length;
    char *rs_connection;
    char *rs_content_type;
} http_response_header_t;
This is the main routine that every thread uses:
void *
thread_main(void *arg_orig)
{
    thr_arg_t *arg = (thr_arg_t*)arg_orig;
    int fd, sockfd;
    http_response_header_t resp;
    size_t totalrw = 0;
    ssize_t nrecv;
    char *line = malloc(BUFSIZ * sizeof(char));
    char hdrbuf[BUFSIZ];
    char respbuf[BUFSIZ];
    mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP;
    ssize_t nwrite = 0;
    void *downbuf = malloc(DOWNBUF * sizeof(char));
    sockfd = make_conn(arg->t_hdr.rq_host);
    fd = open(arg->t_fname, O_WRONLY | O_CREAT | O_TRUNC | O_EXCL, mode);
    if (-1 == fd)
    {
        die("thread_open(): fd");
    }
    arg->t_hdr.rq_flags = S_OFFSET;
    arg->t_hdr.rq_buf = hdrbuf;
    arg->t_hdr.rq_bufsize = sizeof(hdrbuf);
    headerinit(arg->t_hdr);
    //printf("%s\n", arg->t_hdr.rq_buf);
    sendn(sockfd, hdrbuf, strlen(hdrbuf), 0);
    /* first, read the header */
    while ((nrecv = readheader(sockfd, &line, BUFSIZ)) > 0)
    {
        strncpy(respbuf + nwrite, line, sizeof(respbuf) - nwrite);
        nwrite += nrecv;
    }
    nwrite = 0;
    //printf("\n\n%s\n\n", respbuf);
    if (-1 == response_proc(respbuf, strlen(respbuf), &resp))
    {
        myperror("thread_response_proc()");
        exit(EXIT_FAILURE);
    }
    if (strncmp(resp.rs_status, "416", 3) == 0)
    {
        fprintf(stderr, "Partial content is not supported by the server\n");
        exit(EXIT_FAILURE);
    }
    /* now read the actual data */
    while ((nrecv = recv(sockfd, downbuf, sizeof(downbuf), 0)) > 0)
    {
        if ((nwrite = write(fd, downbuf, nrecv)) != nrecv)
            die("write");
        totalrw += nwrite;
    }
    if(-1 == nrecv)
    {
        die("recv()");
    }
    close(sockfd);
    close(fd);
    idxwr(arg->t_fname, arg->t_order, totalrw);
    return ((void*)0);
}
You haven't posted enough here, but usually the cause of an unexpected slowdown in TCP is Nagle's algorithm. It is triggered when you write small chunks of data to a socket: these would be inefficient to put on the wire on their own, so the TCP stack waits for the user program to add more data before it sends out the packet. Only if nothing is added for 'a while' does it actually send an incomplete packet.
This can be disabled, but as your aim is an efficient bulk transfer you probably shouldn't.
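For completeness (my addition; as said above, for a bulk transfer you probably want to leave Nagle on): the algorithm is disabled per socket with the TCP_NODELAY option:
#include <netinet/in.h>
#include <netinet/tcp.h>   /* TCP_NODELAY */
#include <sys/socket.h>
/* Sketch: disable Nagle's algorithm on a connected TCP socket.
 * Returns 0 on success, -1 on error (setsockopt sets errno). */
static int disable_nagle(int sockfd)
{
    int one = 1;
    return setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}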

event_new() function fails on hpux itanium

I'm trying to debug code that uses the libevent library. In that library there is a function, event_new(), that is supposed to create an event that invokes my callback. Somehow, after I dispatch the event base, the callback is never called or reached. This problem only happens on HP-UX Itanium; the same code works on HP-UX PA-RISC, Red Hat, AIX, and Solaris. Is there anything specific that needs to be set?
This is part of the code
int ttypread (int fd, Header *h, char **buf)
{
    int c,k;
    struct user_data user_data;
    struct bufferevent *in_buffer;
    struct event_config *evconfig;
    log_debug("inside ttypread");
    in_buffer = NULL;
    user_data.fd = fd;
    user_data.h = h;
    user_data.buf = buf;
    log_debug("from user_data, fd = %d",user_data.fd); // log_debug is a debugging function for me to check the values sent by the system. I use it to compare between each platform
    log_debug("from user_data, buf = %s",user_data.buf);
    log_debug("from user_data, h.len = %d",user_data.h->len);
    log_debug("from user_data, h.type = %d",user_data.h->type);
    evconfig = event_config_new();
    if (evconfig == NULL) {
        log_error("event_config_new failed");
        return -1;
    }
    if (event_config_require_features(evconfig, EV_FEATURE_FDS)!=0) {
        log_error("event_config_require_features failed");
        return -1;
    }
    base = event_base_new_with_config(evconfig);
    if (!base) {
        log_error("ttypread:event_base_new failed");
        return -1;
    }
    const char* method; // these 3 lines are the newly edited lines
    method = event_base_get_method(base);
    log_debug("ttyread is using method = %s",method);
    ev = event_new(base, fd, EV_READ|EV_PERSIST, ttypread_event_cb, &user_data);
    c = event_add(ev, NULL);
    log_debug("ttypread passed event_add with c value is %d",c);
    in_buffer = bufferevent_socket_new(base, STDIN_FILENO, BEV_OPT_CLOSE_ON_FREE);
    log_debug("ttypread passed bufferevent_socket_new");
    if(in_buffer == NULL){
        log_debug("problem with bufferevent_socket_new");
    }
    bufferevent_setcb(in_buffer, in_read_cb, NULL, in_event_cb, NULL);
    bufferevent_disable(in_buffer, EV_WRITE);
    bufferevent_enable(in_buffer, EV_READ);
    k = event_base_dispatch(base);
    log_debug("event_base have been dispatched"); // when looking at the debugging file, the other platforms go on to the ttypread_event_cb function. But for hpux itanium, it stays here.
    if (k == 0){
        log_debug("event_base_dispatch returned 0");
    } else if (k == -1){
        log_debug("event_base_dispatch returned -1");
    } else {
        log_debug("event_base_dispatch returned 1");
    }
    event_base_free(base);
    event_free(ev);
    log_debug("finish ttypread");
    log_debug("ttypread_ret will return [%d]",ttypread_ret);
    return ttypread_ret;
}
void ttypread_event_cb(evutil_socket_t fd, short events, void *arg)
{
    int nread;
    struct timeval t;
    struct user_data *user_data;
    user_data = (struct user_data*)arg;
    nread = 0;
    log_debug("inside ttypread_event_cb");
    if (events & EV_READ) {
        log_debug("got events & EV_READ");
        nread = ttyread(fd, user_data->h, user_data->buf);
        if (nread == -1) {
            ttypread_ret = -1;
            event_del(ev);
            event_base_loopexit(base, NULL);
        } else if (nread == 0) {
            if (access(input_filename, F_OK)!=0) {
                log_debug("cannot access [%s]",input_filename);
                tcsetattr(0, TCSANOW, &old); /* Return terminal state */
                exit(EXIT_SUCCESS);
            }
            t.tv_sec = 0;
            t.tv_usec = 250000;
            select(0, 0, 0, 0, &t);
        } else {
            ttypread_ret = 1;
            event_del(ev);
            event_base_loopexit(base, NULL);
        }
    }
    else if (events & EV_WRITE) {
        log_debug("got events & EV_WRITE");
    }
}
Not sure if this helps, but here is some info on the HP-UX Itanium box:
uname -a = HP-UX hpux-ita B.11.23 U ia64
If you need any additional info or other function declarations, just leave a comment and I will edit the question.
EDIT: I've added the event_base_get_method() call inside ttypread. Somehow on HP-UX Itanium it returns devpoll while the other platforms return poll. I'm not sure whether this is the problem, but if it is, is there any way for me to change it?
After checking the result of event_base_get_method(), I found out that only my HP-UX Itanium box was using the devpoll method. This is how I solved it:
char string[8] = "devpoll";
struct user_data user_data;
struct bufferevent *in_buffer;
struct event_config *evconfig;
const char *method;
const char *devpoll;
devpoll = string;
in_buffer = NULL;
user_data.fd = fd;
user_data.h = h;
user_data.buf = buf;
evconfig = event_config_new();
if (evconfig == NULL) {
    log_error("event_config_new failed");
    return -1;
}
if (event_config_require_features(evconfig, EV_FEATURE_FDS)!=0) {
    log_error("event_config_require_features failed");
    return -1;
}
if (event_config_avoid_method(evconfig, devpoll) != 0)
{
    log_error("Failed to ignore devpoll method");
}
This forces libevent to avoid the devpoll backend and use poll instead.
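As a sanity check (a small sketch reusing the calls already shown above; log_error and log_debug are the poster's own helpers), you can confirm which backend the resulting base actually picked:
base = event_base_new_with_config(evconfig);
if (!base) {
    log_error("event_base_new_with_config failed");
    return -1;
}
// should now report "poll" instead of "devpoll"
log_debug("libevent is using the %s method", event_base_get_method(base));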
