Unable to establish connection using OpenSSL BIO interface - c

I'm debugging in VS2010. BIO_do_connect() fails in the following code. What am I doing wrong?
(pBio is properly set up before use)
static const uint32_t kuSleepIntervalInMs = 50;
...
uint32_t uTimeTaken = 0;
...
BIO_set_nbio(pBio, 1);
for (;;)
{
    if (uTimeTaken > 10000)
        return ERR_CONNECTION_TIMED_OUT;

    if (BIO_do_connect(pBio) > 0)
        break;

    if (BIO_should_retry(pBio))
    {
        Sleep(kuSleepIntervalInMs);
        uTimeTaken += kuSleepIntervalInMs;
        continue;
    }

    BIO_free_all(pBio);
    return ERR_FAILED_TO_ESTABLISH_CONNECTION;
}
It appears that if I increase the sleep interval (for example to 500), BIO_do_connect works fine but I'd like to know why it fails with shorter interval values.

Since posting my original question, I've switched to using select(), so the problem no longer applies.
Instead of sleeping and accumulating
uTimeTaken += kuSleepIntervalInMs;
I'm now doing:
int nRet;
int fdSocket;
fd_set connectionfds;
struct timeval timeout;

BIO_set_nbio(pBio, 1);

nRet = BIO_do_connect(pBio);
if ((nRet <= 0) && !BIO_should_retry(pBio))
    // failed to establish connection.

if (BIO_get_fd(pBio, &fdSocket) <= 0)
    // failed to get fd.

if (nRet <= 0)
{
    FD_ZERO(&connectionfds);
    FD_SET(fdSocket, &connectionfds);

    timeout.tv_usec = 0;
    timeout.tv_sec = 10;

    nRet = select(fdSocket + 1, NULL, &connectionfds, NULL, &timeout);
    if (nRet == 0)
        // timeout has occurred.
}
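A follow-up sketch (reusing pBio and the error codes from above; this is only an assumption about the surrounding code): a writable socket just means the non-blocking connect has finished, not that it succeeded, so after the select() it's worth calling BIO_do_connect() one more time:
if (nRet > 0)
{
    /* select() reported the socket writable - confirm the connect outcome */
    nRet = BIO_do_connect(pBio);
    if (nRet <= 0 && !BIO_should_retry(pBio))
    {
        /* connect finished with an error */
        BIO_free_all(pBio);
        return ERR_FAILED_TO_ESTABLISH_CONNECTION;
    }
}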
See my other post.

Related

Understand V4L2 request buffer number

I'm a newbie at using V4L2 on an embedded device, and I have two questions related to V4L2 library usage.
The first is the buffer allocation for v4l2_requestbuffers:
struct v4l2_requestbuffers req;
CLEAR(req);
req.count = 4;
req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
req.memory = V4L2_MEMORY_MMAP;
and later it is used by:
if (req.count < 2) {
    fprintf(stderr, "Insufficient buffer memory on %s\n",
            dev_name);
    exit(EXIT_FAILURE);
}

buffers = calloc(req.count, sizeof(*buffers));
if (!buffers) {
    fprintf(stderr, "Out of memory\n");
    exit(EXIT_FAILURE);
}

for (n_buffers = 0; n_buffers < req.count; ++n_buffers) {
    struct v4l2_buffer buf;

    CLEAR(buf);

    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    buf.memory = V4L2_MEMORY_MMAP;
    buf.index = n_buffers;

    if (-1 == xioctl(fd, VIDIOC_QUERYBUF, &buf))
        errno_exit("VIDIOC_QUERYBUF");

    buffers[n_buffers].length = buf.length;
    buffers[n_buffers].start =
        mmap(NULL /* start anywhere */,
             buf.length,
             PROT_READ | PROT_WRITE /* required */,
             MAP_SHARED /* recommended */,
             fd, buf.m.offset);

    if (MAP_FAILED == buffers[n_buffers].start)
        errno_exit("mmap");
}
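(For context: between those two snippets the request itself is issued with VIDIOC_REQBUFS, which isn't shown above. The driver is allowed to grant fewer or more buffers than requested and writes the actual number back into req.count, which is why the count is re-checked afterwards. A sketch of that call, assuming the same xioctl/errno_exit/dev_name helpers as in the example:)
/* Ask the driver for the buffers; it may adjust req.count to what it can allocate. */
if (-1 == xioctl(fd, VIDIOC_REQBUFS, &req)) {
    if (EINVAL == errno)
        fprintf(stderr, "%s does not support memory mapping\n", dev_name);
    errno_exit("VIDIOC_REQBUFS");
}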
For capturing a still image, req.count is 1. For capturing video, do we need to tune req.count to a value that gives good fps? That is, do we have to change req.count from 2 (in this example) to a suitable number to get an acceptable fps?
The second question is about the timeout setting, as used in mainloop():
static void mainloop(void)
{
    unsigned int count;

    count = frame_count;

    while (count-- > 0) {
        for (;;) {
            fd_set fds;
            struct timeval tv;
            int r;

            FD_ZERO(&fds);
            FD_SET(fd, &fds);

            /* Timeout. */
            tv.tv_sec = 2;
            tv.tv_usec = 0;

            r = select(fd + 1, &fds, NULL, NULL, &tv);

            if (-1 == r) {
                if (EINTR == errno)
                    continue;
                errno_exit("select");
            }

            if (0 == r) {
                fprintf(stderr, "select timeout\n");
                exit(EXIT_FAILURE);
            }

            if (read_frame())
                break;
            /* EAGAIN - continue select loop. */
        }
    }
}
If I set
/* Timeout. */
tv.tv_sec = 0;
tv.tv_usec = 0;
r = select(fd + 1, &fds, NULL, NULL, &tv);
My CPU load is 100%; I have to set it to 1 or 2 to bring the CPU load down. If I remove the select() call and its timeout, my CPU load is 100% again.
From my reading, the select() operation costs extra CPU cycles, but how can we remove select() and the timeout setting in mainloop()? When I look at other V4L2 capture code from senior software engineers, I don't see them use a timeout as in the example above.
So my second question is: how can I avoid the select() timeout but still keep the CPU load low?
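(For context on the CPU numbers: with tv_sec = tv_usec = 0, select() returns immediately, so the loop spins as fast as it can, hence 100% CPU. One option, sketched here under the assumption that the same fd and errno_exit() from the example are in scope, keeps select() but drops the fixed timeout: passing NULL makes select() sleep until the driver has a filled buffer. The inner loop body would then look roughly like this:)
fd_set fds;
int r;

FD_ZERO(&fds);
FD_SET(fd, &fds);

/* NULL timeout: block until the device is readable, using no CPU while waiting. */
r = select(fd + 1, &fds, NULL, NULL, NULL);
if (-1 == r) {
    if (EINTR == errno)
        continue;                /* interrupted by a signal - retry */
    errno_exit("select");
}

if (read_frame())
    break;
/* EAGAIN - continue select loop. */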

IOCTL SIOCGIWSCAN error E2BIG

I'm trying to use the ioctl SIOCGIWSCAN from the wireless extensions in order to retrieve scan results from the driver. The problem is that iwrq.u.data.length is always 0, and I get the error E2BIG all the time. Any idea about this error and the possible causes behind it?
This is a snippet from the code used to get the scan result (it's similar to the iwlist command from wireless tools):
Note: iwlist works perfectly; "iwlist wlan0 scan" gives the scan result.
....
while(true) {
    fd_set rfds;
    int last_fd;
    int retc;

    // re init
    FD_ZERO(&rfds);
    last_fd = -1;

    retc = select(last_fd + 1, &rfds, NULL, NULL, &time_out);
    int ret_errno = errno;

    if(retc < 0) // errno check the type
    {
        if(ret_errno == EAGAIN || ret_errno == EINTR)
        {
            continue; // try another time
        }
        goto exit;
    }

    if(retc == 0)
    {
        iwrq.u.data.pointer = buffer;
        iwrq.u.data.length = data_length;
        iwrq.u.data.flags = 0;

        if(ioctl(sk_fd, SIOCGIWSCAN, &iwrq) < 0){
            errn = errno;
            if(errn == EAGAIN)
            {
                if(num_try > 0){
                    num_try--; // decrease max try
                }
                else
                {
                    goto exit;
                }
                time_out.tv_sec = 0;
                time_out.tv_usec = 300000; // sleep for 300 ms
                continue;
            }
            else if(errn == E2BIG){
                // buffer too small to hold result
                // extend it
                if(iwrq.u.data.length > data_length)
                {
                    data_length *= 2;
                    buffer = (char *) realloc(buffer, data_length * sizeof(char));
                    if(!buffer){
                        goto exit;
                    }
                }
                // setup timer
                time_out.tv_sec = 0;
                time_out.tv_usec = 300000; // 300 ms
                continue; // try again
            }
            //ioctl Wireless Scan Unhandled ERROR
            goto exit;
        }else
        {
            break; // we have result go and parse it
        }
    }
}
/// parse scan result section
....
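(A hedged observation based on the symptom above: since iwrq.u.data.length comes back as 0, the "if(iwrq.u.data.length > data_length)" test in the E2BIG branch never fires, so the buffer never actually grows and the ioctl keeps failing with E2BIG. A small hypothetical helper, not part of the original code, that grows the buffer even when the kernel gives no size hint:)
#include <stdlib.h>

/* Hypothetical helper: enlarge the scan buffer after an E2BIG. Uses the
 * length reported by the driver when there is one, otherwise falls back
 * to doubling the current size. */
static int grow_scan_buffer(char **buffer, int *data_length, int reported_length)
{
    int new_length = (reported_length > *data_length) ? reported_length
                                                      : *data_length * 2;
    char *tmp = (char *) realloc(*buffer, new_length);

    if (!tmp)
        return -1;      /* old buffer is still valid; caller can goto exit */

    *buffer = tmp;
    *data_length = new_length;
    return 0;
}
In the E2BIG branch that would amount to calling grow_scan_buffer(&buffer, &data_length, iwrq.u.data.length) and bailing out if it fails, instead of only reallocating when a larger length is reported.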

TCP Socket Multiplexing Send Large Data

I've got some trouble with TCP socket multiplexing.
//socket is non-blocking
const int MAX = 4096;
char buff[MAX];
char *p = buff;
int fd, rvalue;
rvalue = 0;

if ( (fd = open(path, O_RDONLY)) < 0 ) {
    return errno;
} else {
    int didsend, didread;
    int shouldsend;

    while ((didread = read(fd, buff, MAX)) > 0) {
        p = buff;
        shouldsend = didread;

        while ( 1 ) {
            didsend = send(sockfd, p, shouldsend, 0);

            //if send succeeds but returns fewer bytes than asked for, try to send the rest next time.
            if (didsend >= 0 && didsend < shouldsend) {
                p += didsend;
                shouldsend -= didsend;
                continue;
            }

            //if there is no place for new data to send, then wait a brief time and try again.
            if ( didsend < 0 && (errno == EWOULDBLOCK || errno == EAGAIN) ) {
                usleep(1000);
                continue;
            }

            //if all data has been sent then sending loop is over.
            if (didsend == shouldsend) {
                break;
            }

            //send error
            if ( didsend < 0 ) {
                rvalue = errno;
                break;
            }
        }
    }

    close(fd);
    if (didread == -1) {
        return errno;
    }
    return rvalue;
}
Assume I use an I/O multiplexing function like poll() or kqueue() with a non-blocking socket. If there is only a small amount of data, like sending a short message, it works fine.
But when it comes to large data, I mean larger than send()'s buffer size: since the socket is non-blocking, send() will only send a portion of the data and return how much it sent. The rest can only be sent in a later call to send(), but that takes time, and there's no telling how long. So the second while() is effectively a blocking send implemented on a non-blocking socket.
Equivalent to:
//socket is blocking
const int MAX = 4096;
char buff[MAX];
int fd, n;

if ( (fd = open(path, O_RDONLY)) < 0 ) {
    return errno;
} else {
    while ((n = read(fd, buff, MAX)) > 0) {
        if (send(sockfd, buff, n, 0) < 0) {
            return errno;
        }
    }
    close(fd);
    return 0;
}
So, what is the solution to this? Multithreading might work, but that seems like a waste of resources.
This is the general pattern for a single-threaded server that works with multiple connections and non-blocking sockets.
It's primarily pseudo-code in C and doesn't do the necessary error checking. But it gives you an idea that for each accepted connection, you keep a struct instance that maintains the socket handle, request parsing state, response stream, and any other "state" members of that connection. Then you just loop using "select" to wait, or have multiple threads doing this same thing.
Again this is only pseudo-code and uses select/poll as an example. You can get even more scalability with epoll.
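For reference, the per-connection state the pseudo-code relies on could look roughly like this (a sketch only; the field names mirror the ones used below, everything else is up to you):
/* One instance of this per accepted connection. */
struct ConnectionData
{
    int   sock;             /* accepted socket handle                      */
    int   reading_request;  /* 1 while the request is still being received */
    char *response_buffer;  /* response bytes waiting to be sent           */
    int   response_size;    /* total number of bytes in response_buffer    */
    int   txCount;          /* bytes of the response already sent          */
};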
while (1)
{
    fd_set readset = {};
    fd_set writeset = {};

    for (int i = 0; i < number_of_client_connections; i++)
    {
        if (client_connections[i].reading_request)
            FD_SET(client_connections[i].sock, &readset);
        else
            FD_SET(client_connections[i].sock, &writeset);
    }

    // add the listen socket to the read set
    FD_SET(listen_socket, &readset);

    select(n + 1, &readset, &writeset, NULL, &timeout); // wait for a socket to be ready (not shown - check for errors and return value)

    if (FD_ISSET(listen_socket, &readset))
    {
        int new_client_socket = accept(listen_socket, &addr, &addrlength);

        // create a struct that keeps track of the connection state data
        struct ConnectionData client_connection = {};
        client_connection.sock = new_client_socket;
        client_connection.reading_request = 1; // waiting for all the request bytes to come in

        client_connections[number_of_client_connections++] = client_connection; // pseudo code, add the client_connection to the list
    }

    for (int i = 0; i < number_of_client_connections; i++)
    {
        if (client_connections[i].reading_request)
        {
            if (FD_ISSET(client_connections[i].sock, &readset))
            {
                char buffer[2000];
                int len = recv(client_connections[i].sock, buffer, 2000, 0);
                // not shown - handle error case when (recv < 0)
                // not shown - handle case when (recv == 0)
                ProcessIncomingData(client_connections[i], buffer, len); // do all the request parsing here. Flip the client_connections[i].reading_request to 0 if ready to respond
            }
        }
        else if (client_connections[i].reading_request == 0)
        {
            if (FD_ISSET(client_connections[i].sock, &writeset))
            {
                struct ConnectionData *conn = &client_connections[i];
                int len = send(conn->sock, conn->response_buffer + conn->txCount, conn->response_size - conn->txCount, 0);
                conn->txCount += len;
                if (conn->txCount == conn->response_size)
                {
                    // done sending response - we can close this connection or change it back to the reading state
                }
            }
        }
    }
}

Socket Blocking and Timeout on Select

I am currently creating an echo server that disconnects clients after a maxWaitTime of being idle.
I was hoping the program would block on the socket until the client sent data, but when I run the program in gdb it goes through the select and blocks on Readline.
I know retval = 0 whenever it goes through the select and that the fd_set sock goes to [256, (31 zeroes)] and after the select, sock goes to [32 zeroes].
The accepting of the connection happens in another function and the connection descriptor is passed to the echo function.
If you are able to help point me in the right direction or let me know how I can disconnect a client after a certain amount of time please let me know.
If you require any further information please let me know.
Thanks in advance!
FD_ZERO(&sock);
FD_SET(sockfd, &sock);

int opt = 3;
setsockopt(sockfd, SOL_SOCKET, SO_RCVLOWAT, &opt, sizeof(opt));

timeout.tv_sec = maxWaitTime;
timeout.tv_usec = 0;

for ( ; ; ) {
    FD_SET(sockfd, &sock);
    printf("Set is %d\n", FD_ISSET(sockfd, &sock));

    int retval;
    retval = select(1, &sock, NULL, NULL, &timeout);

    if (retval)
    {
        quitProgram(number);
    }
    else
    {
        printf("n is %d\n", retval);

        if ( (n = Readline(sockfd, line, MAXLINE)) == 0)
        {
            return; /* connection closed by other end */
        }

        Writen(sockfd, line, n);
    }
}
As others have commented, you have some logic holes in your code. By your own admission:
I know retval = 0 whenever it goes through the select and that the fd_set sock goes to [256, (31 zeroes)] and after the select, sock goes to [32 zeroes].
That should have been an indication to you that something was going wrong. The socket was not in the fd_set after select() exited, which meant the socket was not readable yet. retval=0 means select() timed out.
You have to reset not only the fd_set every time select() is called, but also the timeval as well. Try this instead:
int opt = 3;
setsockopt(sockfd, SOL_SOCKET, SO_RCVLOWAT, &opt, sizeof(opt));

for ( ; ; )
{
    timeout.tv_sec = maxWaitTime;
    timeout.tv_usec = 0;

    FD_ZERO(&sock);
    FD_SET(sockfd, &sock);

    int retval = select(sockfd+1, &sock, NULL, NULL, &timeout);
    if (retval <= 0)
    {
        quitProgram(number); /* error or connection timed out */
    }
    else
    {
        if ( (n = Readline(sockfd, line, MAXLINE)) <= 0)
        {
            return; /* error or connection closed by other end */
        }
        Writen(sockfd, line, n);
    }
}

SIGPIPE With Running Program

I have two daemons, and A is speaking to B. B is listening on a port, and A opens a tcp connection to that port. A is able to open a socket to B, but when it attempts to actually write said socket, I get a SIGPIPE, so I'm trying to figure out where B could be closing the open socket.
However, if I attach to both daemons in gdb, the SIGPIPE happens before any of the code for handling data is called. This kind of makes sense, because the initial write is never successful, and the listeners are triggered from receiving data. My question is - what could cause daemon B to close the socket before any data is sent? The socket is closed less than a microsecond after opening it, so I'm thinking it can't be a timeout or anything of the sort. I would love a laundry list of possibilities to track down, as I've been chewing on this one for a few days and I'm pretty much out of ideas.
As requested, here is the code that accepts and handles communication:
{
  extern char *PAddrToString(pbs_net_t *);

  int i;
  int n;
  time_t now;
  fd_set *SelectSet = NULL;
  int SelectSetSize = 0;
  int MaxNumDescriptors = 0;
  char id[] = "wait_request";
  char tmpLine[1024];
  struct timeval timeout;
  long OrigState = 0;

  if (SState != NULL)
    OrigState = *SState;

  timeout.tv_usec = 0;
  timeout.tv_sec = waittime;

  SelectSetSize = sizeof(char) * get_fdset_size();
  SelectSet = (fd_set *)calloc(1, SelectSetSize);

  pthread_mutex_lock(global_sock_read_mutex);
  memcpy(SelectSet, GlobalSocketReadSet, SelectSetSize);
  /* selset = readset;*/ /* readset is global */
  MaxNumDescriptors = get_max_num_descriptors();
  pthread_mutex_unlock(global_sock_read_mutex);

  n = select(MaxNumDescriptors, SelectSet, (fd_set *)0, (fd_set *)0, &timeout);

  if (n == -1)
  {
    if (errno == EINTR)
    {
      n = 0; /* interrupted, cycle around */
    }
    else
    {
      int i;
      struct stat fbuf;

      /* check all file descriptors to verify they are valid */
      /* NOTE: selset may be modified by failed select() */

      for (i = 0; i < MaxNumDescriptors; i++)
      {
        if (FD_ISSET(i, GlobalSocketReadSet) == 0)
          continue;

        if (fstat(i, &fbuf) == 0)
          continue;

        /* clean up SdList and bad sd... */

        pthread_mutex_lock(global_sock_read_mutex);
        FD_CLR(i, GlobalSocketReadSet);
        pthread_mutex_unlock(global_sock_read_mutex);
      } /* END for each socket in global read set */

      free(SelectSet);
      log_err(errno, id, "Unable to select sockets to read requests");

      return(-1);
    } /* END else (errno == EINTR) */
  } /* END if (n == -1) */

  for (i = 0; (i < max_connection) && (n != 0); i++)
  {
    pthread_mutex_lock(svr_conn[i].cn_mutex);

    if (FD_ISSET(i, SelectSet))
    {
      /* this socket has data */
      n--;

      svr_conn[i].cn_lasttime = time(NULL);

      if (svr_conn[i].cn_active != Idle)
      {
        void *(*func)(void *) = svr_conn[i].cn_func;

        netcounter_incr();

        pthread_mutex_unlock(svr_conn[i].cn_mutex);

        func((void *)&i);

        /* NOTE: breakout if state changed (probably received shutdown request) */

        if ((SState != NULL) &&
            (OrigState != *SState))
          break;
      }
      else
      {
        pthread_mutex_lock(global_sock_read_mutex);
        FD_CLR(i, GlobalSocketReadSet);
        pthread_mutex_unlock(global_sock_read_mutex);

        close_conn(i, TRUE);

        pthread_mutex_unlock(svr_conn[i].cn_mutex);

        pthread_mutex_lock(num_connections_mutex);

        sprintf(tmpLine, "closed connections to fd %d - num_connections=%d (select bad socket)",
          i,
          num_connections);

        pthread_mutex_unlock(num_connections_mutex);

        log_err(-1, id, tmpLine);
      }
    }
    else
      pthread_mutex_unlock(svr_conn[i].cn_mutex);
  } /* END for i */

  /* NOTE: break out if shutdown request received */

  if ((SState != NULL) && (OrigState != *SState))
    return(0);

  /* have any connections timed out ?? */

  now = time((time_t *)0);

  for (i = 0; i < max_connection; i++)
  {
    struct connection *cp;

    pthread_mutex_lock(svr_conn[i].cn_mutex);

    cp = &svr_conn[i];

    if (cp->cn_active != FromClientDIS)
    {
      pthread_mutex_unlock(svr_conn[i].cn_mutex);
      continue;
    }

    if ((now - cp->cn_lasttime) <= PBS_NET_MAXCONNECTIDLE)
    {
      pthread_mutex_unlock(svr_conn[i].cn_mutex);
      continue;
    }

    if (cp->cn_authen & PBS_NET_CONN_NOTIMEOUT)
    {
      pthread_mutex_unlock(svr_conn[i].cn_mutex);
      continue; /* do not time-out this connection */
    }

    /* NOTE: add info about node associated with connection - NYI */

    snprintf(tmpLine, sizeof(tmpLine), "connection %d to host %s has timed out after %d seconds - closing stale connection\n",
      i,
      PAddrToString(&cp->cn_addr),
      PBS_NET_MAXCONNECTIDLE);

    log_err(-1, "wait_request", tmpLine);

    /* locate node associated with interface, mark node as down until node responds */
    /* NYI */

    close_conn(i, TRUE);

    pthread_mutex_unlock(svr_conn[i].cn_mutex);
  } /* END for (i) */

  return(0);
}
NOTE: I didn't write this code.
Is it possible you messed up and somewhere else in the program you try to close the same handle twice?
That could do this to you very easily.
HINT: systrace can determine if this is happening.
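To make the double-close idea concrete: if the duplicate close() happens after the descriptor number has already been reused for the newly accepted socket, it silently tears down that new connection, and the peer's first write then fails, which matches the "closed less than a microsecond after opening" symptom. A minimal illustration (hypothetical, using /dev/null in place of real sockets, and assuming the kernel hands out the lowest free descriptor number):
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
    int fd = open("/dev/null", O_WRONLY);    /* suppose this returns 3 */
    close(fd);                               /* first close: fine */

    int sock = open("/dev/null", O_WRONLY);  /* stands in for accept(); likely reuses 3 */

    close(fd);                               /* stale second close silently kills 'sock' */

    if (write(sock, "x", 1) < 0)
        perror("write");                     /* EBADF here; on a real TCP socket the peer
                                                would instead see the connection closed */
    return 0;
}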
