I am confused about whether I need to implement OCSP checking myself, or whether OpenSSL does it implicitly inside the SSL_get_verify_result(ssl) method. If I need to do it myself, how do I do that? I could not find any documentation or blog post that clearly states how.
My code to connect with OpenSSL goes like this:
SSL_load_error_strings();
ERR_load_BIO_strings();
OpenSSL_add_all_algorithms();
BIO * bio;
int x = 0;
long error=0;
SSL_library_init();
SSL_CTX * ctx = SSL_CTX_new(TLSv1_2_client_method());
SSL * ssl;
if(! SSL_CTX_load_verify_locations(ctx, "CA.CRT", NULL))
{
/* Handle failed load here */
//Failed
}
bio = BIO_new_ssl_connect(ctx);
if(bio == NULL){
/* Handle the failure */
//Failed
}
/* check bio before using it: BIO_get_ssl() would crash on NULL */
error = BIO_get_ssl(bio, &ssl);
error = SSL_set_mode(ssl, SSL_MODE_AUTO_RETRY);
BIO_set_conn_hostname(bio, "hostname:443");
/* parenthesize the assignment: without it, <= binds before = */
if((error = BIO_do_connect(bio)) <= 0){
/* Handle failed connection */
//Failed
return 0;
}
if(SSL_get_verify_result(ssl) != X509_V_OK)
{
/* Handle the failed verification */
//Failed
}
char buff[] = "The entire POST request in HTTP format";
if(BIO_write(bio, buff, strlen(buff)) <= 0)
{
if(! BIO_should_retry(bio))
{
/* Handle failed write here */
//Failed
}
/* Do something to handle the retry */
}
int entireResSize = 0;
BYTE* babuff = NULL;
while(1)
{
BYTE buffer[10000] = {0};
/* BIO_read() returns int; an unsigned type would make the < 0 check never fire */
int currentResSize = BIO_read(bio, buffer, sizeof(buffer));
if(currentResSize == 0)
{
/* Handle closed connection */
//Failed
break;
}
else if(currentResSize < 0)
{
if(! BIO_should_retry(bio))
{
/* Handle failed read here */
//Failed
break;
}
/* Do something to handle the retry */
}
else
{
/* Append this chunk to the response buffer; memcpy, not strcpy,
 * because the response is binary and not NUL-terminated */
entireResSize += currentResSize;
babuff = (BYTE*) realloc(babuff, entireResSize);
memcpy(&babuff[entireResSize - currentResSize], buffer, currentResSize);
}
}
/* To reuse the connection, use this line */
BIO_reset(bio);
SSL_CTX_free(ctx);
/* To free it from memory, use this line */
BIO_free_all(bio);
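Note: SSL_get_verify_result() only reports the result of certificate-chain verification; it does not perform any OCSP lookup by itself. A minimal sketch of requesting and checking a stapled OCSP response (OpenSSL 1.0.2+; the callback name and the fail-hard policy are assumptions, not code from this post):
#include <openssl/ocsp.h>
/* Hypothetical callback: runs after the server (maybe) staples an
 * OCSP response to the handshake. Return 1 to continue, 0 to abort. */
static int ocsp_status_cb(SSL *s, void *arg)
{
const unsigned char *resp = NULL;
long len = SSL_get_tlsext_status_ocsp_resp(s, &resp);
if (len <= 0 || resp == NULL)
return 1; /* nothing stapled; choose your own policy here */
OCSP_RESPONSE *o = d2i_OCSP_RESPONSE(NULL, &resp, len);
if (o == NULL)
return 0; /* malformed response: abort the handshake */
int ok = (OCSP_response_status(o) == OCSP_RESPONSE_STATUS_SUCCESSFUL);
/* A full check would also verify the response against the chain with
 * OCSP_basic_verify() and inspect the per-certificate status. */
OCSP_RESPONSE_free(o);
return ok ? 1 : 0;
}
/* Before BIO_do_connect(): ask the server to staple a response. */
SSL_set_tlsext_status_type(ssl, TLSEXT_STATUSTYPE_ocsp);
SSL_CTX_set_tlsext_status_cb(ctx, ocsp_status_cb);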
I'm currently using an STM32F405 with an ENC28J60 and lwIP as the TCP/IP stack. Everything runs fine at startup, but after about a minute the ENC stops receiving packets. Transmitting keeps working fine. I've tried both polling it and using interrupts.
I'm using https://github.com/wolfgangr/enc28j60 to communicate with the ENC. This is the code that handles incoming packets:
while (true) {
eventmask_t mask = chEvtWaitAnyTimeout(ALL_EVENTS, LWIP_PACKET_POLL_INTERVAL);
if(mask & ENC_INTERRUPT_ID)
{
/* Handle ENC28J60 interrupt */
ENC_IRQHandler(&encHandle);
/* Reenable interrupts */
ENC_EnableInterrupts(EIE_INTIE);
}
if (mask & PERIODIC_LINK_TIMER_ID)
{
bool current_link_status = ((encHandle.LinkStatus) & PHSTAT2_LSTAT) != 0;
if (current_link_status != prev_link_status) {
if (current_link_status) {
dhcp_start(&thisif);
}
else {
dhcp_stop(&thisif);
}
}
prev_link_status = current_link_status;
}
/* Check if new frames were received */
struct pbuf *p;
while ((p = low_level_input(&thisif)) != NULL) {
struct eth_hdr *ethhdr = p->payload;
switch (htons(ethhdr->type)) {
/* IP or ARP packet? */
case ETHTYPE_IP:
case ETHTYPE_ARP:
/* full packet send to tcpip_thread to process */
if (tcpip_input(p, &thisif) == ERR_OK)
break;
LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n"));
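/* fall through to default: tcpip_input() failed, so free the pbuf */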
default:
pbuf_free(p);
}
}
}
Function low_level_input:
static struct pbuf *low_level_input(struct netif *netif) {
struct pbuf *p = NULL;
struct pbuf *q;
uint16_t len;
uint8_t *buffer;
uint32_t bufferoffset = 0;
if (!ENC_GetReceivedFrame(&encHandle)) {
return NULL;
}
/* Obtain the size of the packet and put it into the "len" variable. */
len = encHandle.RxFrameInfos.length;
buffer = (uint8_t *)encHandle.RxFrameInfos.buffer;
if (len > 0)
{
/* We allocate a pbuf chain of pbufs from the Lwip buffer pool */
p = pbuf_alloc(PBUF_RAW, len, PBUF_POOL);
}
if (p != NULL)
{
bufferoffset = 0;
for(q = p; q != NULL; q = q->next)
{
/* Copy data in pbuf */
memcpy( (uint8_t*)((uint8_t*)q->payload), (uint8_t*)((uint8_t*)buffer + bufferoffset), q->len);
bufferoffset = bufferoffset + q->len;
}
}
return p;
}
After a while, the function ENC_GetReceivedFrame keeps returning false, even though I know for sure some packets should have been received.
I've debugged the function (found in enc28j60.c), and at this line:
pktcnt = enc_rdbreg(handle, ENC_EPKTCNT);
pktcnt is always 0. I've looked at the SPI bus with a logic analyzer, and the ENC truly answers 0. The SPI bus works fine.
Just before this happens, some packets are received that are not flagged as RXSTAT_OK (see line 1259 in enc28j60.c).
I've been at this for days now and truly have no ideas left.
I encountered a similar problem.
From time to time the EPKTCNT register decreased for no reason (without the ECON2_PKTDEC bit being set).
I noticed that when it happened, it was after setting the ECON2_AUTOINC bit.
Not every time ECON2_AUTOINC was set, but often.
Now I set ECON2_AUTOINC only once, at initialization of the ENC28J60, and no longer during the reading process.
Since then, EPKTCNT has stopped decreasing for no reason.
Hope it can help.
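A minimal sketch of the idea, assuming the driver's register helpers (enc_bfsgreg() is modeled on the enc_rdbreg() call quoted above; the name may differ in your copy of the driver):
/* Sketch: set AUTOINC once in the init path and never touch it again.
 * enc_bfsgreg() = SPI "bit field set" on an ETH register (assumed name). */
static void enc_rx_setup(ENC_HandleTypeDef *handle)
{
/* Enable automatic buffer-pointer increment for all subsequent
 * buffer reads/writes, instead of re-setting it per frame. */
enc_bfsgreg(handle, ENC_ECON2, ECON2_AUTOINC);
}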
I'm trying to implement a web prefetching system. The purpose of a system like this is to “predict” future requests and prefetch them.
The system builds a predictive model from web navigation logs (Squid access.log files). The model is a dependency graph, where a node representing URL A has an arc to a node representing URL B if URL B has been requested immediately after URL A.
Once the model is built, the system receives queries of URLs requested by users, and make “predictions” based on the graph. Predictions are resources (URLs) very likely to be requested in the future. So, based on predictions, the system prefetches these resources to store them in cache prior to users' requests.
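For illustration, a minimal sketch of that dependency-graph idea (my own types, not the project's Graph_Adj_List):
/* Each node is a URL; an arc A->B counts how many times B was requested
 * immediately after A, so predictions for A are the targets of its
 * highest-count arcs. */
typedef struct Arc {
struct Node *to;    /* URL that followed this one */
unsigned count;     /* how often it followed */
struct Arc *next;
} Arc;
typedef struct Node {
char *url;
Arc *arcs;          /* outgoing arcs = prediction candidates */
struct Node *next;  /* next node in the adjacency list */
} Node;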
I'm using the following testing scenario:
A process simulates multiple clients, requesting the URLs in a file using libcurl. The process runs on a different PC from the prefetching system. The PCs are connected directly via an Ethernet cable.
The requests made by the client simulator are always the same URLs, at the same relative time from the first request. All requests go to port 3128, Squid's listen port on the prefetch PC (port 80 is DNAT'd to port 3128 on the client).
The prefetching system runs in a CentOS 6.3 box, kernel 2.6.32-71.el6.i686, 2 core Intel Pentium 4 3.00GHz processor, 4 GB RAM.
The prefetching system is one process with multiple threads. The main thread creates the predictive model and generates predictions based on queries. A “listener” thread reads URLs requested by users and prefetches predicted URLs using libcurl. “Listening” means reading from a named pipe (called url_fifo) URLs captured live on an interface using tshark:
stdbuf -o0 tshark -i eth1 tcp port 3128 and "tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420" -T fields -e http.request.full_uri >> url_fifo
Every 10 minutes (one cycle), the model is updated with the requests from the last cycle. The client tells the system when a cycle ends, and the model is then updated. Once the model is updated, the system tells the client to start requesting the URLs of the next cycle.
Here is the situation: sometimes reading from the named pipe freezes. No URLs are read from the pipe, even though tshark keeps capturing URLs and redirecting them to the named pipe. After an hour (or a couple of hours), all the “buffered” URLs are read in less than 10 minutes, and after that, reading from the pipe works fine again. This doesn't happen every run (about 50% of runs freeze, 50% don't).
It seems that there is a buffering issue, since tshark keeps capturing URLs and all requests are correctly logged in Squid's access.log.
In the beginning, I ran tshark with the -l option so that its output is line buffered. Then I switched to stdbuf -o0 (no buffering). Either way, the situation still happens.
In the system code, I also tried opening and reading the named pipe as a stream (FILE *) and setting the stream to unbuffered or line buffered (using setvbuf()). The situation still happened.
In some cycles requests are faster than in others. Still, it doesn't seem to be a fast-producer/slow-consumer issue, since in many repetitions of the test all URLs are read and processed correctly without any freezes.
Is there something I'm missing related to named pipes and buffering? I'd really appreciate some guidance.
Assume networking (interfaces, routing, iptables, Squid) is OK; I haven't had any issues related to it.
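For debugging the stall, a minimal standalone sketch (not the project code; the path and the 10-second timeout are arbitrary) that reads a FIFO through poll(2), so a stall shows up as a visible "no data" message instead of a silently blocked read():
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
int read_fifo_with_poll(const char *path)
{
int fd = open(path, O_RDONLY | O_NONBLOCK);
if (fd == -1)
return -1;
struct pollfd pfd = { .fd = fd, .events = POLLIN };
char buf[512];
for (;;) {
int rc = poll(&pfd, 1, 10000 /* ms */);
if (rc == 0) { /* timeout: make the stall visible */
fprintf(stderr, "no data on %s for 10 s\n", path);
continue;
}
if (rc < 0)
break;
ssize_t n = read(fd, buf, sizeof buf);
if (n == 0) /* all writers closed the FIFO */
break;
if (n > 0)
fwrite(buf, 1, (size_t)n, stdout);
}
close(fd);
return 0;
}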
Code (assume necessary header files are included):
functions.c
#define BUFLEN 512
#define QUEUE_LEN 64
#define THREADS_LEN 2
pthread_mutex_t model_lock;
pthread_cond_t model_cond, listen_cond;
pthread_t queries_thread, listen_thread;
short int model_is_updating, model_can_update, program_shutdown;
/* Program execution statistics */
Status * program_status;
/* Thread pool */
threadpool_t *pool;
/* program execution */
int
run(void)
{
Graph_Adj_List * gr = NULL; /* Graph as an adjacency list */
char ** reports = NULL;
unsigned report_counter = 0;
/* Init program status */
program_status = status_init();
/* Load list of custom web navigation reports to be used to build the initial
* version of the predictive model */
reports = file_load_reports(program_config.reports_file);
if (!reports)
return 0;
/* Init mutex and cond */
pthread_mutex_init(&model_lock, NULL);
pthread_cond_init(&model_cond, NULL);
pthread_cond_init(&listen_cond, NULL);
/* Lock */
pthread_mutex_lock (&model_lock);
/* Start first cycle */
status_start_cycle(program_status);
/* Create initial version of the predictive model */
gr = create_model_from_files(reports, &report_counter, program_config.reports_limit);
if (!gr)
{
/* Unlock */
pthread_mutex_unlock (&model_lock);
return 0;
}
/* Unlock */
pthread_mutex_unlock (&model_lock);
/* Start threads */
if (pthread_create(&queries_thread, NULL, fifo_predictions_threaded, (void *)gr) ||
pthread_create(&listen_thread, NULL, listen_end_of_cycle, NULL))
program_shutdown = 1;
/* main loop */
while(!program_shutdown)
{
/* lock */
pthread_mutex_lock (&model_lock);
/* wait for clients' announcement of the end of requests from current cycle */
while (!model_can_update)
pthread_cond_wait(&model_cond, &model_lock);
/* set updating flag */
model_is_updating = 1;
/* Update predictive model, based on Squid's access.log from (about to finish)
* current cycle */
adj_list_update_access(gr, program_config.access_file);
/* Save statistics related to the current cycle and finish it */
status_finish_cycle(program_status);
/* Check if last custom report has been read */
if (!reports[report_counter])
{
program_shutdown = 1;
pthread_mutex_unlock (&model_lock);
break;
}
/* Start a new cycle */
status_start_cycle(program_status);
/* Read a new custom report and update the predictive model */
update_model(gr, reports[report_counter]);
report_counter++;
/* Updating is done */
model_is_updating = 0;
/* Model can't be updated until client announces the end of the cycle
* that has just started */
model_can_update = 0;
/* Tell client to start sending requests from the new cycle */
if (!signal_start_cycle())
{
program_shutdown = 1;
pthread_mutex_unlock (&model_lock);
break;
}
/* Signal listener thread that a new cycle has begun */
pthread_cond_signal(&listen_cond);
/* Unlock */
pthread_mutex_unlock (&model_lock);
}
/* Finish threads */
pthread_cancel(listen_thread);
pthread_cancel(queries_thread);
pthread_join(listen_thread, NULL);
pthread_join(queries_thread, NULL);
/* Free memory */
adj_list_free_all2(&gr);
file_reports_free_all(&reports);
pthread_cond_destroy(&model_cond);
pthread_cond_destroy(&listen_cond);
pthread_mutex_destroy(&model_lock);
status_free(&program_status);
return 1;
}
void *
fifo_predictions_threaded(void * data)
{
Graph_Adj_List * gr = (Graph_Adj_List *) data;
/* Set thread cancel type */
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
while (!program_shutdown)
{
pthread_mutex_lock(&model_lock);
/* Pause reading from named pipe while the model is being updated */
while(model_is_updating)
pthread_cond_wait(&listen_cond, &model_lock);
pthread_mutex_unlock(&model_lock);
/* Read URLs from named pipe */
fifo_predictions(gr, program_config.fifo);
}
pthread_exit(NULL);
return NULL;
}
int
fifo_predictions(Graph_Adj_List * gr, const u8 * fifo)
{
u8 cad[BUFLEN] = { '\0' };
u8 * ini = NULL, * fin = NULL, * fullurl = NULL;
int i, fifo_descriptor, read_urls = 0, fullurl_len = 0, incomplete_url = 1;
FILE * fifo_file = NULL;
/* Open fifo in blocking mode */
fifo_descriptor = open(CHAR_CAST fifo, O_RDONLY);
/* Open fifo as a stream */
// fifo_file = fopen(fifo, "r");
// if (!fifo_file)
if (fifo_descriptor == -1)
return 0;
/* If fifo is opened as a stream, set it line buffered */
// setlinebuf(fifo_file);
do
{
if ((i = read(fifo_descriptor, cad, BUFLEN - 1)) == -1)
// if ( fgets(cad, BUFLEN-1, fifo_file) == NULL)
ERROR(__FILE__, __FUNCTION__, __LINE__, "Fifo read error");
else
{
// i = strlen(cad);
cad[i] = '\0';
read_urls = 0;
if (i > 0)
{
int j = 0;
for (j = 0, ini = cad, fin = NULL ; cad[j] != '\0'; j++)
{
if (cad[j] == '\n')
{
/* Save URL */
fin = &cad[j];
ini = (*ini == '\n' ? ini + 1 : ini);
/* Check if string is a continuation of the previously read URL */
read_urls = fin - ini;
read_urls = read_urls >= 0 ? read_urls : 0;
/* Save URL in fullurl string */
fullurl = realloc(fullurl, fullurl_len + read_urls + 1);
memcpy(&fullurl[fullurl_len], ini, read_urls);
fullurl[fullurl_len + read_urls] = '\0';
ini = fin;
incomplete_url = fullurl_len = 0;
/* Ask the model for predictions and fetch them */
fetch_url_predictions2(gr, fullurl);
u8_free(&fullurl);
} else
incomplete_url = 1;
}
if (incomplete_url)
{
ini = (*ini == '\n' ? ini + 1 : ini);
read_urls = &cad[j] - ini;
read_urls = read_urls >= 0 ? read_urls : 0;
fullurl = realloc(fullurl, fullurl_len + read_urls);
memcpy(&fullurl[fullurl_len], ini, read_urls);
fullurl_len += read_urls;
}
}
}
} while (i > 0);
close(fifo_descriptor);
// fclose (fifo_file);
return 1;
}
int
fetch_url_predictions2(Graph_Adj_List * gr, u8 * in_url)
{
String * string_url = NULL;
Headnode * head = NULL;
LinkedList * list = NULL;
LinkedListElem * elem = NULL;
/* Use custom string type */
string_url = string_create_no_len(in_url);
if (!string_url)
return 0;
pthread_mutex_lock(&model_lock);
/* Get URL node */
head = adj_list_get_node(gr, string_url);
if (head)
{
/* Get predictions (URLs) as a linked list */
list = adj_list_predictions_to_list(head);
if (!list)
{
/* unlock before returning, otherwise model_lock is never released */
pthread_mutex_unlock(&model_lock);
string_free_all(&string_url);
return 0;
}
pthread_mutex_unlock(&model_lock);
/* Callback fetches URLs */
list->callback = &curl_callback_void;
if (!pool)
pool = threadpool_create(THREADS_LEN, QUEUE_LEN, 0);
/* Load URLs to be fetched to threadpool's task queue */
for (elem = list->first; elem; elem = elem->next)
{
CallbackArg arg;
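/* NOTE (added comment): arg.copy must point at a valid copy function
 * before the next line runs; as posted, it is used uninitialized. */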
arg.data = arg.copy(elem->data);
threadpool_add_copy_arg(pool, list->callback, &arg, 1, sizeof(arg), 0);
}
linked_list_free_all(&list);
}
else
pthread_mutex_unlock(&model_lock); /* head was NULL: still locked here */
string_free_all(&string_url);
return 1;
}
fetch.c
void
curl_callback_void(void * data)
{
CallbackArg * arg = (CallbackArg *) data;
char * url = (char *) arg->data;
fetch_url(url);
}
static size_t
write_data(void *buffer, size_t size, size_t nmemb, void *userp)
{
return size * nmemb;
}
int
fetch_url(char * url)
{
CURL *curl;
CURLcode res;
struct timeval time;
char * time_string = NULL;
curl = curl_easy_init();
if (curl)
{
curl_easy_setopt(curl, CURLOPT_URL, url);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, &write_data);
curl_easy_setopt(curl, CURLOPT_WRITEDATA, NULL);
curl_easy_setopt(curl, CURLOPT_FAILONERROR, 1L);
curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 1L);
curl_easy_setopt(curl, CURLOPT_NOSIGNAL, 1L);
curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 15L);
curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10L); /* these options take a long */
/* Perform the request, res will get the return code */
res = curl_easy_perform(curl);
gettimeofday(&time, NULL);
time_string = timeval_to_str(&time);
/* Check for errors */
if (res != CURLE_OK)
{
fprintf(stderr, "\ntime %s curl_easy_perform() (url %s) failed: %s\n",
time_string, url, curl_easy_strerror(res));
}
else
{
fprintf(stderr, "\ntime %s curl_easy_perform() (url %s) fetched ok\n",
time_string, url);
}
fflush(stderr);
free (time_string);
curl_easy_cleanup(curl);
}
return 0;
}
network.c
/*
* Code based on Beej's Networking Guide
*/
#define MSG_LEN 5
#define QUEUE_SIZE 5
extern pthread_mutex_t model_lock;
extern pthread_cond_t model_cond;
extern short int model_can_update, program_shutdown;
extern Config program_config;
// get sockaddr, IPv4 or IPv6:
static void *
get_in_addr(struct sockaddr *sa) {
if (sa->sa_family == AF_INET) {
return &(((struct sockaddr_in*) sa)->sin_addr);
}
return &(((struct sockaddr_in6*) sa)->sin6_addr);
}
void *
listen_end_of_cycle(void * data)
{
int sockfd, new_fd; // listen on sock_fd, new connection on new_fd
struct addrinfo hints, *servinfo, *p;
struct sockaddr_storage their_addr; // connector's address information
socklen_t sin_size;
int yes = 1;
char s[INET_ADDRSTRLEN], msg[MSG_LEN], *str = NULL;
int rv;
int read_bytes;
struct timeval actual_time;
/* Set thread cancel type */
pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL );
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
hints.ai_flags = AI_PASSIVE; // use my IP
if ((rv = getaddrinfo(NULL, program_config.listen_port, &hints, &servinfo))
!= 0) {
fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rv));
return "error";
}
// loop through all the results and bind to the first we can
for (p = servinfo; p != NULL ; p = p->ai_next) {
if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol))
== -1) {
perror("server: socket");
continue;
}
if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(int))
== -1) {
perror("setsockopt");
return "error";
}
if (bind(sockfd, p->ai_addr, p->ai_addrlen) == -1) {
close(sockfd);
perror("server: bind");
continue;
}
break;
}
if (p == NULL ) {
fprintf(stderr, "server: failed to bind\n");
return "error";
}
freeaddrinfo(servinfo); // all done with this structure
if (listen(sockfd, QUEUE_SIZE) == -1) {
perror("listen");
return "error";
}
while (!program_shutdown)
{
sin_size = sizeof their_addr;
new_fd = accept(sockfd, (struct sockaddr *) &their_addr, &sin_size);
if (new_fd == -1) {
perror("accept");
continue;
}
inet_ntop(their_addr.ss_family,
get_in_addr((struct sockaddr *) &their_addr), s, sizeof s);
if ((read_bytes = recv(new_fd, msg, MSG_LEN - 1, 0)) == -1) {
perror("recv");
close(new_fd); /* don't leak the descriptor on a failed read */
continue;
}
close(new_fd);
msg[read_bytes] = '\0';
/* Check received message */
if (strcmp(msg, "DONE")) {
perror("Not valid message");
continue;
}
printf("\ngot \"DONE\" from %s\n", s);
fflush(stdout);
/* Lock */
pthread_mutex_lock(&model_lock);
/* Flag used by main thread to allow model update */
model_can_update = 1;
/* Signal model can be updated */
pthread_cond_signal(&model_cond);
/* Unlock */
pthread_mutex_unlock(&model_lock);
}
close(sockfd);
pthread_exit(NULL);
return "ok";
}
int signal_start_cycle(void) {
int sockfd;
struct addrinfo hints, *servinfo, *p;
int rv;
char s[INET6_ADDRSTRLEN], *str = NULL;
struct timeval actual_time, aux_time;
struct timeval connect_timeout = { 15, 0 }, max_connect_time = { 0, 0 };
short int connected = 0;
memset(&hints, 0, sizeof hints);
hints.ai_family = AF_INET;
hints.ai_socktype = SOCK_STREAM;
if ((rv = getaddrinfo(program_config.client_ip, program_config.client_port,
&hints, &servinfo)) != 0) {
fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(rv));
return 0;
}
gettimeofday(&aux_time, NULL);
timeval_add(aux_time, connect_timeout, &max_connect_time);
/* Try several times to connect to the remote side */
do {
// loop through all the results and connect to the first we can
for (p = servinfo; p != NULL ; p = p->ai_next) {
if ((sockfd = socket(p->ai_family, p->ai_socktype, p->ai_protocol))
== -1) {
perror("client: socket");
continue;
}
gettimeofday(&actual_time, NULL);
printf("\ntrying to connect %s\n", program_config.client_ip);
fflush(stdout);
if (connect(sockfd, p->ai_addr, p->ai_addrlen) == -1) {
close(sockfd);
perror("client: connect");
continue;
}
connected = 1;
break;
}
} while (!connected && !timeval_greater_than(actual_time, max_connect_time));
if (p == NULL ) {
fprintf(stderr, "client: failed to connect\n");
return 0;
}
inet_ntop(p->ai_family, get_in_addr((struct sockaddr *) p->ai_addr), s,
sizeof s);
printf("\nMAIN THREAD: connecting to %s\n", s);
fflush(stdout);
freeaddrinfo(servinfo); // all done with this structure
if (send(sockfd, "DONE", 4, 0) == -1)
{
perror("send");
return 0;
}
printf("\nsent \"DONE\" to %s\n", s);
fflush(stdout);
close(sockfd);
return 1;
}
I have two daemons, A and B, where A speaks to B. B listens on a port, and A opens a TCP connection to that port. A is able to open a socket to B, but when it attempts to actually write to that socket, it gets a SIGPIPE, so I'm trying to figure out where B could be closing the open socket.
However, if I attach to both daemons in gdb, the SIGPIPE happens before any of the data-handling code is called. This makes some sense, because the initial write never succeeds and the listeners are triggered by receiving data. My question is: what could cause daemon B to close the socket before any data is sent? The socket is closed less than a microsecond after it is opened, so I don't think it can be a timeout or anything of the sort. I would love a laundry list of possibilities to track down, as I've been chewing on this one for a few days and I'm pretty much out of ideas.
As requested, here is the code that accepts and handles communication:
{
extern char *PAddrToString(pbs_net_t *);
int i;
int n;
time_t now;
fd_set *SelectSet = NULL;
int SelectSetSize = 0;
int MaxNumDescriptors = 0;
char id[] = "wait_request";
char tmpLine[1024];
struct timeval timeout;
long OrigState = 0;
if (SState != NULL)
OrigState = *SState;
timeout.tv_usec = 0;
timeout.tv_sec = waittime;
SelectSetSize = sizeof(char) * get_fdset_size();
SelectSet = (fd_set *)calloc(1,SelectSetSize);
pthread_mutex_lock(global_sock_read_mutex);
memcpy(SelectSet,GlobalSocketReadSet,SelectSetSize);
/* selset = readset;*/ /* readset is global */
MaxNumDescriptors = get_max_num_descriptors();
pthread_mutex_unlock(global_sock_read_mutex);
n = select(MaxNumDescriptors, SelectSet, (fd_set *)0, (fd_set *)0, &timeout);
if (n == -1)
{
if (errno == EINTR)
{
n = 0; /* interrupted, cycle around */
}
else
{
int i;
struct stat fbuf;
/* check all file descriptors to verify they are valid */
/* NOTE: selset may be modified by failed select() */
for (i = 0; i < MaxNumDescriptors; i++)
{
if (FD_ISSET(i, GlobalSocketReadSet) == 0)
continue;
if (fstat(i, &fbuf) == 0)
continue;
/* clean up SdList and bad sd... */
pthread_mutex_lock(global_sock_read_mutex);
FD_CLR(i, GlobalSocketReadSet);
pthread_mutex_unlock(global_sock_read_mutex);
} /* END for each socket in global read set */
free(SelectSet);
log_err(errno, id, "Unable to select sockets to read requests");
return(-1);
} /* END else (errno == EINTR) */
} /* END if (n == -1) */
for (i = 0; (i < max_connection) && (n != 0); i++)
{
pthread_mutex_lock(svr_conn[i].cn_mutex);
if (FD_ISSET(i, SelectSet))
{
/* this socket has data */
n--;
svr_conn[i].cn_lasttime = time(NULL);
if (svr_conn[i].cn_active != Idle)
{
void *(*func)(void *) = svr_conn[i].cn_func;
netcounter_incr();
pthread_mutex_unlock(svr_conn[i].cn_mutex);
func((void *)&i);
/* NOTE: breakout if state changed (probably received shutdown request) */
if ((SState != NULL) &&
(OrigState != *SState))
break;
}
else
{
pthread_mutex_lock(global_sock_read_mutex);
FD_CLR(i, GlobalSocketReadSet);
pthread_mutex_unlock(global_sock_read_mutex);
close_conn(i, TRUE);
pthread_mutex_unlock(svr_conn[i].cn_mutex);
pthread_mutex_lock(num_connections_mutex);
sprintf(tmpLine, "closed connections to fd %d - num_connections=%d (select bad socket)",
i,
num_connections);
pthread_mutex_unlock(num_connections_mutex);
log_err(-1, id, tmpLine);
}
}
else
pthread_mutex_unlock(svr_conn[i].cn_mutex);
} /* END for i */
/* NOTE: break out if shutdown request received */
if ((SState != NULL) && (OrigState != *SState))
return(0);
/* have any connections timed out ?? */
now = time((time_t *)0);
for (i = 0;i < max_connection;i++)
{
struct connection *cp;
pthread_mutex_lock(svr_conn[i].cn_mutex);
cp = &svr_conn[i];
if (cp->cn_active != FromClientDIS)
{
pthread_mutex_unlock(svr_conn[i].cn_mutex);
continue;
}
if ((now - cp->cn_lasttime) <= PBS_NET_MAXCONNECTIDLE)
{
pthread_mutex_unlock(svr_conn[i].cn_mutex);
continue;
}
if (cp->cn_authen & PBS_NET_CONN_NOTIMEOUT)
{
pthread_mutex_unlock(svr_conn[i].cn_mutex);
continue; /* do not time-out this connection */
}
/* NOTE: add info about node associated with connection - NYI */
snprintf(tmpLine, sizeof(tmpLine), "connection %d to host %s has timed out after %d seconds - closing stale connection\n",
i,
PAddrToString(&cp->cn_addr),
PBS_NET_MAXCONNECTIDLE);
log_err(-1, "wait_request", tmpLine);
/* locate node associated with interface, mark node as down until node responds */
/* NYI */
close_conn(i, TRUE);
pthread_mutex_unlock(svr_conn[i].cn_mutex);
} /* END for (i) */
return(0);
}
NOTE: I didn't write this code.
Is it possible that somewhere else in the program you close the same descriptor twice?
That could do this to you very easily.
HINT: systrace can determine whether this is happening.
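A contrived sketch of why a double close causes exactly this (descriptor numbers are reused, so a stale second close() can kill an unrelated, freshly accepted connection, and the peer's first write() then raises SIGPIPE):
int fd = accept(listen_fd, NULL, NULL);  /* say the kernel returns 7 */
/* ... handle and finish this connection ... */
close(fd);                               /* descriptor 7 released */
int fd2 = accept(listen_fd, NULL, NULL); /* kernel reuses 7: fd2 == 7 */
close(fd);                               /* BUG: stale close shuts fd2's socket;
                                            the peer's write() gets SIGPIPE/EPIPE */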
We are building a NAT program: we rewrite each packet that comes from our internal subnet, changing its source IP address with libnet functions (we catch the packet with libpcap, put it into sniff structures, and build the new packet with libnet).
Over TCP, the SYN/ACK packets are fine after the change, but when an HTTP GET request comes through, we can see in Wireshark that there is an error in the checksum field.
All the other fields are exactly the same as in the original packet.
Does anyone know what could cause this problem?
The new checksum in other packets is calculated as it should be, but in the HTTP packet it is not.
Modern ethernet cards can compute the checksum in hardware, so TCP stacks tend to offload the job to the card. As a result, it is quite common for the checksum to be invalid in Wireshark.
Side note: there is an option in Wireshark to validate the checksum:
Edit → Preferences → Protocols → TCP → "Validate the TCP checksum if possible"
Turn this off to stop Wireshark nagging you about the checksum.
Is this actually causing a problem - i.e. does the packet with "bad checksum" get dropped or processed incorrectly? Or are you just worried about the "bad checksum" notification? If the packets are processed OK, this may be just checksum offloading and it's nothing to worry about.
Wireshark documentation says:
If the received checksum is wrong, Wireshark won't even see the packet, as the Ethernet hardware internally throws away the packet.
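If you compute the checksum yourself rather than relying on offload, this is the calculation (a standalone sketch, not the poster's code). The sum covers the IPv4 pseudo-header plus the whole TCP segment, header and payload, which is why a length mistake often shows up only on data-bearing segments like an HTTP GET while empty SYN/ACKs check out:
#include <stdint.h>
#include <stddef.h>
#include <netinet/in.h> /* IPPROTO_TCP */
/* One's-complement sum of big-endian 16-bit words, starting from 'sum'. */
static uint32_t sum16(const uint8_t *p, size_t len, uint32_t sum)
{
while (len > 1) { sum += (uint32_t)((p[0] << 8) | p[1]); p += 2; len -= 2; }
if (len) sum += (uint32_t)(p[0] << 8); /* pad the odd trailing byte */
return sum;
}
/* RFC 793 TCP checksum: 'seg' points at the TCP header, 'seg_len' is
 * header + payload; src/dst are IPv4 addresses in host byte order.
 * The checksum field itself must be zero while summing. */
uint16_t tcp_checksum(uint32_t src, uint32_t dst,
const uint8_t *seg, size_t seg_len)
{
uint32_t sum = 0;
sum += (src >> 16) & 0xffff; sum += src & 0xffff; /* pseudo-header */
sum += (dst >> 16) & 0xffff; sum += dst & 0xffff;
sum += IPPROTO_TCP;
sum += (uint32_t)seg_len;
sum = sum16(seg, seg_len, sum);
while (sum >> 16) sum = (sum & 0xffff) + (sum >> 16); /* fold carries */
return (uint16_t)~sum; /* store big-endian in the packet */
}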
Hey, answering in her name.
The GET and the ACK are separate packets, and yes, the GET request is sent completely; I say this based on the fact that the packets are sent by Firefox or wget. We do not create the packets, nor the responses of the server (which is an Apache).
Our code just runs in the middle. We have a third machine besides the Client and the Server, and all three are virtual machines (in VMware Server [NAT; Client = Ubuntu; Server = Fedora]). It has two NICs (one connected to each subnet), and its goal is to change the Source IP and Source Port fields (in both directions of the network traffic).
Thanks for the explanation about TCP checksum offload, which Wireshark guesses is happening, but in my opinion that is not the case here, because the error appears on the server side too (not 100% sure that rules out the possibility, thoughts?).
If I'm right, what could be the reason for the checksum's incorrectness?
Here is a link to the current code:
http://rapidshare.com/files/393704745/13.5.10.tar.gz
Thanks a lot,
Aviv.
If it's better to have the code in here, there you go:
#include "./main.h"
int main(int argc, char **argv)
{
pcap_t *pcapInt, *pcapExt; /* pcap descriptor */
u_char *intPacket, *extPacket;
int i;
struct pcap_pkthdr intPkthdr, extPkthdr;
char errbuf[PCAP_ERRBUF_SIZE];
struct bpf_program filter_code_int, filter_code_ext; /* one program per device */
bpf_u_int32 intLocalNet, intNetmask, extLocalNet = 0;
printf("press <ctrl-c> to quit\n");
/* read the configuration file and store it's data in an array */
LIBXML_TEST_VERSION
xmlNode *cur_node = xmlDocGetRootElement(xmlReadFile(((argv[1]) != NULL ? argv[1] : "conf.xml"), NULL, 0));
strcpy(config.filter, "");
XMLtoConf(cur_node);
strcat(config.filter, " and not src host 192.168.191.137");
printf("FILTER: %s\n", config.filter);
/* get network number and mask associated with the internal capture device */
if (pcap_lookupnet(config.intNIC, &intLocalNet, &intNetmask, errbuf) == -1) {
fprintf(stderr, "Couldn't get netmask for device %s: %s\n",
config.intNIC, errbuf);
intLocalNet = 0;
intNetmask = 0;
}
/* open internal capture device */
pcapInt = pcap_open_live(config.intNIC, SNAP_LEN, 1, 1000, errbuf);
if (pcapInt == NULL) {
fprintf(stderr, "Couldn't open device %s: %s\n", config.intNIC, errbuf);
exit(EXIT_FAILURE);
}
/* open external capture device */
pcapExt = pcap_open_live(config.extNIC, SNAP_LEN, 1, 1000, errbuf);
if (pcapExt == NULL) {
fprintf(stderr, "Couldn't open device %s: %s\n", config.extNIC, errbuf);
exit(EXIT_FAILURE);
}
/* make sure we're capturing on an Ethernet device [2] */
if (pcap_datalink(pcapInt) != DLT_EN10MB) {
fprintf(stderr, "%s is not an Ethernet\n", config.intNIC);
exit(EXIT_FAILURE);
}
if (pcap_datalink(pcapExt) != DLT_EN10MB) {
fprintf(stderr, "%s is not an Ethernet\n", config.extNIC);
exit(EXIT_FAILURE);
}
/* compile the internal filter expression */
if (pcap_compile(pcapInt, &filter_code_int, config.filter, 1, intLocalNet) == -1) {
fprintf(stderr, "Couldn't parse filter %s: %s\n",
config.filter, pcap_geterr(pcapInt));
exit(EXIT_FAILURE);
}
/* compile the external filter expression (none yet, so accept everything) */
if (pcap_compile(pcapExt, &filter_code_ext, NULL, 1, extLocalNet) == -1) {
fprintf(stderr, "Couldn't parse external filter: %s\n",
pcap_geterr(pcapExt));
exit(EXIT_FAILURE);
}
/* apply the compiled internal filter */
if (pcap_setfilter(pcapInt, &filter_code_int) == -1) {
fprintf(stderr, "Couldn't install filter %s: %s\n",
config.filter, pcap_geterr(pcapInt));
exit(EXIT_FAILURE);
}
//apply the compiled external filter
if (pcap_setfilter(pcapExt, &filter_code_ext) == -1) {
fprintf(stderr, "Couldn't install external filter: %s\n",
pcap_geterr(pcapExt));
exit(EXIT_FAILURE);
}
while (1 == 1)
{
intPacket = (u_char*)pcap_next(pcapInt, &intPkthdr);
extPacket = (u_char*)pcap_next(pcapExt, &extPkthdr);
if (intPacket != NULL)
{
sniff(intPacket,0);
}
if (extPacket != NULL)
{
sniff(extPacket,1);
}
}
printf("\nCapture complete.\n");
/* cleanup */
pcap_freecode(&filter_code_int);
pcap_freecode(&filter_code_ext);
pcap_close(pcapInt);
pcap_close(pcapExt);
return (EXIT_SUCCESS);
}
int isStrBlank(unsigned char *s)
{
if (!s || strcmp((char *)s, "") == 0) return 1;
while(*s) {
if ( (' ' != *s) && ('\n' != *s) && ('\r' != *s) && ('\t' != *s)) return 0;
++s;
}
return 1;
}
static void XMLtoConf(xmlNode* node)
{
/*
* this initialize the library and check potential ABI mismatches
* between the version it was compiled for and the actual shared
* library used.
*/
LIBXML_TEST_VERSION
xmlNode *cur_node = node;
int i,flag=0;
for (; cur_node; cur_node = cur_node->next) {
if (cur_node->type == XML_ELEMENT_NODE) {
//if (isStrBlank(cur_node->children->content) == 1) continue;
if (strcmp((char *)cur_node->name, "subnet_address") == 0){
strcat(config.filter, "src net ");
strcat(config.filter,(char *)cur_node->children->content);
}
//printf("1: %s", config.filter);
if (strcmp((char *)cur_node->name, "NIC") == 0){
if (strcmp((char *)cur_node->parent->name, "internal") == 0){
config.intNIC = strdup((char *)cur_node->children->content);
}
else{
config.extNIC = strdup((char *)cur_node->children->content);
}
}
for (i = 0; strncmp((char *)cur_node->name, "machine_", 8) == 0; i++){
strcat(config.filter, " and not");
strcat(config.filter, " src host ");
flag=1;
strcat(config.filter, (char *)cur_node->children->content);
cur_node = cur_node->next;
}
}
XMLtoConf(cur_node->children);
}
/*
*Free the global variables that may
*have been allocated by the parser.
*/
xmlCleanupParser();
/*
* If device is NULL, that means the user did not specify one and is
* leaving it up libpcap to find one.
*/
}
void sniff(const u_char *packet , int flag)
{
int i,x,tcpOpen=0;
int protocol=-1; // 0- tcp, 1- udp, 2 -icmp
tcp = (struct sniff_tcp*)(packet + 34); //skipping the ethernet and IP layers
udp = (struct sniff_udp *)(packet + 34); //skipping the ethernet and IP layers
ip = (struct sniff_ip *)(packet + SIZE_ETHERNET);
icmp = (struct sniff_icmp *)(packet+ 34);
ether = (struct sniff_ethernet *)(packet);
printf("/n1--%d/n",IP_HL(ip)*4);
//if(ntohs(tcp->th_sport) == 80 || ntohs(tcp->th_dport) == 80)
//{
if(ip->ip_p==IP_TYPE_TCP )
{
protocol = 0;
payload_s = ntohs(ip->ip_len) - TH_OFF(tcp)*4 - IP_HL(ip)*4;
if (payload_s)
payload = (char* )(packet + SIZE_ETHERNET + TH_OFF(tcp)*4 + IP_HL(ip)*4);
else
payload = NULL;
}
else if(ip->ip_p == IP_TYPE_UDP){
protocol = 1;
/* udp_len covers the UDP header (8 bytes) plus payload */
payload_s = ntohs(udp->udp_len) - 8;
if (payload_s)
payload = (char* )(packet + SIZE_ETHERNET + IP_HL(ip)*4 + 8);
else
payload = NULL;
}
else if(ip->ip_p == IP_TYPE_ICMP)
{
protocol = 2;
payload_s = ntohs(ip->ip_len) - 8 - IP_HL(ip)*4;
if (payload_s)
payload = (char* )(packet + SIZE_ETHERNET + 8 + IP_HL(ip)*4);
else
payload = NULL;
}
if(flag == 0)// we got a packet from the internal
{
if( ip->ip_p == IP_TYPE_TCP)
{
for(i=0;i<NTABLE_LEN;i++) /* NTABLE_LEN: placeholder, the bound was lost in the paste */
{
if(nTable[i].protocol == ip->ip_p)
if(nTable[i].ip_src.s_addr == ip->ip_src.s_addr)
if(nTable[i].ip_dst.s_addr == ip->ip_dst.s_addr)
if(ntohs(nTable[i].srcPort) == ntohs(tcp->th_sport))
if(ntohs(nTable[i].dstPort) == ntohs(tcp->th_dport))
{
printf("we are in an open connection \n");
changeSrcPacket(packet ,(i+2000)%8000 ,protocol);
tcpOpen = 1;
break;
}
}
}
if(tcpOpen == 0)
{
for(i=0;i<NTABLE_LEN;i++)
{
if(ip->ip_p == IP_TYPE_UDP ||ip->ip_p == IP_TYPE_TCP )
{
if(nTable[i].free==0)
{
nTable[i].free=1;
nTable[i].ip_src = ip->ip_src;
nTable[i].ip_dst = ip->ip_dst;
nTable[i].srcPort = tcp->th_sport;
nTable[i].dstPort = tcp->th_dport;
nTable[i].protocol = ip->ip_p;
//printf("index : %d ipsrc : %s srcport : %d\n",i,inet_ntoa(nTable[i].ip_src),ntohs(nTable[i].srcPort));
////////////change packet and send it with the src ip of the nat machine
///////////and the src port is (i+2000)%8000
changeSrcPacket(packet ,(i+2000)%8000 ,protocol);
break;
}
}
else
{
if(icmpTable[i].free == 0)
{
icmpTable[i].free=1;
icmpTable[i].ip_src = ip->ip_src;
icmpTable[i].ip_dst = ip->ip_dst;
icmpTable[i].protocol = ip->ip_p;
icmpTable[i].icmp_type = icmp->icmp_type;
icmpTable[i].icmp_id1 = icmp->icmp_id1;
changeSrcPacket(packet ,-1 ,protocol);
break;
}
}
}
}
}
else // flag = 1
{
// we got a packet from the external. we want to send it to the right
// place in the internal
//nTable[(tcp->th_dport-2000)%8000];
//printf("dst: %d , src: %d \n",ntohs(tcp->th_dport),ntohs(tcp->th_sport));
if(ip->ip_p== IP_TYPE_ICMP)
{
changeDstPacket (packet,-1,protocol);
}
else
{
for(x=0;x<NTABLE_LEN;x++)
{
if(ip->ip_p == IP_TYPE_TCP)
{
if(((int)(ntohs(tcp->th_dport))-2000)%8000 == x && nTable[x].free == 1)
{
changeDstPacket (packet,x,protocol);
break;
}
}
else
{
if(((int)(ntohs(udp->udp_destport))-2000)%8000 == x && nTable[x].free == 1)
{
changeDstPacket (packet,x,protocol);
break;
}
}
}
}
// we create a packet with thw same src ip and port as we got
// and only the dst port and ip will be the ones that are
//saved in nTable[(tcp->th_dport-2000)%8000]
// now if it is in udp we will put 0 in nTable[(tcp->th_dport-2000)%8000].free
}
}
void changeSrcPacket(const u_char *packet , int srcPort, int protocol)
{
libnet_t *l;
libnet_ptag_t ipv, ptag, popt,icmp;
char errbuf[LIBNET_ERRBUF_SIZE];
uint32_t nat_adder;
size_t ip_hlen=IP_HL(ip)*4;
size_t ip_len=ntohs(ip->ip_len);
size_t tcp_len = ip_len - ip_hlen;
printf("\n%d %d %d %d",IP_HL(ip),ip_hlen,ip_len,tcp_len);
icmp = ptag = ipv = LIBNET_PTAG_INITIALIZER;
/* initialize the libnet handle before passing it to libnet_name2addr4() */
l = libnet_init(LIBNET_RAW4,config.extNIC, errbuf);
nat_adder = libnet_name2addr4(l,"192.168.191.137",LIBNET_DONT_RESOLVE);
if(protocol == 0)//TCP
{
if(TH_OFF(tcp)*4 > TCP_HEADER_SIZE)
{
options = (char*)packet + 54;
options_s = TH_OFF(tcp)*4 - TCP_HEADER_SIZE;
popt = libnet_build_tcp_options((u_int8_t*)options,options_s, l,0);
}
ptag = libnet_build_tcp(
srcPort, // source port
ntohs(tcp->th_dport), // dest port
ntohl(tcp->th_seq), // sequence number (convert from network order, as below)
ntohl(tcp->th_ack), // ack number
tcp->th_flags, // flags
ntohs(tcp->th_win), // window size
0, // checksum
ntohs(tcp->th_urp), // urg ptr
TH_OFF(tcp)*4, // total length of the TCP packet
(u_int8_t*)payload, // response
payload_s, // response_length
l, // libnet_t pointer
ptag // ptag
);
printf("%d, %d, %d, %d, %d\n", TH_OFF(tcp)*4, IP_HL(ip)*4, payload_s, ntohs(ip->ip_len),TH_OFF(tcp)*4);
if(ptag==-1)
{
fprintf(stderr, "Error building TCP header: %s\n",libnet_geterror(l));
exit(1);
}
if (libnet_do_checksum(l, (u_int8_t*)ip, IPPROTO_TCP, TH_OFF(tcp)*4) < 0)
{
fprintf(stderr, "Error computing TCP checksum: %s\n", libnet_geterror(l));
exit(1);
}
}
if(protocol == 1)// UDP
{
ptag = libnet_build_udp(
ntohs(udp->udp_srcport), /* source port */
ntohs(udp->udp_destport), /* destination port */
udp->udp_len, /* packet length */
0, /* checksum */
(u_int8_t*)payload, /* payload */
payload_s, /* payload size */
l, /* libnet handle */
ptag); /* libnet id */
if(ptag==-1)
{
fprintf(stderr, "Error building UDP header: %s\n",libnet_geterror(l));
exit(1);
}
}
// if(protocol == 2)//ICMP
//{
///add functions of icmp
// icmp = libnet_build_icmpv4_echo(
//ICMP_ECHO, /* type */
//0, /* code */
//0, /* checksum */
//icmp->icmp_id1, /* id */
//icmp->icmp_seq1, /* sequence number */
//payload, /* payload */
//payload_s, /* payload size */
//l, /* libnet context */
//icmp); /* ptag */
//if (icmp == -1)
//{
// fprintf(stderr, "Can't build ICMP header: %s\n",
// libnet_geterror(l));
//}
// }
ipv = libnet_build_ipv4(
/* total length */
ntohs(ip->ip_len),
ip->ip_tos, /* type of service */
ntohs(ip->ip_id), /* identification */
ntohs(ip->ip_off), /* fragmentation */
ip->ip_ttl, /* time to live */
ip->ip_p, /* protocol */
0, /* checksum */
nat_adder, /* (Nat) source */
ip->ip_dst.s_addr, /* destination */
NULL, /* payload */
0, /* payload size */
l, /* libnet handle */
0); /* ptag */
if(ipv == -1)
{
fprintf(stderr,"Error building IP header: %s\n", libnet_geterror(l));
exit(1);
}
/*if (libnet_do_checksum(l, (u_int8_t*)l, IPPROTO_IP, ntohs(ip->ip_len) + payload_s) < 0)
{
fprintf(stderr, "Error computing IP checksum: %s\n", libnet_geterror(l));
exit(1);
}*/
if(protocol == 0) /* TCP: track connection teardown */
{
if(tcp->th_flags == 0x01)
{
nTable[index].fin++;
}
if(tcp->th_flags == 0x11 && nTable[index].fin == 1)
{
nTable[index].fin++;
}
if(tcp->th_flags == 0x10 && nTable[index].fin == 2)
{
nTable[index].free = 0;
nTable[index].fin = 0;
}
}
// Fix IP header checksum
// ip->ip_sum = 0;
if (libnet_do_checksum(l, (u_int8_t*)ip, IPPROTO_IP, IP_HL(ip)*4) < 0)
fprintf(stderr, "Error computing IP checksum: %s\n", libnet_geterror(l));
if ( libnet_write(l) == -1 )
fprintf(stderr, "Error writing packet: %s\n",libnet_geterror(l));
libnet_destroy(l);
}
void changeDstPacket(const u_char *packet , int index, int protocol)
{
libnet_t *l;
libnet_ptag_t ipv, ptag, popt; /* declarations reconstructed from uses below */
char errbuf[LIBNET_ERRBUF_SIZE];
int i;
printf("dst port: %d src port: %d\n",ntohs(tcp->th_sport),ntohs(nTable[index].srcPort));
printf("src ip : %s dst ip: %s\n",inet_ntoa(ip->ip_src), inet_ntoa(nTable[index].ip_src));
ptag = ipv = LIBNET_PTAG_INITIALIZER;
if(protocol == 0 || protocol == 1) // udp or tcp
{
if(nTable[index].free == 1)
{
l = libnet_init(LIBNET_RAW4,config.intNIC, errbuf);
if(protocol == 0 ) //TCP
{
if(TH_OFF(tcp)*4 > TCP_HEADER_SIZE)
{
options = (char*)packet + 54;
options_s = TH_OFF(tcp)*4 - TCP_HEADER_SIZE;
popt = libnet_build_tcp_options((u_int8_t*)options,options_s, l,0);
}
ptag = libnet_build_tcp(
ntohs(tcp->th_sport), // source port
ntohs(nTable[index].srcPort), // dest port
ntohl(tcp->th_seq), // sequence number
ntohl(tcp->th_ack), // ack number
tcp->th_flags, // flags
ntohs(tcp->th_win), // window size
0, // checksum
ntohs(tcp->th_urp), // urg ptr
TH_OFF(tcp)*4, // total length of the TCP packet
(u_int8_t*)payload, // response
payload_s, // response_length
l, // libnet_t pointer
ptag // ptag
);
if(ptag==-1)
{
fprintf(stderr, "Error building TCP header: %s\n",libnet_geterror(l));
exit(1);
}
}
if(protocol == 1)// UDP
{
ptag = libnet_build_udp(
ntohs(udp->udp_srcport), /* source port */
ntohs(nTable[index].srcPort), /* destination port */
udp->udp_len, /* packet length */
0, /* checksum */
(u_int8_t*)payload, /* payload */
payload_s, /* payload size */
l, /* libnet handle */
ptag); /* libnet id */
if(ptag==-1)
{
fprintf(stderr, "Error building UDP header: %s\n",libnet_geterror(l));
exit(1);
}
}
}
}
if(protocol == 2) // ICMP
{
for(i=0;i<NTABLE_LEN;i++) /* placeholder bound, lost in the paste */
{
if(icmpTable[i].icmp_type == icmp->icmp_type)
if(icmpTable[i].ip_dst.s_addr == ip->ip_src.s_addr)
if(icmpTable[i].icmp_id1 == icmp->icmp_id1)
{
index = i;
break;
}
}
///add functions of icmp
}
ipv = libnet_build_ipv4(
/* total length */
ntohs(ip->ip_len),
ip->ip_tos, /* type of service */
ntohs(ip->ip_id), /* identification */
ntohs(ip->ip_off), /* fragmentation */
ip->ip_ttl, /* time to live */
ip->ip_p, /* protocol */
0, /* checksum */
ip->ip_src.s_addr, /* (Nat) source */
nTable[index].ip_src.s_addr, /* destination */
NULL, /* payload */
0, /* payload size */
l, /* libnet handle */
0); /* ptag */
if(ipv == -1)
{
fprintf(stderr,"Error building IP header: %s\n", libnet_geterror(l));
exit(1);
}
/*if (libnet_do_checksum(l, (u_int8_t*)l, IPPROTO_IP, ntohs(ip->ip_len) + payload_s) < 0)
{
fprintf(stderr, "Error computing IP checksum: %s\n", libnet_geterror(l));
exit(1);
}*/
if(protocol == 0 || protocol == 1)
{
if(protocol == 0) /* TCP: track FIN/FIN-ACK/ACK teardown */
{
if(tcp->th_flags == 0x01)
{
nTable[index].fin++;
}
if(tcp->th_flags == 0x11 && nTable[index].fin == 1)
{
nTable[index].fin++;
}
if(tcp->th_flags == 0x10 && nTable[index].fin == 2)
{
nTable[index].free = 0;
nTable[index].fin = 0;
}
}
else
{
nTable[index].free = 0;
nTable[index].fin = 0;
}
}
if ( libnet_write(l) == -1 )
fprintf(stderr, "Error writing packet: %s\n",libnet_geterror(l));
libnet_destroy(l);
}