I'am curently building a C Tasker, the goal of the program is to connect to a Main Server via Websockets and keep the connection Alive(Client Watchdog) as long as it is possible.
In the mean time the Tasker should be able to receive Jsons from clients (Server Behaviour), process them accordingly and answer.
To perfom this i use GLib's multi threading methods and the Libwebsockets library for the network part.
The C Client/Server Tasker is working well so the next step should be : when the connection is lost with the Main Server to try to reconnect "forever" until the Main Server is up, or to Fallback on another Main Server.
The problem here is that when i cut the connection with the server the watchdog thread seems to ignore the reconnection. As the thread is completely isolated from the libwesockets callbacks.
Here are the code snippets :
Call of Tasker Client in Tasker Server init
int janus_websockets_init(janus_transport_callbacks *callback, const char *config_path) {
if(g_atomic_int_get(&stopping)) {
return -1;
}
if(callback == NULL || config_path == NULL)
return -1;
gateway = callback;
Client_start()
Tasker Server configuration
struct lws_context_creation_info info;
memset(&info, 0, sizeof info);
info.port = wsport;
info.iface = ip ? ip : interface;
info.protocols = wss_protocols;
info.extensions = NULL;
info.ssl_cert_filepath = NULL;
info.ssl_private_key_filepath = NULL;
info.gid = -1;
info.uid = -1;
info.options = 0;
/* Create the WebSocket context */
wss = lws_create_context(&info);
if(wss == NULL) {
ZETA_LOG(LOG_FATAL, "Error initializing libwebsockets...\n");
} else {
ZETA_LOG(LOG_INFO, "WebSockets server started (port %d)...\n", wspor\
t);
}
Tasker Client Configuration
int
Client_start()
{
//* register the signal SIGINT handler */
struct lws_context_creation_info info;
memset(&info, 0, sizeof info);
info.port = CONTEXT_PORT_NO_LISTEN;
info.iface = NULL;
info.protocols = wss_protocols;
info.ssl_cert_filepath = NULL;
info.ssl_private_key_filepath = NULL;
info.extensions = lws_get_internal_extensions();
info.gid = -1;
info.uid = -1;
info.options = 0;
protocol.name = "janus-protocol";
protocol.callback = &ws_client_service_callback;
protocol.per_session_data_size = sizeof(struct session_data);
protocol.rx_buffer_size = 0;
protocol.id = 0;
protocol.user = NULL;
context = lws_create_context(&info);
Tasker Client Watchdog Thread Call
if (context == NULL) {
printf(KRED"[Main Service] context is NULL.\n"RESET);
return -1;
}
wsz = lws_client_connect(context, "xxx.xxx.xxx.xxx",5000, 0,
"/", "xxx.xxx.xxx.xxx:5000", NULL,
protocol.name, -1);
if (wsz == NULL) {
printf(KRED"[Main] wsi create error.\n"RESET);
return -1;
}
reco = false;
printf(KGRN"[Main] wsi create success.\n"RESET);
printf(KRED"[Main Service] %d\n", ConnOk, RESET);
keepAlive = g_thread_try_new("KeepAliveLoop", keep_alive_routine, context, NULL);
while(!dead)
lws_service(context, 50);
lws_context_destroy(context);
return 0;
}
Finnaly The Tasker Client Thread routine Loop and Thread Loop Execution code
static void keep_alive_loop()
{
json_t *hb = NULL;
char *toto = NULL;
if (reco == true)
{
wsz = lws_client_connect(context, "xxx.xxx.xxx.xxx",5000, 0,
"/", "xxx.xxx.xxx.xxx:5000", NULL,
protocol.name, -1);
printf(KRED"[Main Service] UNDER LOOOOOOPING .\n"RESET);
reco = false;
return;
}
hb = json_object();
json_object_set_new(hb, "zetapush", json_string("keepalive"));
json_object_set_new(hb, "timeout", json_integer(8000));
toto = json_dumps(hb, 0);
websocket_client_keep_alive_write_back(hb);
lws_callback_on_writable(wsz);
}
static void *keep_alive_routine(void *data)
{
printf(KBRN"[pthread_routine] WATCHDOG READY AND ANGRY .\n"RESET);
g_timeout_add_seconds(8, keep_alive_loop, NULL);
alive_loop = g_main_loop_new(NULL, 0);
g_main_loop_run(alive_loop);
}
If any further precision is needed let me know, i can also upload to github for better reading, any help or hint would be much appreciated :).
Related
I'm developping an application which will run on a bosch device - XDK - http://xdk.bosch-connectivity.com/.
At this moment, I have a task which implements an http client and sends POST requests to a server. However, the main purpose of my application is to be able to move between areas with network coverage/no coverage. This means that I'm constantly suspending/re-starting the task that implements the http client.
So I'm having problem with the http client in the following scenario: I start the application in a region with coverage, then I move to a region with no coverage and finally, go back again to a region with coverage. That's here I'm getting problems. From that moment, I can't connect anymore with the http client.
I'll post here pieces of code of my application so that you can suggest me some alterations to solve the problem.
All the functions such as HttpClient_initRequest, HttpMsg_setReqUrl, Msg_prependPartFactory, ..., belong to the API provided for the XDK.
void appInitSystem(xTimerHandle xTimer) {
(void) (xTimer);
return_t retValue = FAILURE;
retcode_t wlanRet = RC_PLATFORM_ERROR;
wlanRet = wlan_init();
if (wlanRet != RC_OK){
printf("Network init/connection failed %i \r\n", wlanRet);
assert(0);
}
wlanRet = PAL_initialize();
if (wlanRet != RC_OK){
printf("PAL and network initialize %i \r\n", wlanRet);
assert(0);
}
PAL_socketMonitorInit();
/* start http client */
wlanRet = HttpClient_initialize();
if (wlanRet != RC_OK){
printf("Failed to initialize http client\r\n ");
assert(0);
}
if (RC_OK != PAL_getIpaddress((uint8_t*) "52.58.121.5", &destAddr))
return;
retValue = createTasks();
if (retValue == FAILURE)
assert(0);
}
retcode_t wlan_init(void) {
WLI_connectStatus_t retconnection;
NCI_ipSettings_t myIpSettings;
char ipAddress[PAL_IP_ADDRESS_SIZE] = { 0 };
Ip_Address_T* IpaddressHex = Ip_getMyIpAddr();
WLI_connectSSID_t connectSSID;
WLI_connectPassPhrase_t connectPassPhrase;
NCI_return_t ReturnValue = NCI_FAILURE;
int32_t Result = INT32_C(-1);
if (WLI_SUCCESS != WLI_init())
return (RC_PLATFORM_ERROR);
connectSSID = (WLI_connectSSID_t) WLAN_CONNECT_WPA_SSID;
connectPassPhrase = (WLI_connectPassPhrase_t) WLAN_CONNECT_WPA_PASS;
/*DHCP */
ReturnValue = NCI_setIpDhcp(NULL);
if (ReturnValue != NCI_SUCCESS){
printf("Error in setting IP to DHCP\n\r");
return (RC_PLATFORM_ERROR);
}
if (WLI_SUCCESS == WLI_connectWPA(connectSSID, connectPassPhrase, NULL){
ReturnValue = NCI_getIpSettings(&myIpSettings);
if (NCI_SUCCESS == ReturnValue){
*IpaddressHex = Basics_htonl(myIpSettings.ipV4);
Result = Ip_convertAddrToString(IpaddressHex, ipAddress);
if (Result < 0){
printf("Couldn't convert the IP address to string format \r\n ");
return (RC_PLATFORM_ERROR);
}
printf("Connected to WPA network successfully \r\n ");
printf(" Ip address of the device %s \r\n ", ipAddress);
return (RC_OK);
}
else{
printf("Error in getting IP settings\n\r");
return (RC_PLATFORM_ERROR);
}
}
else
return (RC_PLATFORM_ERROR);
return RC_OK;
}
/* Task which calls the nttp client */
void dataOffload(void* handle) {
(void) handle;
for (;;){
....
http_connServer(NULL);
...
vTaskDelay(1000/portTICK_RATE_MS);
}
}
void http_connServer(void * pvParameters){
retcode_t rc = RC_OK;
Msg_T* msg_ptr;
Ip_Port_T destPort = (Ip_Port_T) DEST_PORT_NUMBER;
static Callable_T SentCallable;
char const *url_ptr = "/";
Callable_T * Callable_pointer;
Callable_pointer = Callable_assign(&SentCallable, http_callbackOnSent);
if (Callable_pointer == NULL)
return;
rc = HttpClient_initRequest(&destAddr, Ip_convertIntToPort(destPort), &msg_ptr);
if (rc != RC_OK || msg_ptr == NULL)
return;
rc = Msg_prependPartFactory( msg_ptr, &http_setContent);
if (rc != RC_OK)
return;
rc = HttpMsg_setReqUrl(msg_ptr, url_ptr);
if (rc != RC_OK)
return;
HttpMsg_setReqMethod(msg_ptr, Http_Method_Post);
rc = HttpClient_pushRequest(msg_ptr, &SentCallable,
http_clientResponseCallback);
if (rc != RC_OK){
printf("Failed HttpClient_pushRequest \r\n ");
return;
}
}
I'm trying to debug a code that is using a libevent library. In that library, there is a function event_new that is suppose to create an event_cb. Somehow after I dispatch the event base, the event_cb cannot be called or accessed. This problem only happens on hpux itanium. This code works on hpux pa-risc, Redhat, AIX, and Solaris. Is there any certain thing that need to be set?
This is part of the code
int ttypread (int fd, Header *h, char **buf)
{
int c,k;
struct user_data user_data;
struct bufferevent *in_buffer;
struct event_config *evconfig;
log_debug("inside ttypread");
in_buffer = NULL;
user_data.fd = fd;
user_data.h = h;
user_data.buf = buf;
log_debug("from user_data, fd = %d",user_data.fd); //the log_debug is a debugging function for me to check the value sent by the system. I use it to compare between each platform
log_debug("from user_data, buf = %s",user_data.buf);
log_debug("from user_data, h.len = %d",user_data.h->len);
log_debug("from user_data, h.type = %d",user_data.h->type);
evconfig = event_config_new();
if (evconfig == NULL) {
log_error("event_config_new failed");
return -1;
}
if (event_config_require_features(evconfig, EV_FEATURE_FDS)!=0) {
log_error("event_config_require_features failed");
return -1;
}
base = event_base_new_with_config(evconfig);
if (!base) {
log_error("ttypread:event_base_new failed");
return -1;
}
const char* method; //these 3 lines are the new line edited
method = event_base_get_method(base);
log_debug("ttyread is using method = %s",method);
ev = event_new(base, fd, EV_READ|EV_PERSIST, ttypread_event_cb, &user_data);
c = event_add(ev, NULL);
log_debug("ttypread passed event_add with c value is %d",c);
in_buffer = bufferevent_socket_new(base, STDIN_FILENO, BEV_OPT_CLOSE_ON_FREE);
log_debug("ttypread passed bufferevent_socket_new");
if(in_buffer == NULL){
log_debug("problem with bufferevent_socket_new");
}
bufferevent_setcb(in_buffer, in_read_cb, NULL, in_event_cb, NULL);
bufferevent_disable(in_buffer, EV_WRITE);
bufferevent_enable(in_buffer, EV_READ);
k =event_base_dispatch(base);
log_debug("event_base have been dispatched"); //when looking at the debugging file, the other plaform will go to ttypread_event_cb function. But for hpux itanium, it stays here.
if (k == 0){
log_debug("event_base_dispatch returned 0");
} else if (k == -1){
log_debug("event_base_dispatch returned -1");
} else {
log_debug("event_base_dispatch returned 1");
}
event_base_free(base);
event_free(ev);
log_debug("finish ttypread");
log_debug("ttypread_ret will return [%d]",ttypread_ret);
return ttypread_ret;
}
void ttypread_event_cb(evutil_socket_t fd, short events, void *arg)
{
int nread;
struct timeval t;
struct user_data *user_data;
user_data = (struct user_data*)arg;
nread = 0;
log_debug("inside ttypread_event_cb");
if (events & EV_READ) {
log_debug("got events & EV_READ");
nread = ttyread(fd, user_data->h, user_data->buf);
if (nread == -1) {
ttypread_ret = -1;
event_del(ev);
event_base_loopexit(base, NULL);
} else if (nread == 0) {
if (access(input_filename, F_OK)!=0) {
log_debug("cannot access [%s]",input_filename);
tcsetattr(0, TCSANOW, &old); /* Return terminal state */
exit(EXIT_SUCCESS);
}
t.tv_sec = 0;
t.tv_usec = 250000;
select(0, 0, 0, 0, &t);
} else {
ttypread_ret = 1;
event_del(ev);
event_base_loopexit(base, NULL);
}
}
else if (events & EV_WRITE) {
log_debug("got events & EV_WRITE");
}
}
Not sure if this help. But just some info on the hpux itanium
uname -a = HP-UX hpux-ita B.11.23 U ia64
If you need any additional info or other declaration on function, just leave a comment and I will edit the question.
EDIT : i've added a function inside ttypread. Somehow for hpux itanium its returning devpoll while other platform are returning poll. Im not sure if this is the problem. But if that is so, is there any way for me to change it?
After checking the result from event_base_get_method, I found out that only on my hpux-itanium used devpoll method. This is how I solve it.
char string[8] = "devpoll";
struct user_data user_data;
struct bufferevent *in_buffer;
struct event_config *evconfig;
const char *method;
const char *devpoll;
devpoll = string;
in_buffer = NULL;
user_data.fd = fd;
user_data.h = h;
user_data.buf = buf;
evconfig = event_config_new();
if (evconfig == NULL) {
log_error("event_config_new failed");
return -1;
}
if (event_config_require_features(evconfig, EV_FEATURE_FDS)!=0) {
log_error("event_config_require_features failed");
return -1;
}
if (event_config_avoid_method(evconfig,devpoll) != 0)
{
log_error("Failed to ignore devpoll method");
}
Force the libevent to ignore using devpoll and use poll instead.
I currently use ldap_bind_s to bind to the server in my C application with SEC_WINNT_AUTH_IDENTITY struct, but the function is marked as deprecated. For this reason I would like to change it to the ldap_sasl_bind_s function.
int main(void) {
LDAP *ld;
int rc = 0;
char *binddn = "cn=admin,dc=local";
const int version = LDAP_VERSION3;
SEC_WINNT_AUTH_IDENTITY wincreds;
struct berval saslcred;
wincreds.User = "admin";
wincreds.UserLength = 5;
wincreds.Password = "secret";
wincreds.PasswordLength = 6;
wincreds.Domain = NULL;
wincreds.DomainLength = 0;
wincreds.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
ld = ldap_initA("localhost", LDAP_PORT);
ldap_set_optionA(ld, LDAP_OPT_PROTOCOL_VERSION, &version);
rc = ldap_bind_sA(ld, binddn, (PCHAR)&wincreds, LDAP_AUTH_DIGEST);
printf("0x%x\n", rc); // It's OK (0x0)
ldap_unbind(ld);
saslcred.bv_val = "secret";
saslcred.bv_len = 6;
rc = ldap_sasl_bind_sA(ld, binddn, "DIGEST-MD5", &saslcred, NULL, NULL, NULL);
printf("0x%x\n", rc); // Returns with 0x59
ldap_unbind(ld)
return 0;
}
The ldap_sasl_bind_s returns with LDAP_PARAM_ERROR code. Clearly, the function parameters are wrong above, but I can't find a working sample code with winldap and SASL binding.
I would be grateful for some guide, how to make this code working.
The last parameter of ldap_sasl_bind_sA cannot be NULL. It has to point to a place the function can put the server's response (struct berval*).
...
struct berval* serverResponse = NULL;
rc = ldap_sasl_bind_sA(ld, binddn, "DIGEST-MD5", &saslcred, NULL, NULL, &serverResponse);
...
So finally, after some research and debugging in the past two weeks, I've managed to write a working example code that uses DIGEST-MD5 authentication with WinLDAP's ldap_sasl_bind_s function. The corresponding RFC, this answer and the official SSPI documentation gave me a lot of helps.
Some gotchas that I ran into:
Regardless what documentation says about the ldap_connect function: If you would like to use the ldap_sasl_bind_s function it is not just a "good programming practice" to call it first, it is necessary. Without it the ldap_sasl_bind_s returns with LDAP_SERVER_DOWN (0x51) error code.
The valid pszTargetName (digest-uri) parameter is crucial for the InitializeSecurityContext function to avoid invalid token error.
I hope it will help others to spend less time about figuring out how to use SASL binding mechanisms with WinLDAP.
#include <stdio.h>
#include <windows.h>
#include <winldap.h>
#define SECURITY_WIN32 1
#include <security.h>
#include <sspi.h>
int _tmain(int argc, _TCHAR* argv[]) {
LDAP *ld;
int rc = 0;
const int version = LDAP_VERSION3;
SEC_WINNT_AUTH_IDENTITY wincreds;
struct berval *servresp = NULL;
SECURITY_STATUS res;
CredHandle credhandle;
CtxtHandle newhandle;
SecBufferDesc OutBuffDesc;
SecBuffer OutSecBuff;
SecBufferDesc InBuffDesc;
SecBuffer InSecBuff;
unsigned long contextattr;
ZeroMemory(&wincreds, sizeof(wincreds));
// Set credential information
wincreds.User = (unsigned short *)L"root";
wincreds.UserLength = 4;
wincreds.Password = (unsigned short *)L"p#ssword";
wincreds.PasswordLength = 8;
wincreds.Domain = NULL;
wincreds.DomainLength = 0;
wincreds.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;
res = AcquireCredentialsHandle(NULL, L"WDigest", SECPKG_CRED_OUTBOUND,
NULL, &wincreds, NULL, NULL, &credhandle, NULL);
// Buffer for the output token.
OutBuffDesc.ulVersion = 0;
OutBuffDesc.cBuffers = 1;
OutBuffDesc.pBuffers = &OutSecBuff;
OutSecBuff.BufferType = SECBUFFER_TOKEN;
OutSecBuff.pvBuffer = NULL;
ld = ldap_init(L"localhost", LDAP_PORT);
rc = ldap_set_option(ld, LDAP_OPT_PROTOCOL_VERSION, (void*)&version);
rc = ldap_connect(ld, NULL); // Need to connect before SASL bind!
do {
if (servresp != NULL) {
InBuffDesc.ulVersion = 0;
InBuffDesc.cBuffers = 1;
InBuffDesc.pBuffers = &InSecBuff;
/* The digest-challenge will be passed as an input buffer to
InitializeSecurityContext function */
InSecBuff.cbBuffer = servresp->bv_len;
InSecBuff.BufferType = SECBUFFER_TOKEN;
InSecBuff.pvBuffer = servresp->bv_val;
/* The OutBuffDesc will contain the digest-response. */
res = InitializeSecurityContext(&credhandle, &newhandle, L"ldap/localhost", ISC_REQ_MUTUAL_AUTH | ISC_REQ_ALLOCATE_MEMORY,
0, 0, &InBuffDesc, 0, &newhandle, &OutBuffDesc, &contextattr, NULL);
}
else {
res = InitializeSecurityContext(&credhandle, NULL, L"ldap/localhost", ISC_REQ_MUTUAL_AUTH, 0, 0, NULL, 0, &newhandle, &OutBuffDesc, &contextattr, NULL);
}
switch (res) {
case SEC_I_COMPLETE_NEEDED:
case SEC_I_COMPLETE_AND_CONTINUE:
case SEC_E_OK:
case SEC_I_CONTINUE_NEEDED:
break;
case SEC_E_INVALID_HANDLE:
return -2;
case SEC_E_INVALID_TOKEN:
return -1;
default:
break;
}
struct berval cred;
cred.bv_len = OutSecBuff.cbBuffer;
/* The digest-response will be passed to the server
as credential after the second (loop)run. */
cred.bv_val = (char *)OutSecBuff.pvBuffer;
// The servresp will contain the digest-challange after the first call.
rc = ldap_sasl_bind_s(ld, L"", L"DIGEST-MD5", &cred, NULL, NULL, &servresp);
ldap_get_option(ld, LDAP_OPT_ERROR_NUMBER, &res)
} while (res == LDAP_SASL_BIND_IN_PROGRESS);
if (rc != LDAP_SUCCESS) {
printf("Bind failed with 0x%x\n", rc);
} else {
printf("Bind succeeded\n");
}
return 0;
}
I have a some multithreading application. This is a part of main function:
/* ...some code before... */
for(i=0; i<THREADS_COUNT; i++){
status = pthread_create(&threads[i], NULL, thread_main, NULL);
if(status < 0){
fprintf(stderr, "threads error\n");
exit(2);
}
}
status = sem_init(&sem, 0, 0);
if(status < 0){
fprintf(stderr, "sem_init error\n");
exit(4);
}
/* recv loop */
while (1) {
rv = recv(fd, buf, BUFSIZE, 0);
if(rv >= 0){
current = malloc(sizeof(struct queue_msg_list));
/* adding to our local queue */
if(current != NULL){
current->rv = rv;
current->h = h;
memcpy(&(current->buf), &buf, BUFSIZE);
current->next = NULL;
if(main_head == NULL){
main_head = main_tail = current;
}
else {
main_tail->next = current;
main_tail = current;
}
count++;
}
/* if we can carry the local queue to the queue for threads then we are doing it */
if(!pthread_mutex_trylock(&mlock)){
if(thread_head == NULL){
/* if the threads-queue is empty then replace queues */
thread_head = main_head;
thread_tail = main_tail;
} else {
/* if the threads-queue is not empty then add the local queue to the threads-queue */
thread_tail->next = main_head;
thread_tail = main_tail;
}
/* we increasing a semaphore of number of added elements */
for(i=0; i<count; i++){
sem_post(&sem);
printf("sem_post \n");
}
count = 0;
pthread_mutex_unlock(&mlock);
main_head = NULL;
main_tail = NULL;
}
}
}
/* ...some code after... */
And this is a function for threads:
void *thread_main(void *arg)
{
struct queue_msg_list *current;
char buf[BUFSIZE] __attribute__ ((aligned));
struct nfq_handle *h;
int rv;
while(1){
sem_wait(&sem);
pthread_mutex_lock(&mlock);
/* if no blocking then we are working with packet and removing it from list after */
current = thread_head;
rv = current->rv;
h = current->h;
memcpy(&buf, &(current->buf), BUFSIZE);
thread_head = thread_head->next;
pthread_mutex_unlock(&mlock);
nfq_handle_packet(h, buf, rv);
free(current);
}
}
This application always works true on PC. I have put this application to some router (linux kernel version in firmware is 2.6.30). It works correctly sometimes but sometimes it works incorrectly. :)
Threads hang on calling sem_wait(&sem); sometimes but the semaphore value is above zero (1, 2, 3, etc). Why?
P.S. I tried to check the return value of sem_wait(&sem); but did not get it.
code:
local void*
s_accept_connections(tmpsock)
void* tmpsock;
{
int32_t newfd;
int32_t tmp;
SOCKADDR_IN newsockaddr;
pthread_t id;
Connection* newconn;
const char *s;
char **splited;
int i;
StringVec *p;
StringVec* next;
Socket* sock;
tmp = sizeof(newsockaddr);
p = NULL;
next = NULL;
sock = (Socket *)tmpsock;
if (!sock)
return 0;
while (true){
newfd = accept(sock->fd,(SOCKADDR *)&newsockaddr,&tmp);
if (newfd <0){
if (check_error_async()){
pthread_mutex_lock(&g_socket_mutex);
#ifdef _WIN32
Sleep(1000);
#else
sleep(1);
#endif
pthread_mutex_unlock(&g_socket_mutex);
continue;
}
}else{
newconn = (Connection *)MyMalloc(sizeof(*newconn));
newconn->fd = newfd;
newconn->addr = newsockaddr;
s = (const char *)inet_ntoa(newsockaddr.sin_addr);
p = split_string(s,".");
if (p != NULL){
splited = (char **)MyMalloc(sizeof(*splited) + 12);
i = 0;
for (; p != NULL; p = next){
if (p && p->next){
next = p->next;
}else{ break; }
splited[i] = p->value;
i++;
}
newconn->ip = swap_uint32_t((uint32_t)(atoi(splited[0])) + (atoi(splited[1]) << 8) + (atoi(splited[2]) << 16) + (atoi(splited[3]) << 24));
MyFree((char *)splited);
}else{
newconn->ip = 0;
}
newconn->closed = false;
newconn->state = 0;
newconn->state |= S_NEED_LOGIN;
pthread_mutex_init(&g_ping_mutex,NULL);
pthread_cond_init(&g_ping_cond,NULL);
pthread_create(&id,NULL,s_ping_thread,(void *)newconn);
a_conn(&sock->conn,newconn);
#ifndef NDEBUG
_("Accepting connection...\n");
#endif
if (sock->has_callback){
sock->func(newconn);
#ifndef NDEBUG
_("Accepted connection\n");
#endif
}
}
}
return 0;
}
void
start_accept(sock,join)
Socket* sock;
bool join;
{
pthread_t id;
pthread_attr_t attr;
if (!sock)
return;
if (!sock->conn){
sock->conn = (Connection *)MyMalloc(sizeof(*sock->conn));
if (!sock->conn)
return;
}
set_nonblocking(sock->fd);
set_nonblocking(sock->conn->fd);
pthread_attr_init(&attr);
pthread_mutex_init(&g_socket_mutex,NULL);
if (join){
pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_JOINABLE);
}else{
pthread_attr_setdetachstate(&attr,PTHREAD_CREATE_DETACHED);
}
pthread_create(&id,&attr,s_accept_connections,sock);
if (join){
pthread_join(id,NULL);
pthread_attr_destroy(&attr);
pthread_mutex_destroy(&g_socket_mutex);
}
}
It simply gives 100% cpu, any ideas? if more code needed, ill post
What makes you believe that pthread_mutex_lock() is responsible for the CPU usage ?
Use a debugger to find out what is happening.
My guess is there is something wrong with your socket, making your accept() call non-blocking.
Check the return value/message (with perror() if you are running linux).
EDIT :
You need to know which piece of code is looping a debugger can help you to find this.
You have a while(true) loop that is very likely to be responsible for the enless loop and the 100% CPU usage. It should be ok since you have a call to accept() (here :newfd = accept(sock->fd,(SOCKADDR *)&newsockaddr,&tmp);) that is supposed to stop the thread/process until the next client connection. But if your socket is not correctly initialized accept() may return an error without waiting.