I am working on a threaded TCP socket server for handling multiple client connections. Clients can connect to and disconnect from the server asynchronously; upon connection, the client should send some data in a predefined custom packet protocol format.
The protocol has a start of frame (SOP) and an end of frame (EOP) defined.
I have written C code such that for each successful client connection a thread gets created that keeps receiving bytes from the client in the predefined packet format. The thread has a thread-local state machine, because clients connect asynchronously, so the state of each connection may differ.
Below is the thread function that receives the data from the client and maintains a state based on the type of byte received:
static void *receive_handler(void *args) {
struct thread_args *local_args = args;
struct sockaddr_in6 *client_address = local_args->client_address;
//struct itimerval timer_val;
int32_t conn_fd = local_args->conn_fd;
int32_t val_read = 0;
int32_t resp_code = 0;
uint32_t sendBuffLen = 0;
int8_t buffer[BUFFER_SIZE] = { 0 };
uint8_t RetBuff[1024] = { 0 };
char rx_addr_str[INET6_ADDRSTRLEN]; /* char, not int8_t: inet_ntop() writes into a char buffer */
int8_t byte = 0;
int16_t idx = ePacketType;
int16_t packet_len = 0;
int16_t calculated_crc = 0, recv_crc = 0;
uint16_t num_bytes = 0;
memset(rx_addr_str, 0, INET6_ADDRSTRLEN);
inet_ntop(AF_INET6, &(client_address->sin6_addr), rx_addr_str, INET6_ADDRSTRLEN);
printf("\nRx Thread (%d) Created for %s\n", local_args->connection_no, rx_addr_str);
int eState = eStart_Frame;
char MSB_Rxd = 0; /* per-connection length-MSB flag; a 'static' inside the switch would be shared by all client threads */
memcpy(rx_Packet_Info[local_args->connection_no].inet6, rx_addr_str, INET6_ADDRSTRLEN);
//timerclear(&timer_val.it_interval); /* zero interval means no reset of timer */
//timerclear(&timer_val.it_value);
//timer_val.it_value.tv_sec = 10; /* 10 second timeout */
//(void) signal(SIGALRM, state_reset_handler);
while (1) {
if (eState != eChecksum_Verify) {
val_read = -1;
val_read = recv(conn_fd, &byte, sizeof(byte), 0);
debug_printf(INFO, "Amount Read: %d Byte Rxd: 0x%x => 0x%X\n", val_read, (byte & 0xFF), byte);
if (val_read <= 0) {
if (parse_packet("ERR_DISCONNECT", rx_addr_str, local_args->connection_no) < 0) {
debug_printf(ERR, "Error parsing packet: %s\n", strerror(errno));
}
debug_printf(ERR, "May be closed by client %s: %s\n", rx_addr_str, strerror(errno));
debug_printf(ERR, "Exiting Rx Thread: ConnIdx: %d", num_connections);
close(conn_fd);
pthread_exit(NULL);
}
}
switch (eState) {
case eStart_Frame:
debug_printf(DEBG, "Current State: %d\n", eState);
if ((val_read > 0) && (byte & 0xFF) == SOP) {
memset(buffer, 0, BUFFER_SIZE);
val_read = -1;
buffer[eSOP] = (byte & 0xFF);
eState = eFrame_Len;
}
break;
case eFrame_Len: {
debug_printf(DEBG, "Current State: %d\n", eState);
if (val_read > 0) {
if (MSB_Rxd == 0) {
buffer[ePacket_length] = byte;
MSB_Rxd = 1;
}
else {
buffer[ePacket_length + 1] = byte;
eState = eFrame;
num_bytes = 0;
MSB_Rxd = 0;
packet_len = ((buffer[ePacket_length] & 0xFF) << 8) | (buffer[ePacket_length + 1] & 0xFF); /* '<<' binds tighter than '&', so both masks need parentheses */
debug_printf(INFO, "Packet Length: %d : 0x%x 0x%x\n", packet_len,
buffer[ePacket_length], buffer[ePacket_length + 1]);
}
}
}
break;
case eFrame:
debug_printf(DEBG, "Current State: %d\n", eState);
num_bytes++;
buffer[idx] = byte;
if (num_bytes == packet_len) {
eState = eEnd_Frame;
debug_printf(DEBG, "Num bytes: 0x%x\n", num_bytes);
}
else {
debug_printf(ERR, "Num bytes: 0x%x Pkt Len: 0x%x\n", num_bytes, packet_len);
}
idx++;
break;
case eEnd_Frame:
debug_printf(ERR, "Current State: %d val read %d\n", eState, val_read);
if ((val_read > 0) && (byte & 0xFF) == EOP) {
val_read = -1;
eState = eChecksum_Verify;
}
break;
case eChecksum_Verify: {
calculated_crc = crc_16(&buffer[ePacket_length], (num_bytes));
recv_crc = ((buffer[num_bytes + 1] & 0xFF) << 8) | (buffer[num_bytes + 2] & 0xFF); /* mask before shifting to avoid sign extension of int8_t */
if (calculated_crc != recv_crc) {
debug_printf(ERR, "CRC Error! CRC do not match!!\n");
debug_printf(ERR, "Calculated CRC: 0x%X\nCRC Rxd: 0x%X\n", calculated_crc, recv_crc);
resp_code = CRC_ERR;
send(conn_fd, &resp_code, sizeof(resp_code), 0);
}
else {
if (rx_Packet_Info[local_args->connection_no].packetUUID != NULL) {
free(rx_Packet_Info[local_args->connection_no].packetUUID);
rx_Packet_Info[local_args->connection_no].packetUUID = NULL;
}
rx_Packet_Info[local_args->connection_no].packetUUID = calloc(buffer[ePacketUUIDLen],
sizeof(uint8_t));
memcpy(rx_Packet_Info[local_args->connection_no].packetUUID, &buffer[ePacketUUID],
buffer[ePacketUUIDLen]);
rx_Packet_Info[local_args->connection_no].packetUUIDlength = buffer[ePacketUUIDLen];
printf("\nRX-Thread-UUID %d: ConnNo: %d\n", buffer[ePacketUUIDLen],
local_args->connection_no);
for (char i = 0; i < buffer[ePacketUUIDLen]; i++) {
printf("0x%x ", rx_Packet_Info[local_args->connection_no].packetUUID[i]);
}
printf("\n");
if (parse_packet(buffer, rx_addr_str, local_args->connection_no) < 0) {
debug_printf(ERR, "Error parsing packet: %s\n", strerror(errno));
}
}
num_bytes = 0;
eState = eStart_Frame;
idx = ePacketType;
}
break;
default:
debug_printf(DEBG, "Invalid State!! Should not come here.\n");
num_bytes = 0;
eState = eStart_Frame;
idx = ePacketType;
break;
}
}
return NULL;
}
My question is: how should I reset this state machine if, let's say, after receiving the start of frame the client gets stuck and is unable to send the frame length or the rest of the frame through to the end of frame?
One way I thought of is to implement a timer callback, but I am not sure how I should keep track of the state machines of multiple threads.
Can anyone please suggest what I should do in this scenario, or whether I am doing anything wrong?
If I'm parsing the question correctly, you're asking how to gracefully handle the situation where the connecting client isn't sending data in a timely manner -- i.e. it has sent the first part of a message but (due to a network problem or a client-side bug or whatever) never sends the rest, leaving your server-side I/O thread blocked inside a recv() call for a long or indefinite time.
If so, the first question to ask is: is this really a problem? If each connection gets its own thread, then having one particular thread/connection blocked shouldn't cause any issues to the other threads, since they all execute independently of each other. So maybe you can just ignore the problem entirely?
However, the more likely answer is that ignoring the problem isn't quite good enough, because of a couple of subsequent problems that aren't easily ignorable: (a) what if too many client connections "freeze up" at the same time? One or two stalled TCP connections/threads isn't a big deal, but if the same problem keeps happening, eventually you'll run out of resources to spawn more threads or TCP connections, and then your server can no longer function. And (b) what if the server process wants to quit now? (i.e. because the server's user has sent it a SIGINT interrupt or similar) If one or more threads are blocked indefinitely, then it is impossible for the server to exit in a timely-and-controlled manner, because the main thread needs to wait for all the TCP threads to exit first before it can clean up its process-wide resources, and any blocked threads will not exit for a long time, if ever.
So, assuming that the problem does need to be addressed, the most reliable way I've found to address it is to never block in recv() (or send()) in the first place. Instead, make sure to put each socket in non-blocking mode, and have the thread's while-loop block only in a select() call instead. Doing it this way makes your state machine a bit more complex (since it will now have to handle partial-sends as well as partial-receives), but the compensating benefit is that the thread is now in better control of its own blocking behavior. In particular, you can tell select() to always return after a certain amount of time, no matter what, and (better yet) you can tell select() to return whenever any of a number of sockets has bytes ready to be read on it. That means that if your main thread wants to exit, it can use a pipe() or socketpair() to send a dummy-byte to each TCP thread, and the TCP thread (which is presumably blocked inside select(), waiting for either data from its client or from the pipe/socketpair socket) will immediately return from select(), see that the main thread has sent it a byte, and respond by exiting immediately.
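Here is a minimal sketch of that pattern, under some assumptions: connection_loop() and shutdown_fd are illustrative names, and the main thread is presumed to hold the write end of the pipe() and write a single byte to it when it wants the thread to quit:

#include <fcntl.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

/* Per-connection thread loop: block only in select(), watching both the
 * client socket and a shutdown pipe written to by the main thread. */
void connection_loop(int client_fd, int shutdown_fd)
{
    /* never block inside recv() itself */
    fcntl(client_fd, F_SETFL, fcntl(client_fd, F_GETFL) | O_NONBLOCK);

    for (;;) {
        fd_set readfds;
        FD_ZERO(&readfds);
        FD_SET(client_fd, &readfds);
        FD_SET(shutdown_fd, &readfds);
        int maxfd = client_fd > shutdown_fd ? client_fd : shutdown_fd;

        /* optional periodic wakeup, e.g. for housekeeping */
        struct timeval tv = { .tv_sec = 10, .tv_usec = 0 };
        int ready = select(maxfd + 1, &readfds, NULL, NULL, &tv);
        if (ready < 0)
            break;                        /* select() failed */
        if (ready == 0) {
            /* timeout: a natural, thread-local place to reset a parser
             * that is stuck mid-frame (eState = eStart_Frame) */
            continue;
        }
        if (FD_ISSET(shutdown_fd, &readfds))
            break;                        /* main thread asked us to exit */
        if (FD_ISSET(client_fd, &readfds)) {
            char byte;
            ssize_t n = recv(client_fd, &byte, 1, 0);
            if (n <= 0)
                break;                    /* client closed, or error */
            /* feed 'byte' into this connection's state machine here */
        }
    }
    close(client_fd);
}

Note that this also addresses the state-machine-reset part of the question: because each thread owns its own select() loop, the timeout branch gives it a private place to reset its own parser, with no cross-thread timer bookkeeping needed.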
That should be sufficient -- in my experience it is better not to impose fixed timeouts if you can avoid it, since it's hard to predict what network performance will be like in all cases, and any rule of thumb you might come up with (like "a client that doesn't send the whole message in 5 seconds must be broken") is likely to be wrong, and you'll end up with false-positive problems if you try to enforce it. Better to just let each client take as long as it wants/needs, while also having a mechanism by which the main thread can request that a particular client thread exit immediately if/when that becomes necessary (e.g. during server-process shutdown, or if there are too many TCP threads active and you want to prune some of the old/inactive ones before spawning more).
Related
I'm new to signals. I'm trying to set SIGALRM on a UDP echo service, as socket programming practice.
So here I have a UDP socket; the client sends a string to the server and waits for a response (any response -- here the string is echoed by the server).
The goal is to set SIGALRM and let the client resend the string a few times if no response is made by the server or the UDP packets get lost.
Here I used a small sample and simplified long lines with ...; you can get more details in my GitHub repo (line 51).
sigALRM-Client.c
volatile sig_atomic_t tries = 0; /* sig_atomic_t: safe to modify from a signal handler */
void CatchAlarm(int sig)
{
(void)sig; /* a sigaction handler receives the signal number */
tries += 1;
}
int main(int argc, char **argv)
{
// SKIPPED
// ...
struct sigaction handler;
handler.sa_handler = CatchAlarm;
handler.sa_flags = 0;
if(sigfillset(&handler.sa_mask) < 0)
return 1;
if(sigaction(SIGALRM, &handler, 0) < 0)
return 2;
ssize_t bytes;
bytes = sendto(servSock,...);
while((bytes = recvfrom(servSock,...)) < 0) {
// alarm went off
if(errno == EINTR) {
// try 5 times
if(tries < 5) {
bytes = sendto(servSock,...);
} else {
fprintf(stdout, "no response, waiting...\n");
}
} else {
fprintf(stdout, "failed to get data\n");
return 3;
}
}
// recvfrom() got something, cancel timeout
alarm(0);
fprintf(stdout, "received %d bytes of data\n", bytes);
close(servSock);
}
When I run the client, it won't receive the SIGALRM signal, and the UDP packets get lost on the first attempt?!
The client won't retry sending the string and then exit after 5 attempts; instead, it waits for the server's response forever!
What prevents the client from getting SIGALRM?
Did I miss something here?
Your code in the GitHub repo never calls alarm() with a non-zero number. You'll never get an alarm signal delivered automatically unless you actually request one. Relying on some other process to send your process an alarm signal isn't resilient.
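To make that concrete, here is a sketch of the retry loop with alarm() actually armed. The TIMEOUT_SECS/MAX_TRIES values and the echo_with_retries() helper are made up for the example; it assumes a connected UDP socket and the question's handler installed with sa_flags = 0 (no SA_RESTART), so recv() fails with EINTR when the alarm fires:

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define TIMEOUT_SECS 2 /* hypothetical timeout; tune for your network */
#define MAX_TRIES 5

extern volatile sig_atomic_t tries; /* incremented by CatchAlarm(), as in the question */

ssize_t echo_with_retries(int servSock, const char *msg, char *reply, size_t replylen)
{
    ssize_t bytes;
    if (send(servSock, msg, strlen(msg), 0) < 0)
        return -1;
    alarm(TIMEOUT_SECS);                      /* without this, SIGALRM never fires */
    while ((bytes = recv(servSock, reply, replylen, 0)) < 0) {
        if (errno != EINTR)
            break;                            /* a real error, not the alarm */
        if (tries >= MAX_TRIES)
            break;                            /* give up after 5 timeouts */
        if (send(servSock, msg, strlen(msg), 0) < 0)
            break;                            /* resend failed */
        alarm(TIMEOUT_SECS);                  /* re-arm before waiting again */
    }
    alarm(0);                                 /* cancel any pending alarm */
    return bytes;
}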
I have multiple slaves on an RS485 bus. I have been using pymodbus so far, but I'm not quite happy with its performance and other issues, so I wanted to test libmodbus and use that instead.
I wrote a minimal program that reads the model numbers of my slaves:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <modbus.h>
#include <errno.h>
char *bigendian_vec_to_str(uint16_t *vec, size_t vec_size, char *buff, size_t buff_len)
{
memset(buff, 0, sizeof *buff * buff_len);
int i;
for(i = 0; i < vec_size; ++i)
{
uint16_t fl = vec[i] >> 8;
uint16_t sl = vec[i] & 0xff;
if(2*i >= buff_len - 1)
return buff;
if(fl == 0)
return buff;
buff[2 * i] = fl;
if(2*i + 1 >= buff_len - 1)
return buff;
if(sl == 0)
return buff;
buff[2 * i + 1] = sl;
}
return buff;
}
char *get_model_name_of(modbus_t *modbus, int slave, char *buff, size_t buff_len)
{
modbus_flush(modbus);
modbus_set_slave(modbus, slave);
int rc;
uint16_t reg[9];
memset(reg, 0, sizeof reg);
rc = modbus_read_registers(modbus, 0xe, 8, reg);
if (rc == -1) {
fprintf(stderr, "Error %d while reading: %s\n", errno, modbus_strerror(errno));
return NULL;
}
return bigendian_vec_to_str(reg, 8, buff, buff_len);
}
int main(void)
{
modbus_t *modbus = modbus_new_rtu("/dev/ttyUSB0", 9600, 'N', 8, 1);
if (modbus == NULL) {
fprintf(stderr, "Unable to create libmodbus context\n");
return -1;
}
modbus_rtu_set_serial_mode(modbus, MODBUS_RTU_RS485);
if (modbus_connect(modbus) == -1) {
fprintf(stderr, "Connection failed: %s\n", modbus_strerror(errno));
modbus_free(modbus);
return -1;
}
char buff[1024];
int i;
for(i = 2; i < 5; ++i)
{
printf("Model of slave %d: %s\n", i, get_model_name_of(modbus, i, buff, sizeof buff));
}
modbus_free(modbus);
return 0;
}
When I ran this code I got
Model of slave 2: LEFS25B-600
Error 110 while reading: Connection timed out
Model of slave 3: (null)
Model of slave 4: LEHF10K2-16
and it seemed strange that the 2nd module was not responding. So I looped get_model_name_of through 2,3,4,2,3,4,2,3,4..., and every second read attempt ended with Error 110 while reading: Connection timed out. After the line modbus_set_slave(modbus, slave); I added
usleep(0.005 * 1000000);
and then I didn't get timeouts anymore. I read the man pages twice and didn't find anything warning me about this. I also searched Google, but none of the "similar" threads I found were of any help.
What is the best way to deal with multiple slaves? Why does adding a sleep of 5 milliseconds (usleep takes microseconds) help here? I mean, the code in libmodbus does
static int _modbus_set_slave(modbus_t *ctx, int slave)
{
/* Broadcast address is 0 (MODBUS_BROADCAST_ADDRESS) */
if (slave >= 0 && slave <= 247) {
ctx->slave = slave;
} else {
errno = EINVAL;
return -1;
}
return 0;
}
which just sets an internal value in the context. Are there any timing constraints between changing an internal value in the context and reading/writing on the bus? If so, how long should I wait after a set_slave? And why does libmodbus set the slave ID globally instead of having it as a parameter of the read/write functions, as other libraries (like pymodbus) do?
Or am I using this API incorrectly?
Thanks
I may be wrong, but as I understand it, the Modbus master sends out a request targeted at a specific slave number, with the intention of receiving a reply from the targeted slave and then sending a request to the next slave and awaiting its reply. If requests are sent out without waiting for the reply from the first slave, there is a possibility of missing the reply from the second slave (or third, or whatever number) while the first slave's reply is still being sent to and received by the master.
I am not good at C programming, but I recommend you check this, as I think that may be why adding a delay seems to help. (Also, the Modbus protocol requires a pause in signal transmission to mark the start and end of each transmission.)
If I am correct, then the use of a delay will only work well if you know the size of the data being sent and the time needed to calculate a response. For other situations some kind of handshake would be safer, such as reading a coil that indicates whether data is refreshed and ready to be read from the slave -- a traffic light, so to speak, to control the timing of requests going to the other slaves and to avoid collisions of responses.
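To illustrate the delay idea in code (a sketch only: read_registers_paced() is a made-up helper, and the 5 ms pause is the question's empirical value, which really depends on baud rate and the slaves' response time):

#include <modbus.h>
#include <stdint.h>
#include <unistd.h>

/* Select the slave, do one read transaction, then leave a short quiet
 * period so the reply cannot collide with the next request on the bus. */
int read_registers_paced(modbus_t *ctx, int slave, int addr, int nb, uint16_t *dest)
{
    modbus_set_slave(ctx, slave);
    int rc = modbus_read_registers(ctx, addr, nb, dest);
    usleep(5000); /* ~5 ms inter-transaction gap */
    return rc;
}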
Again, I am not good at C, and if I have misinterpreted the program, please ignore what I have said. If it helps, I would be happy to hear.
Peter
I have a typical client-server C socket program.
SERVER:
if(ndp.cmd == 11)
{
//ack1 = 0;
puts("Query Command for Light 2");
pthread_mutex_lock(&lock);
// ..Some critical stuff
pthread_mutex_unlock(&lock);
printf("ID: %d, Level: %d\n", new.address, new.level);
ack1 = new.level; //This result is showing correct on server
ack1 = htonl(ack1);
send(client_sock, &ack1, sizeof(ack1), 0);
}
CLIENT:
printf("Query Actual level Light2\n");
dp.id = 2;dp.cmd = 11; dp.active = 0; dp.level = 0; dp.group = 0;
if( send(sock , &dp , sizeof(dp) , 0) < 0) { puts("Send failed"); }
sleep(1);
ret = recv(sock , &level1 , sizeof(level1) , 0);
printf("Number of bytes received: %d\n", ret); //Always gives 4
fflush(stdout);
printf("Light level %x\n", ntohl(level1) ); //This prints 0 (incorrect)
fflush(stdout);
sleep(5);
printf("Query Actual level Light2\n");
dp.id = 2;dp.cmd = 11; dp.active = 0; dp.level = 0; dp.group = 0;
if( send(sock , &dp , sizeof(dp) , 0) < 0) { puts("Send failed"); }
sleep(1);
ret = recv(sock , &level2 , sizeof(level2) , 0);
printf("Number of bytes received: %d\n", ret); // Always gives 4
fflush(stdout);
printf("Light level %x\n", ntohl(level2) ); //This prints correct value
fflush(stdout);
close(sock);
Expected output:
Light level 32 (hex value for 50);
Light level 32
Actual output:
Light level 0
Light level 32
So the problem is: for the same request, the 1st send from the server is not received by the client, but the next recv() gives the correct value.
Why is the data from the 1st recv() getting lost? Is it getting buffered, and maybe I am getting the previous value in the 2nd recv()?
Please help.
Your assumption is not wrong. It may be buffered. Also note that when the receiver reads data faster than the sender sends it, some recv calls may read no data because the sender hasn't sent anything yet. That could be the cause of what you're describing.
UPDATE: If you plan to always send/receive the same amount of data, for example a 20-byte packet containing what you defined as a command, then you can wrap your send and recv in a loop. This loop will write (or read) a single command at a time. You cannot assume a call to send or recv will succeed, so you MUST check for errors, as @Joachim mentioned in the comments. Another thing to note is that recv MAY read fewer bytes than specified, so you may have to read multiple times in order to receive the complete command; the same applies to send.
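A minimal sketch of such loops (recv_all/send_all are illustrative names, not from the question's code):

#include <errno.h>
#include <sys/socket.h>
#include <sys/types.h>

/* Keep calling recv() until exactly 'len' bytes have arrived, the peer
 * closes the connection (returns 0), or a real error occurs (returns -1). */
ssize_t recv_all(int fd, void *buf, size_t len)
{
    size_t got = 0;
    while (got < len) {
        ssize_t n = recv(fd, (char *)buf + got, len - got, 0);
        if (n == 0)
            return 0;                     /* peer closed the connection */
        if (n < 0) {
            if (errno == EINTR)
                continue;                 /* interrupted by a signal: retry */
            return -1;
        }
        got += (size_t)n;
    }
    return (ssize_t)got;
}

/* Symmetric loop for send(): one call may write fewer bytes than asked. */
ssize_t send_all(int fd, const void *buf, size_t len)
{
    size_t sent = 0;
    while (sent < len) {
        ssize_t n = send(fd, (const char *)buf + sent, len - sent, 0);
        if (n < 0) {
            if (errno == EINTR)
                continue;
            return -1;
        }
        sent += (size_t)n;
    }
    return (ssize_t)sent;
}

With these in place, the client would read each reply with recv_all(sock, &level1, sizeof(level1)) instead of a bare recv().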
You may use the select/poll mechanism to check whether new data has arrived on the socket and is waiting to be read. You can also check the amount of data pending on the socket by invoking an ioctl call like the following:
{
int size = 0;
ioctl(socket, FIONREAD, &size);
if(size > 0)
{
// do recv operation(s)
}
}
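For the select/poll part, a short sketch using poll() on the same socket variable (the 1000 ms timeout is an arbitrary value for the example):

#include <poll.h>

{
    struct pollfd pfd = { .fd = socket, .events = POLLIN };
    /* wait up to 1000 ms; if POLLIN is set, the following recv() will not block */
    if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
    {
        // do recv operation(s)
    }
}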
I am attempting to have TCP communication from my laptop to a SoC board, where I send one message to initiate a process and it sends me a series of status messages until it has completed the process. This means it would call send() multiple times over the socket from the SoC. The problem is that my laptop does not receive any status message until the socket connection is closed by the SoC. As soon as the socket closes, I get a burst of all the data that the SoC sent. Is there a way to make this communication real-time, so that I get the status messages immediately as they are sent?
Code on the SoC that sends the data: This function is called multiple times:
INT16 upload(INT16* socket, INT8 HOB, INT8 LOB, INT8 msg_type, INT8* data) {
INT8* Tx_Data= NULL;
INT16 size = 0; /* must be wider than INT8: 256*HOB + LOB + NAME_SIZE + 3 can exceed 127 */
INT16 num= 0;
int temp = 0;
size = 256*HOB + LOB + NAME_SIZE + 3;
Tx_Data = (INT8*) malloc(size*sizeof(INT8));
for (num=0;num<NAME_SIZE ;num++) {
Tx_Data[num] = NAME[num];
}
Tx_Data[num++] = HOB;
Tx_Data[num++] = LOB;
Tx_Data[num++] = msg_type; // 11 byte header ends here
for(temp = 0;num<size;num++,temp++) {
Tx_Data[num] = data[temp]; // data
}
send(*socket, (INT8*)Tx_Data, num, 0);
free(Tx_Data); /* the original leaked this buffer on every call */
return PASS;
}
Code on my Laptop:
for(;;) {
printf("Start of for loop socket id: %d \n",socket_id);
RX_Data = (unsigned char*)malloc(2048);
unsigned char Command;
int No_of_Data_Bytes=0;
printf("Before read.... \n");
nbytes = read(socket_id,RX_Data,11); // get 11 bytes from EZ80
printf("Received %d bytes from socket\n",nbytes);
No_of_Data_Bytes = RX_Data[8]*256 + RX_Data[9];
Command = RX_Data[10];
printf("\n command:%c %d \n",Command,No_of_Data_Bytes);
if(Command=='I' || Command=='E'|| Command=='V') {
//read and analyse data
}
else {
break;
}
Command = '\0';
free(RX_Data);
}
In Linux you could just use fdopen(), write to the stream, and flush it. Not sure if it's applicable in your case, however. Example:
FILE *f = fdopen(socketdescriptor, "w+");
. . .
n = fwrite("this is a message", 1, 17, f); /* write through the stream so fflush() has something to flush */
fflush(f);
...
The reason for this behaviour is the Nagle algorithm, which is enabled on a socket by default. You can disable it via a setsockopt() call.
You can use fflush(), or just add a '\n' character to the end of the string being written into the socket, but that will flush only stdio buffers in userland, which is not what you are aiming at.
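The setsockopt() call looks like this (sockfd stands for the connected socket descriptor, i.e. the one the SoC writes to):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

int flag = 1; /* 1 = disable Nagle's algorithm on this socket */
if (setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, &flag, sizeof(flag)) < 0)
    perror("setsockopt(TCP_NODELAY)");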
I just wondered how instant messengers and online games can accept and deliver messages so fast (network programming with sockets).
I read that this is done with non-blocking sockets.
I tried blocking sockets with pthreads (each client gets its own thread) and non-blocking sockets with kqueue. Then I profiled both servers with a program which made 99 connections (each connection in its own thread) and then wrote some garbage to them (with a sleep of 1 second). When all the threads were set up, I measured in the main thread how long it took to get a connection from the server (wall-clock time) while the "99 users" were writing to it.
threads (avg): 0.000350 // only small difference to kqueue
kqueue (avg): 0.000300 // and this is not even stable (client side)
The problem is that, while testing with kqueue, I got a SIGPIPE error multiple times (client-side). (With a little timeout, usleep(50), this error was fixed.) I think this is really bad, because a server should be capable of handling thousands of connections. (Or is it my fault on the client side?) The crazy thing about this is that the infamous pthread approach did just fine (with and without the timeout).
So my question is: how can you build a stable socket server in C which can handle thousands of clients "asynchronously"? I only see the threads approach as a good option, but this is considered bad practice.
Greetings
EDIT:
My test code:
double get_wall_time(){
struct timeval time;
if (gettimeofday(&time,NULL)){
// Handle error
return 0;
}
return (double)time.tv_sec + (double)time.tv_usec * .000001;
}
#define NTHREADS 100
static struct addrinfo hint, *res; /* missing from the original snippet; filled in by getaddrinfo() in main() */
volatile unsigned n_threads = 0;
volatile unsigned n_writes = 0;
pthread_mutex_t main_ready;
pthread_mutex_t stop_mtx;
volatile bool running = true;
void stop(void)
{
pthread_mutex_lock(&stop_mtx);
running = false;
pthread_mutex_unlock(&stop_mtx);
}
bool shouldRun(void)
{
bool copy;
pthread_mutex_lock(&stop_mtx);
copy = running;
pthread_mutex_unlock(&stop_mtx);
return copy;
}
#define TARGET_HOST "localhost"
#define TARGET_PORT "1336"
void *thread(void *args)
{
char tmp = 0x01;
if (__sync_add_and_fetch(&n_threads, 1) == NTHREADS) {
pthread_mutex_unlock(&main_ready);
fprintf(stderr, "All %u Threads are ready...\n", (unsigned)n_threads);
}
int fd = socket(res->ai_family, SOCK_STREAM, res->ai_protocol);
if (connect(fd, res->ai_addr, res->ai_addrlen) != 0) {
socket_close(fd);
fd = -1;
}
if (fd <= 0) {
fprintf(stderr, "socket_create failed\n");
}
if (write(fd, &tmp, 1) <= 0) {
fprintf(stderr, "pre-write failed\n");
}
do {
/* Write some garbage */
if (write(fd, &tmp, 1) <= 0) {
fprintf(stderr, "in-write failed\n");
break;
}
__sync_add_and_fetch(&n_writes, 1);
/* Wait some time */
usleep(500);
} while (shouldRun());
socket_close(fd);
return NULL;
}
int main(int argc, const char * argv[])
{
pthread_t threads[NTHREADS];
pthread_mutex_init(&main_ready, NULL);
pthread_mutex_lock(&main_ready);
pthread_mutex_init(&stop_mtx, NULL);
bzero((char *)&hint, sizeof(hint));
hint.ai_socktype = SOCK_STREAM;
hint.ai_family = AF_INET;
if (getaddrinfo(TARGET_HOST, TARGET_PORT, &hint, &res) != 0) {
return -1;
}
for (int i = 0; i < NTHREADS; ++i) {
pthread_create(&threads[i], NULL, thread, NULL);
}
/* wait for all threads to be set up */
pthread_mutex_lock(&main_ready);
fprintf(stderr, "Main thread is ready...\n");
{
double start, end;
int fd;
start = get_wall_time();
fd = socket(res->ai_family, SOCK_STREAM, res->ai_protocol);
if (connect(fd, res->ai_addr, res->ai_addrlen) != 0) {
socket_close(fd);
fd = -1;
}
end = get_wall_time();
if (fd > 0) {
fprintf(stderr, "Took %f ms\n", (end - start) * 1000);
socket_close(fd);
}
}
/* Stop all running threads */
stop();
/* Waiting for termination */
for (int i = 0; i < NTHREADS; ++i) {
pthread_join(threads[i], NULL);
}
fprintf(stderr, "Performed %u successfull writes\n", (unsigned)n_writes);
/* Lol.. */
freeaddrinfo(res);
return 0;
}
SIGPIPE comes when I try to connect to the kqueue server (after 10 connections are made, the server is "stuck"?). And when too many users are writing stuff, the server cannot open a new connection. (kqueue server code from http://eradman.com/posts/kqueue-tcp.html)
SIGPIPE means you're trying to write to a socket (or pipe) where the other end has already been closed (so no one will be able to read it). If you don't care about that, you can ignore SIGPIPE signals (call signal(SIGPIPE, SIG_IGN)) and the signals won't be a problem. Of course the write (or send) calls on the sockets will still fail (with EPIPE), so you need to make your code robust enough to deal with that.
The reason that SIGPIPE normally kills the process is that it's too easy to write programs that ignore errors on write/send calls and would otherwise run amok using up 100% of CPU time. As long as you carefully check for errors and deal with them, you can safely ignore SIGPIPEs.
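A sketch of that combination: ignore the signal once at startup, then treat EPIPE from send() as "peer disconnected" (send_checked() is a made-up wrapper):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* Returns 0 on success, -1 on any error; on EPIPE the fd is closed. */
int send_checked(int fd, const void *buf, size_t len)
{
    if (send(fd, buf, len, 0) < 0) {
        if (errno == EPIPE) {
            close(fd);        /* peer already closed its end */
            return -1;
        }
        perror("send");
        return -1;
    }
    return 0;
}

int main(void)
{
    signal(SIGPIPE, SIG_IGN); /* do this once, before any socket writes */
    /* ... set up sockets and use send_checked() for all writes ... */
    return 0;
}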
Or is it my fault?
It was your fault. TCP works. Most probably you didn't read all the data that was sent.
And when too many users are writing stuff, the server cannot open a new connection
Servers don't open connections. Clients open connections. Servers accept connections. If your server stops doing that, there is something wrong with your accept loop. It should only do two things: accept a connection, and start a thread.
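In outline (a sketch, assuming a handle_client() thread function and an already-listening listen_fd):

#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

void *handle_client(void *arg); /* per-connection thread, defined elsewhere */

void accept_loop(int listen_fd)
{
    for (;;) {
        int client_fd = accept(listen_fd, NULL, NULL);
        if (client_fd < 0) {
            if (errno == EINTR)
                continue;             /* interrupted by a signal: retry */
            perror("accept");
            break;
        }
        pthread_t tid;
        if (pthread_create(&tid, NULL, handle_client, (void *)(intptr_t)client_fd) == 0)
            pthread_detach(tid);      /* the thread cleans up after itself */
        else
            close(client_fd);         /* could not spawn a thread: drop it */
    }
}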