Here's a minimal example of what I'm trying to do. If you run it, you'll see that it takes a little more than 10 seconds to complete, when it should take less than 2. The problem is a race condition: the loop takes too long and the SetEvent happens before WaitForSingleObject is waiting for it. What would be nice is if the event could be signaled and WaitForSingleObject could still somehow know that it had been triggered.
What happens here is that data is generated, which can take a long time, and then sent across the network, which can take even longer. So I want to queue data to be sent and go on my merry way while another thread picks the data up and sends it out. That way I can keep queuing until I have nothing left to do, and then the main thread joins the sending thread until it has finished sending all the network data.
#include <stdio.h>
#include <windows.h>
#include <stdlib.h>
#include <string.h>
#define PIPE_NAME "\\\\.\\pipe\\testpipe"
void copy_protocol_buffer(struct protocol_buffer *in, struct protocol_buffer *out);
DWORD PipeClientStartSendBufferThread(struct client_pipe_settings *pipe_settings, LPDWORD lpThreadId);
DWORD InitializeClientPipeSettings(struct client_pipe_settings *pipe_settings);
void initialize_protocol_buffer(struct protocol_buffer *protocol_buffer);
struct protocol_buffer {
size_t length;
size_t location;
int data_type;
char *data;
struct protocol_buffer *next;
};
struct client_pipe_settings {
HANDLE hPipe;
LPCTSTR name;
DWORD pipe_timeout;
HANDLE write_event;
struct protocol_buffer *fifo;
HANDLE send_thread;
};
DWORD WINAPI PipeClientSendThread(LPVOID client_pipe_settings_object) {
struct client_pipe_settings *pipe_settings = (struct client_pipe_settings *)client_pipe_settings_object;
struct protocol_buffer *buf = NULL;
while(1) {
WaitForSingleObject(pipe_settings->write_event, 10000);
if (buf == NULL) {
buf = pipe_settings->fifo;
} else {
struct protocol_buffer *fifo_protocol_buffer = buf->next;
free(buf);
buf = fifo_protocol_buffer;
if(buf->length == 0)
//signal to quit
return 0;
}
//Send data over the network
Sleep(500);
}
return 0;
}
DWORD PipeQueueBuffer(struct client_pipe_settings *pipe_settings, struct protocol_buffer *buf)
{
struct protocol_buffer *out_protocol_buffer = (struct protocol_buffer *)malloc(sizeof *buf);
if(out_protocol_buffer == NULL)
exit(1);
copy_protocol_buffer(buf, out_protocol_buffer);
if (pipe_settings->fifo == NULL) {
pipe_settings->fifo = out_protocol_buffer;
}
else
{
struct protocol_buffer *last_protocol_buffer = pipe_settings->fifo;
while(last_protocol_buffer->next != NULL)
{
last_protocol_buffer = last_protocol_buffer->next;
}
last_protocol_buffer->next = out_protocol_buffer;
}
if(!SetEvent(pipe_settings->write_event))
return GetLastError();
return ERROR_SUCCESS;
}
int main(void) {
struct client_pipe_settings pipe_settings;
InitializeClientPipeSettings(&pipe_settings);
DWORD dwThreadId = 0;
PipeClientStartSendBufferThread(&pipe_settings, &dwThreadId);
//Generate data which could take a while
Sleep(1000);
struct protocol_buffer buf;
initialize_protocol_buffer(&buf);
buf.length = 5;
buf.data = (char *)malloc(5);
buf.data[0] = 'b';
buf.data[1] = 'l';
buf.data[2] = 'a';
buf.data[3] = 'h';
buf.data[4] = '\0';
PipeQueueBuffer(&pipe_settings, &buf);
Sleep(100);
PipeQueueBuffer(&pipe_settings, &buf);
buf.length = 0;
PipeQueueBuffer(&pipe_settings, &buf);
WaitForSingleObject(pipe_settings.send_thread, 100000);
}
DWORD InitializeClientPipeSettings(struct client_pipe_settings *pipe_settings)
{
pipe_settings->write_event = CreateEvent(NULL, 0, 0, NULL);
if(pipe_settings->write_event == NULL)
return GetLastError();
pipe_settings->hPipe = INVALID_HANDLE_VALUE;
pipe_settings->fifo = NULL;
pipe_settings->send_thread = NULL;
return ERROR_SUCCESS;
}
DWORD PipeClientStartSendBufferThread(struct client_pipe_settings *pipe_settings, LPDWORD lpThreadId)
{
HANDLE h = CreateThread(NULL, 0, PipeClientSendThread, pipe_settings, 0, lpThreadId);
if(h == NULL)
return GetLastError();
pipe_settings->send_thread = h;
return ERROR_SUCCESS;
}
void copy_protocol_buffer(struct protocol_buffer *in, struct protocol_buffer *out) {
out->data_type = in->data_type;
out->length = in->length;
out->location = in->location;
out->next = in->next;
out->data = (char*)malloc(in->length);
if (out->data == NULL) {
exit(1);
}
memcpy(out->data, in->data, in->length);
}
void initialize_protocol_buffer(struct protocol_buffer *protocol_buffer)
{
protocol_buffer->data = NULL;
protocol_buffer->length = 0;
protocol_buffer->location = 0;
protocol_buffer->next = NULL;
protocol_buffer->data_type = 0;
}
Your mechanism is simply wrong. It isn't about a SetEvent that comes too early.
If the event is set, it might have been set "more than once". PipeClientSendThread should wait for the event, and when it is signaled it should send all elements that have reached the queue. Your code posts 3 elements into the queue, but the event wakes the thread once, the thread sends only one element at a time, and the remaining elements are only sent when the timeout is reached...
Also, you have a massive problem: your queue must be protected by a critical section or a mutex. You modify and loop over elements of the queue while another thread is also reading and modifying it.
Use a critical section and a std::queue... this will also help you get rid of the malloc/free bookkeeping.
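To make that concrete, here is a minimal sketch in C (staying with the question's types rather than std::queue) of a send thread that drains everything queued each time the event fires. The names queue_head, queue_lock, queue_event, dequeue_all and SendThreadSketch are placeholders for illustration, not part of the original code:
#include <windows.h>
#include <stdlib.h>

/* Same protocol_buffer layout as in the question. */
struct protocol_buffer {
    size_t length;
    size_t location;
    int data_type;
    char *data;
    struct protocol_buffer *next;
};

static struct protocol_buffer *queue_head = NULL;  /* placeholder FIFO head */
static CRITICAL_SECTION queue_lock;                /* protects queue_head */
static HANDLE queue_event;                         /* auto-reset event */

/* Take the entire queued list in one locked step. */
static struct protocol_buffer *dequeue_all(void)
{
    EnterCriticalSection(&queue_lock);
    struct protocol_buffer *batch = queue_head;
    queue_head = NULL;
    LeaveCriticalSection(&queue_lock);
    return batch;
}

DWORD WINAPI SendThreadSketch(LPVOID unused)
{
    (void)unused;
    for (;;) {
        WaitForSingleObject(queue_event, INFINITE);
        /* One wakeup may stand for several queued buffers, so send them all. */
        struct protocol_buffer *buf = dequeue_all();
        while (buf != NULL) {
            struct protocol_buffer *next = buf->next;
            int quit = (buf->length == 0);   /* zero-length buffer = quit marker */
            /* ... send buf->data over the network here ... */
            free(buf->data);
            free(buf);
            if (quit)
                return 0;
            buf = next;
        }
    }
}
The queueing side would append to queue_head under the same critical section and then call SetEvent; because the consumer drains the whole list on every wakeup, it no longer matters that an auto-reset event cannot count how many times it was set.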
I have to use a buffer of size max_cache_req_len to read in the value received from mq_receive. Here is my code that is receiving a value from shared memory and then placing it on a queue:
size_t received_bytes = 0;
char buffer[MAX_CACHE_REQUEST_LEN];
received_bytes = 0;
memset(&buffer, 0, MAX_CACHE_REQUEST_LEN);
received_bytes = mq_receive(mq, buffer, MAX_CACHE_REQUEST_LEN, NULL);
if (received_bytes != -1)
{
item_type *item = malloc(sizeof(item_type));
item->path = buffer;
pthread_mutex_lock(&queue_lock);
steque_enqueue(&queue, item);
pthread_cond_signal(&queue_cond);
pthread_mutex_unlock(&queue_lock);
}
Here is my code that is taking the item off the queue, and placing it into a char* value. When I print the path, I get "".
void *worker(void *arg)
{
item_type *new_item;
char *path;
int fd;
while (1)
{
pthread_mutex_lock(&queue_lock);
while (steque_isempty(&queue) == 1)
pthread_cond_wait(&queue_cond, &queue_lock);
new_item = steque_pop(&queue);
path = new_item->path;
free(new_item);
new_item = NULL;
pthread_mutex_unlock(&queue_lock);
fd = simplecache_get(path);
sleep(cache_delay);
printf("%d\n", fd);
printf("%s\n", path);
// MAKE WORKER THREAD TAKE
if (fd == CACHE_FAILURE)
{
}
else
{
}
}
}
If I hardcode something like:
item->path = "buffer";
Then it prints buffer from within my worker function. This is a multithreaded application; I am just unsure what to do with my char[size] array to turn it into a char* that survives the transfer.
Nutshell:
(char*)&char[size] queued -> queue turns it into a void* -> popped off queue, turned into a char* and value is lost
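For what it's worth, here is a sketch of the enqueue side that copies the bytes out of the stack buffer into heap memory owned by the queued item, assuming the message is meant to be used as a NUL-terminated path and that the worker frees item->path when it is done; the surrounding names are the ones from the question:
received_bytes = mq_receive(mq, buffer, MAX_CACHE_REQUEST_LEN, NULL);
if (received_bytes != -1)
{
    item_type *item = malloc(sizeof(item_type));
    /* Copy into heap memory: storing "buffer" itself would leave the item
     * pointing at a stack array that is overwritten on the next receive. */
    item->path = malloc(received_bytes + 1);
    memcpy(item->path, buffer, received_bytes);
    item->path[received_bytes] = '\0';
    pthread_mutex_lock(&queue_lock);
    steque_enqueue(&queue, item);
    pthread_cond_signal(&queue_cond);
    pthread_mutex_unlock(&queue_lock);
}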
I have this struct called String that contains a pointer to a char array and the length of that array. When the callback function grabOutput is called, it does copy the output into the buffer allocated at ptr, but len is not changed.
#define BUF_SIZE 1024
typedef struct String
{
char * ptr;
int len;
} String;
HANDLE Child_IN_R = NULL;
HANDLE Child_IN_W = NULL;
HANDLE Child_OUT_R = NULL;
HANDLE Child_OUT_W = NULL;
HANDLE handle = NULL;
String str;
DWORD grabOutput(LPVOID lpParam )
{
BOOL success = FALSE;
DWORD dwRead, total =0;
char buffer[BUF_SIZE];
while(1)
{
success = ReadFile(Child_OUT_R, buffer, BUF_SIZE, &dwRead, NULL);
if (!success) break;
memcpy(str.ptr+total, buffer, dwRead);
total += dwRead;
str.ptr = realloc(str.ptr, total+BUF_SIZE);
}
str.len = total; // this does not change the value of str.len
return 0;
}
why doesn't this code change the value of str.len in the global String str?
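A hedged guess, sketched below: str.len is only assigned after the ReadFile loop ends, i.e. once the child's end of the pipe has been closed, so any thread that inspects str before grabOutput has returned will still see the old value. Assuming the reader thread's handle is stored in the global handle declared above, waiting on it first rules that out:
/* Sketch: wait for grabOutput to finish before trusting str.len.
 * "handle" and "str" are the globals declared in the question. */
void print_captured_output(void)
{
    WaitForSingleObject(handle, INFINITE);   /* grabOutput has returned */
    printf("captured %d bytes\n", str.len);  /* str.len was set just before */
}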
I have created a list of an undefined size using malloc which keeps track of threads and their data. However, if I realloc while the threads are still running, they are no longer able to save their data as the struct's memory location has changed. My project is constantly adding/subtracting threads so I need realloc.
Code:
#include <windows.h>
#include <stdio.h>
typedef struct option_data {
char* contract[3];
} option_data;
typedef struct thread_data {
char* thread;
} thread_data;
DWORD WINAPI optionSymbol(void* dat) {
option_data* data = (option_data*)dat;
data->contract[0] = 6;
data->contract[1] = 7;
data->contract[2] = 5;
return 0;
}
int create_thread(void* data, void* dat2) {
HANDLE thread = CreateThread(NULL, 0, optionSymbol, data, 0, NULL);
thread_data* t_data = (thread_data*)dat2;
t_data->thread = thread;
return 0;
}
void reallocation(void *lista, size_t sizeOfList)
{
char** list = lista;
char* listThingy = realloc(*list, sizeOfList * sizeof * list);
if (listThingy == NULL)
free(listThingy);
else
*list = listThingy;
}
int getChains(void)
{
option_data* optionDataList = malloc(sizeof(option_data));
thread_data* threadDataList = malloc(sizeof(thread_data));
create_thread(&optionDataList[0], &threadDataList[0]);
//reallocation(&optionDataList, 2)
//reallocation(*threadDataList, 2) <-- The code returns unpredictably when these two lines are uncommented.
create_thread(&optionDataList[0], &threadDataList[0]);
WaitForSingleObject(threadDataList[0].thread, INFINITE);
CloseHandle(threadDataList[0].thread);
printf("%i", optionDataList[0].contract[0]);
return 0;
}
int main()
{
getChains();
return 0;
}
How would I realloc without changing the original memory location of the structs, or else send the new memory location to the threads?
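One common way around this, sketched below: keep an array of pointers and realloc only the pointer table, so the structs that were handed to running threads never move. The names mirror the question where possible; everything else (and the use of strings for the char* contract fields) is illustrative:
#include <windows.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct option_data {
    char *contract[3];
} option_data;

static DWORD WINAPI optionSymbol(void *dat)
{
    option_data *data = (option_data *)dat;
    data->contract[0] = "C6";   /* store strings, matching the char* fields */
    data->contract[1] = "C7";
    data->contract[2] = "C5";
    return 0;
}

int main(void)
{
    /* Table of pointers; each struct gets its own stable allocation. */
    size_t count = 1;
    option_data **list = malloc(count * sizeof *list);
    list[0] = calloc(1, sizeof **list);

    HANDLE t = CreateThread(NULL, 0, optionSymbol, list[0], 0, NULL);

    /* Growing the table moves only the pointers, never the structs,
     * so the address passed to the running thread stays valid. */
    option_data **bigger = realloc(list, 2 * sizeof *list);
    if (bigger != NULL) {
        list = bigger;
        list[1] = calloc(1, sizeof **list);
        count = 2;
    }

    WaitForSingleObject(t, INFINITE);
    CloseHandle(t);
    printf("%s\n", list[0]->contract[0]);

    for (size_t i = 0; i < count; i++)
        free(list[i]);
    free(list);
    return 0;
}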
I am trying to understand the round-robin code for Multipath TCP scheduling. It is available here:
https://github.com/multipath-tcp/mptcp/blob/mptcp_v0.95/net/mptcp/mptcp_rr.c
I had a lot of difficulty with it since there is no documentation.
Here is the code:
/* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */
#include <linux/module.h>
#include <net/mptcp.h>
static unsigned char num_segments __read_mostly = 1;
module_param(num_segments, byte, 0644);
MODULE_PARM_DESC(num_segments, "The number of consecutive segments that are part of a burst");
static bool cwnd_limited __read_mostly = 1;
module_param(cwnd_limited, bool, 0644);
MODULE_PARM_DESC(cwnd_limited, "if set to 1, the scheduler tries to fill the congestion-window on all subflows");
struct rrsched_priv {
unsigned char quota;
};
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
/* Is the sub-socket sk available to send the skb? */
static bool mptcp_rr_is_available(const struct sock *sk, const struct sk_buff *skb,
bool zero_wnd_test, bool cwnd_test)
{
const struct tcp_sock *tp = tcp_sk(sk);
unsigned int space, in_flight;
/* Set of states for which we are allowed to send data */
if (!mptcp_sk_can_send(sk))
return false;
/* We do not send data on this subflow unless it is
* fully established, i.e. the 4th ack has been received.
*/
if (tp->mptcp->pre_established)
return false;
if (tp->pf)
return false;
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) {
/* If SACK is disabled, and we got a loss, TCP does not exit
* the loss-state until something above high_seq has been acked.
* (see tcp_try_undo_recovery)
*
* high_seq is the snd_nxt at the moment of the RTO. As soon
* as we have an RTO, we won't push data on the subflow.
* Thus, snd_una can never go beyond high_seq.
*/
if (!tcp_is_reno(tp))
return false;
else if (tp->snd_una != tp->high_seq)
return false;
}
if (!tp->mptcp->fully_established) {
/* Make sure that we send in-order data */
if (skb && tp->mptcp->second_packet &&
tp->mptcp->last_end_data_seq != TCP_SKB_CB(skb)->seq)
return false;
}
if (!cwnd_test)
goto zero_wnd_test;
in_flight = tcp_packets_in_flight(tp);
/* Not even a single spot in the cwnd */
if (in_flight >= tp->snd_cwnd)
return false;
/* Now, check if what is queued in the subflow's send-queue
* already fills the cwnd.
*/
space = (tp->snd_cwnd - in_flight) * tp->mss_cache;
if (tp->write_seq - tp->snd_nxt > space)
return false;
zero_wnd_test:
if (zero_wnd_test && !before(tp->write_seq, tcp_wnd_end(tp)))
return false;
return true;
}
/* Are we not allowed to reinject this skb on tp? */
static int mptcp_rr_dont_reinject_skb(const struct tcp_sock *tp, const struct sk_buff *skb)
{
/* If the skb has already been enqueued in this sk, try to find
* another one.
*/
return skb &&
/* Has the skb already been enqueued into this subsocket? */
mptcp_pi_to_flag(tp->mptcp->path_index) & TCP_SKB_CB(skb)->path_mask;
}
/* We just look for any subflow that is available */
static struct sock *rr_get_available_subflow(struct sock *meta_sk,
struct sk_buff *skb,
bool zero_wnd_test)
{
const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
struct sock *sk = NULL, *bestsk = NULL, *backupsk = NULL;
struct mptcp_tcp_sock *mptcp;
/* Answer data_fin on same subflow!!! */
if (meta_sk->sk_shutdown & RCV_SHUTDOWN &&
skb && mptcp_is_data_fin(skb)) {
mptcp_for_each_sub(mpcb, mptcp) {
sk = mptcp_to_sock(mptcp);
if (tcp_sk(sk)->mptcp->path_index == mpcb->dfin_path_index &&
mptcp_rr_is_available(sk, skb, zero_wnd_test, true))
return sk;
}
}
/* First, find the best subflow */
mptcp_for_each_sub(mpcb, mptcp) {
struct tcp_sock *tp;
sk = mptcp_to_sock(mptcp);
tp = tcp_sk(sk);
if (!mptcp_rr_is_available(sk, skb, zero_wnd_test, true))
continue;
if (mptcp_rr_dont_reinject_skb(tp, skb)) {
backupsk = sk;
continue;
}
bestsk = sk;
}
if (bestsk) {
sk = bestsk;
} else if (backupsk) {
/* It has been sent on all subflows once - let's give it a
* chance again by restarting its pathmask.
*/
if (skb)
TCP_SKB_CB(skb)->path_mask = 0;
sk = backupsk;
}
return sk;
}
/* Returns the next segment to be sent from the mptcp meta-queue.
* (chooses the reinject queue if any segment is waiting in it, otherwise,
* chooses the normal write queue).
 * Sets *@reinject to 1 if the returned segment comes from the
* reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk,
* and sets it to -1 if it is a meta-level retransmission to optimize the
* receive-buffer.
*/
static struct sk_buff *__mptcp_rr_next_segment(const struct sock *meta_sk, int *reinject)
{
const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
struct sk_buff *skb = NULL;
*reinject = 0;
/* If we are in fallback-mode, just take from the meta-send-queue */
if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping)
return tcp_send_head(meta_sk);
skb = skb_peek(&mpcb->reinject_queue);
if (skb)
*reinject = 1;
else
skb = tcp_send_head(meta_sk);
return skb;
}
static struct sk_buff *mptcp_rr_next_segment(struct sock *meta_sk,
int *reinject,
struct sock **subsk,
unsigned int *limit)
{
const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
struct sock *choose_sk = NULL;
struct mptcp_tcp_sock *mptcp;
struct sk_buff *skb = __mptcp_rr_next_segment(meta_sk, reinject);
unsigned char split = num_segments;
unsigned char iter = 0, full_subs = 0;
/* As we set it, we have to reset it as well. */
*limit = 0;
if (!skb)
return NULL;
if (*reinject) {
*subsk = rr_get_available_subflow(meta_sk, skb, false);
if (!*subsk)
return NULL;
return skb;
}
retry:
/* First, we look for a subflow who is currently being used */
mptcp_for_each_sub(mpcb, mptcp) {
struct sock *sk_it = mptcp_to_sock(mptcp);
struct tcp_sock *tp_it = tcp_sk(sk_it);
struct rrsched_priv *rr_p = rrsched_get_priv(tp_it);
if (!mptcp_rr_is_available(sk_it, skb, false, cwnd_limited))
continue;
iter++;
/* Is this subflow currently being used? */
if (rr_p->quota > 0 && rr_p->quota < num_segments) {
split = num_segments - rr_p->quota;
choose_sk = sk_it;
goto found;
}
/* Or, it's totally unused */
if (!rr_p->quota) {
split = num_segments;
choose_sk = sk_it;
}
/* Or, it must then be fully used */
if (rr_p->quota >= num_segments)
full_subs++;
}
/* All considered subflows have a full quota, and we considered at
* least one.
*/
if (iter && iter == full_subs) {
/* So, we restart this round by setting quota to 0 and retry
* to find a subflow.
*/
mptcp_for_each_sub(mpcb, mptcp) {
struct sock *sk_it = mptcp_to_sock(mptcp);
struct tcp_sock *tp_it = tcp_sk(sk_it);
struct rrsched_priv *rr_p = rrsched_get_priv(tp_it);
if (!mptcp_rr_is_available(sk_it, skb, false, cwnd_limited))
continue;
rr_p->quota = 0;
}
goto retry;
}
found:
if (choose_sk) {
unsigned int mss_now;
struct tcp_sock *choose_tp = tcp_sk(choose_sk);
struct rrsched_priv *rr_p = rrsched_get_priv(choose_tp);
if (!mptcp_rr_is_available(choose_sk, skb, false, true))
return NULL;
*subsk = choose_sk;
mss_now = tcp_current_mss(*subsk);
*limit = split * mss_now;
if (skb->len > mss_now)
rr_p->quota += DIV_ROUND_UP(skb->len, mss_now);
else
rr_p->quota++;
return skb;
}
return NULL;
}
static struct mptcp_sched_ops mptcp_sched_rr = {
.get_subflow = rr_get_available_subflow,
.next_segment = mptcp_rr_next_segment,
.name = "roundrobin",
.owner = THIS_MODULE,
};
static int __init rr_register(void)
{
BUILD_BUG_ON(sizeof(struct rrsched_priv) > MPTCP_SCHED_SIZE);
if (mptcp_register_scheduler(&mptcp_sched_rr))
return -1;
return 0;
}
static void rr_unregister(void)
{
mptcp_unregister_scheduler(&mptcp_sched_rr);
}
module_init(rr_register);
module_exit(rr_unregister);
MODULE_AUTHOR("Christoph Paasch");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ROUNDROBIN MPTCP");
MODULE_VERSION("0.89");
Please help with this part of the code. I didn't understand what it does; can anyone help me understand it?
struct rrsched_priv {
unsigned char quota;
};
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
The Multipath TCP scheduler is responsible for choosing on which subflow a packet will be forwarded and transmitted. In the round-robin case, the choice simply rotates over the available subflows, and the following code works against the Linux kernel TCP stack given by tcp_sock and the MPTCP stack given by net/mptcp.h.
In the following code there is a small per-scheduler structure holding quota, which counts the segments that have been consumed by (sent on) each subflow in the current round.
struct rrsched_priv {
unsigned char quota;
};
In the next step, rrsched_get_priv() takes a pointer to the kernel's tcp_sock.
Through tp it reaches mptcp->mptcp_sched[0], the scheduler's private scratch area in the MPTCP stack,
and finally it returns that area cast to a struct rrsched_priv *.
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
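To make the pattern concrete, here is a plain-C analogy (not kernel code; every name below is invented for illustration): a connection structure reserves a small opaque scratch area, and the scheduler overlays its own private struct on top of it, exactly as rrsched_get_priv() does with mptcp_sched[]:
#include <stdio.h>
#include <string.h>

#define SCHED_SCRATCH_SIZE 8

struct fake_subflow {
    int id;
    char sched_scratch[SCHED_SCRATCH_SIZE];  /* plays the role of mptcp_sched[] */
};

struct rr_private {
    unsigned char quota;   /* segments this subflow has taken in the current round */
};

static struct rr_private *rr_get_priv(struct fake_subflow *sf)
{
    /* Reinterpret the scratch bytes as the scheduler's private state. */
    return (struct rr_private *)&sf->sched_scratch[0];
}

int main(void)
{
    struct fake_subflow sf = { .id = 1 };
    memset(sf.sched_scratch, 0, sizeof sf.sched_scratch);  /* start with clean state */
    rr_get_priv(&sf)->quota += 1;                          /* account one sent segment */
    printf("subflow %d quota = %u\n", sf.id, (unsigned)rr_get_priv(&sf)->quota);
    return 0;
}
The real code does the same thing: mptcp_sched[] is just a few spare bytes inside the per-subflow MPTCP state, and each scheduler module is free to interpret them as whatever private bookkeeping it needs (here, the round-robin quota); the BUILD_BUG_ON in rr_register() checks that the private struct actually fits.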
References
[Round-Robin Scheduler] : http://progmp.net/progmp.html#/dissertation_round_robin
[tcp_sock] : https://docs.huihoo.com/doxygen/linux/kernel/3.7/structtcp__sock.html
[Quota] : https://tuprints.ulb.tu-darmstadt.de/7709/13/Dissertation_Froemmgen.pdf
[MPTCP Scheduler] : http://progmp.net/
The message queue simply stops working when dealing with many, many threads. It only seems to work OK with about 10 threads, for example. GDB tells me:
Program received signal SIGSEGV, Segmentation fault.
__GI_____strtol_l_internal (nptr=0x0, endptr=endptr#entry=0x0, base=base#entry=10, group=group#entry=0, loc=0x7ffff78b0060 <_nl_global_locale>)
at ../stdlib/strtol_l.c:298
298 ../stdlib/strtol_l.c: No such file or directory.
But I have no idea what this means. The same code works fine on Windows but not on Linux, which confuses me even more.
You can see below how this queue works. It is a singly linked list with locking while receiving messages. Please help me find where I messed up.
typedef struct Message {
unsigned type;
unsigned code;
void *data;
} Message;
typedef struct MessageQueueElement {
Message message;
struct MessageQueueElement *next;
} MessageQueueElement;
typedef struct MessageQueue {
MessageQueueElement *first;
MessageQueueElement *last;
} MessageQueue;
MessageQueue mq;
pthread_mutex_t emptyLock, sendLock;
pthread_cond_t emptyCond;
void init() {
mq.first = malloc(sizeof(MessageQueueElement));
mq.last = mq.first;
pthread_mutex_init(&emptyLock, NULL);
pthread_mutex_init(&sendLock, NULL);
pthread_cond_init(&emptyCond, NULL);
}
void clean() {
free(mq.first);
pthread_mutex_destroy(&emptyLock);
pthread_mutex_destroy(&sendLock);
pthread_cond_destroy(&emptyCond);
}
void sendMessage(MessageQueue *this, Message *message) {
pthread_mutex_lock(&sendLock);
if (this->first == this->last) {
pthread_mutex_lock(&emptyLock);
this->last->message = *message;
this->last = this->last->next = malloc(sizeof(MessageQueueElement));
pthread_cond_signal(&emptyCond);
pthread_mutex_unlock(&emptyLock);
} else {
this->last->message = *message;
this->last = this->last->next = malloc(sizeof(MessageQueueElement));
}
pthread_mutex_unlock(&sendLock);
}
int waitMessage(MessageQueue *this, int (*readMessage)(unsigned, unsigned, void *)) {
pthread_mutex_lock(&emptyLock);
if (this->first == this->last) {
pthread_cond_wait(&emptyCond, &emptyLock);
}
pthread_mutex_unlock(&emptyLock);
int n = readMessage(this->first->message.type, this->first->message.code, this->first->message.data);
MessageQueueElement *temp = this->first;
this->first = this->first->next;
free(temp);
return n;
}
some test code:
#define EXIT_MESSAGE 0
#define THREAD_MESSAGE 1
#define JUST_A_MESSAGE 2
#define EXIT 0
#define CONTINUE 1
int readMessage(unsigned type, unsigned code, void *data) {
if (type == THREAD_MESSAGE) {
printf("message from thread %d: %s\n", code, (char *)data);
free(data);
} else if (type == JUST_A_MESSAGE) {
puts((char *)data);
free(data);
} else if (type == EXIT_MESSAGE) {
puts("ending the program");
return EXIT;
}
return CONTINUE;
}
int nThreads;
int counter = 0;
void *worker(void *p) {
double pi = 0.0;
for (int i = 0; i < 1000000; i += 1) {
pi += (4.0 / (8.0 * i + 1.0) - 2.0 / (8.0 * i + 4.0) - 1.0 / (8.0 * i + 5.0) - 1.0 / (8.0 * i + 6.0)) / pow(16.0, i);
}
char *s = malloc(100);
sprintf(s, "pi equals %.8f", pi);
sendMessage(&mq, &(Message){.type = THREAD_MESSAGE, .code = (int)(intptr_t)p, .data = s});
counter += 1;
char *s2 = malloc(100);
sprintf(s2, "received %d message%s", counter, counter == 1 ? "" : "s");
sendMessage(&mq, &(Message){.type = JUST_A_MESSAGE, .data = s2});
if (counter == nThreads) {
sendMessage(&mq, &(Message){.type = EXIT_MESSAGE});
}
    return NULL;
}
int main(int argc, char **argv) {
clock_t timer = clock();
init();
nThreads = atoi(argv[1]);
pthread_t threads[nThreads];
for (int i = 0; i < nThreads; i += 1) {
pthread_create(&threads[i], NULL, worker, (void *)(intptr_t)i);
}
while (waitMessage(&mq, readMessage));
for (int i = 0; i < nThreads; i += 1) {
pthread_join(threads[i], NULL);
}
clean();
timer = clock() - timer;
printf("%.2f\n", (double)timer / CLOCKS_PER_SEC);
return 0;
}
--- EDIT ---
Okay, I managed to fix the problem by changing the program a bit to use semaphores. The waitMessage function doesn't have to be locked since it is accessed by only one thread, and the values it modifies do not clash with sendMessage.
MessageQueue mq;
pthread_mutex_t mutex;
sem_t sem;
void init() {
mq.first = malloc(sizeof(MessageQueueElement));
mq.last = mq.first;
pthread_mutex_init(&mutex, NULL);
sem_init(&sem, 0, 0);
}
void clean() {
free(mq.first);
pthread_mutex_destroy(&mutex);
sem_destroy(&sem);
}
void sendMessage(MessageQueue *this, Message *message) {
pthread_mutex_lock(&mutex);
this->last->message = *message;
this->last = this->last->next = malloc(sizeof(MessageQueueElement));
pthread_mutex_unlock(&mutex);
sem_post(&sem);
}
int waitMessage(MessageQueue *this, int (*readMessage)(unsigned, unsigned, void *)) {
sem_wait(&sem);
int n = readMessage(this->first->message.type, this->first->message.code, this->first->message.data);
MessageQueueElement *temp = this->first;
this->first = this->first->next;
free(temp);
return n;
}
Your waitMessage function is modifying this->first outside of any locking. This is a bad thing.
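For illustration, here is a sketch of waitMessage doing the head update under the same mutex that sendMessage uses, so this->first is never read or advanced while another thread is appending (the types and the mutex/sem globals are the ones from the edited version above):
int waitMessage(MessageQueue *this, int (*readMessage)(unsigned, unsigned, void *)) {
    sem_wait(&sem);                          /* at least one complete message exists */
    pthread_mutex_lock(&mutex);
    MessageQueueElement *elem = this->first; /* detach the head under the lock */
    this->first = elem->next;
    pthread_mutex_unlock(&mutex);
    int n = readMessage(elem->message.type, elem->message.code, elem->message.data);
    free(elem);
    return n;
}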
It's often not worth recreating things that are already provided for you by the OS. You're effectively trying to set up a pipe of Message structures. You could simply use an anonymous pipe instead (see here for Linux, or here for Windows) and write/read Message structures to/from it. There are also POSIX message queues, which are probably a bit more efficient.
In your case, with multiple worker threads, you'd have to have a supplementary mutex or semaphore to control which worker is trying to read from the pipe or message queue.
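As a sketch of the pipe idea on Linux (a stand-alone toy, not a drop-in for the code above): worker threads write fixed-size Message structs into an anonymous pipe and the main thread reads them back. Writes of sizeof(Message) bytes are far below PIPE_BUF, so they are atomic and concurrent writers do not interleave:
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef struct Message {
    unsigned type;
    unsigned code;
    void *data;
} Message;

static int pipe_fds[2];   /* [0] = read end, [1] = write end */

static void *worker(void *p)
{
    char *s = malloc(32);
    snprintf(s, 32, "hello from thread %ld", (long)(intptr_t)p);
    Message m = { .type = 1, .code = (unsigned)(intptr_t)p, .data = s };
    write(pipe_fds[1], &m, sizeof m);   /* atomic: sizeof m is far below PIPE_BUF */
    return NULL;
}

int main(void)
{
    enum { N = 4 };
    pthread_t t[N];
    if (pipe(pipe_fds) != 0)
        return 1;
    for (long i = 0; i < N; i++)
        pthread_create(&t[i], NULL, worker, (void *)(intptr_t)i);
    for (int received = 0; received < N; received++) {
        Message m;
        if (read(pipe_fds[0], &m, sizeof m) != (ssize_t)sizeof m)  /* blocks until a message arrives */
            break;
        printf("type %u from thread %u: %s\n", m.type, m.code, (char *)m.data);
        free(m.data);
    }
    for (int i = 0; i < N; i++)
        pthread_join(t[i], NULL);
    return 0;
}
The data pointer travels as a raw address, which is fine here because the sender and receiver are threads in the same process.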