My code is only used in a one-producer, one-consumer situation.
Here is my test code:
static void *afunc(void *arg) {
    Queue *q = arg;
    for (int i = 0; i < 100000; i++) {
        *queue_pull(q) = i;  // get space for one element
        queue_push(q);       // advance the write pointer
    }
    return NULL;
}

static void *bfunc(void *arg) {
    Queue *q = arg;
    for (;;) {
        int *i = queue_fetch(q);  // get the first element in the queue
        printf("%d\n", *i);
        queue_pop(q);             // advance the read pointer
    }
}

int main() {
    Queue queue;
    pthread_t a, b;
    queue_init(&queue);
    pthread_create(&a, NULL, afunc, &queue);
    pthread_create(&b, NULL, bfunc, &queue);
    sleep(100000);
    return 0;
}
And here is the implementation of the circular queue:
#define MAX_QUEUE_SIZE 3

typedef struct Queue {
    int data[MAX_QUEUE_SIZE];
    int read, write;
    pthread_mutex_t mutex, mutex2;
    pthread_cond_t not_empty, not_full;
} Queue;

int queue_init(Queue *queue) {
    memset(queue, 0, sizeof(Queue));
    pthread_mutex_init(&queue->mutex, NULL);
    pthread_cond_init(&queue->not_empty, NULL);
    pthread_mutex_init(&queue->mutex2, NULL);
    pthread_cond_init(&queue->not_full, NULL);
    return 0;
}

int *queue_fetch(Queue *queue) {
    int *ret;
    if (queue->read == queue->write) {
        pthread_mutex_lock(&queue->mutex);
        pthread_cond_wait(&queue->not_empty, &queue->mutex);
        pthread_mutex_unlock(&queue->mutex);
    }
    ret = &(queue->data[queue->read]);
    return ret;
}

void queue_pop(Queue *queue) {
    nx_atomic_set(queue->read, (queue->read + 1) % MAX_QUEUE_SIZE);
    pthread_cond_signal(&queue->not_full);
}

int *queue_pull(Queue *queue) {
    int *ret;
    if ((queue->write + 1) % MAX_QUEUE_SIZE == queue->read) {
        pthread_mutex_lock(&queue->mutex2);
        pthread_cond_wait(&queue->not_full, &queue->mutex2);
        pthread_mutex_unlock(&queue->mutex2);
    }
    ret = &(queue->data[queue->write]);
    return ret;
}

void queue_push(Queue *queue) {
    nx_atomic_set(queue->write, (queue->write + 1) % MAX_QUEUE_SIZE);
    pthread_cond_signal(&queue->not_empty);
}
After a few moments, it seems the two child threads deadlock.
EDIT: I switched to using two semaphores, but that also has a problem. It's pretty weird: if I just execute ./main, it seems fine, but if I redirect the output into a file, like ./main > a.txt, then wc -l a.txt does not match the number of items enqueued.
int queue_init(Queue *queue) {
    memset(queue, 0, sizeof(Queue));
    pthread_mutex_init(&queue->mutex, NULL);
    sem_unlink("/not_empty");
    queue->not_empty = sem_open("/not_empty", O_CREAT, 644, 0);
    sem_unlink("/not_full");
    queue->not_full = sem_open("/not_full", O_CREAT, 644, MAX_QUEUE_SIZE);
    return 0;
}

int *queue_fetch(Queue *queue) {
    sem_wait(queue->not_empty);
    return &(queue->data[queue->read]);
}

void queue_pop(Queue *queue) {
    nx_atomic_set(queue->read, (queue->read + 1) % MAX_QUEUE_SIZE);
    sem_post(queue->not_full);
}

int *queue_pull(Queue *queue) {
    sem_wait(queue->not_full);
    return &(queue->data[queue->write]);
}

void queue_push(Queue *queue) {
    nx_atomic_set(queue->write, (queue->write + 1) % MAX_QUEUE_SIZE);
    sem_post(queue->not_empty);
}
You are manipulating the state of the queue outside the mutex; this is inherently racy.
I would suggest using a single mutex, and taking it whenever you change or test the read and write indices. This also means that you don't need the atomic sets.
Quite possibly one of your threads is waiting for a condition to be signalled after the signalling has occurred, causing both threads to wait for each other indefinitely.
Pthreads condition variables don't remain signalled -- the signalling is a momentary action. The condition variable isn't used to determine whether to wait -- it's just used to wake up a thread that's already waiting; you need a different means for determining whether or not to wait, such as checking a flag or some sort of test condition.
Normally, you signal as follows:
Lock the mutex
Do your updates, generally leaving your test condition 'true' (eg. setting your flag)
Call pthread_cond_signal() or pthread_cond_broadcast()
Unlock the mutex
...and wait as follows:
Lock the mutex
Loop until your test expression is 'true' (eg. until your flag is set), calling pthread_cond_wait() only if the test is false (inside the loop).
After the loop, when your test has succeeded, do your work.
Unlock the mutex
For example, signalling might go something like this:
pthread_mutex_lock(&mtx); /* 1: lock mutex */
do_something_important(); /* 2: do your work... */
ready_flag = 1; /* ...and set the flag */
pthread_cond_signal(&cond); /* 3: signal the condition (before unlocking) */
pthread_mutex_unlock(&mtx); /* 4: unlock mutex */
and waiting might be something like this:
pthread_mutex_lock(&mtx); /* 1: lock mutex */
while (ready_flag == 0) /* 2: Loop until flag is set... */
pthread_cond_wait(&cond, &mtx); /* ...waiting when it isn't */
do_something_else(); /* 3: Do your work... */
ready_flag = 0; /* ...and clear the flag if it's all done */
pthread_mutex_unlock(&mtx); /* 4: unlock mutex */
The waiter won't miss the condition this way, because the mutex ensures that the waiter's test-and-wait and the signaller's set-and-signal cannot occur simultaneously.
This section of your queue_fetch() function:
if (queue->read == queue->write) {
    pthread_mutex_lock(&queue->mutex);
    pthread_cond_wait(&queue->not_empty, &queue->mutex);
    pthread_mutex_unlock(&queue->mutex);
}
ret = &(queue->data[queue->read]);
...might be rewritten as follows:
pthread_mutex_lock(&queue->mutex);
while (queue->read == queue->write)
    pthread_cond_wait(&queue->not_empty, &queue->mutex);
ret = &(queue->data[queue->read]);
pthread_mutex_unlock(&queue->mutex);
...where:
The lock/unlock of the mutex are moved around the if, so the mutex is held while the test expression is evaluated, and still held until the condition wait starts
The if is changed to a while in case the condition wait is prematurely interrupted
Access to queue->read and queue->write is done with the mutex held
Similar changes would be made to queue_pull().
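For example, queue_pull() might be sketched like this (a sketch that already uses the single shared mutex recommended further below, rather than the separate mutex2):
int* queue_pull(Queue *queue) {
    int *ret;
    pthread_mutex_lock(&queue->mutex);
    while ((queue->write + 1) % MAX_QUEUE_SIZE == queue->read)
        pthread_cond_wait(&queue->not_full, &queue->mutex);
    ret = &(queue->data[queue->write]);
    pthread_mutex_unlock(&queue->mutex);
    return ret;
}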
As for the signalling code, the following section of queue_pop():
nx_atomic_set(queue->read, (queue->read+1)%MAX_QUEUE_SIZE);
pthread_cond_signal(&queue->not_full);
...might be changed to:
pthread_mutex_lock(&queue->mutex);
queue->read = (queue->read + 1) % MAX_QUEUE_SIZE;
pthread_cond_signal(&queue->not_full);
pthread_mutex_unlock(&queue->mutex);
...where:
The mutex is held while signalling the condition (this ensures the condition can't be signalled between the waiter deciding whether to wait and actually starting to wait, since the waiter would hold the mutex during that interval)
The mutex is held while changing queue->read as well rather than using nx_atomic_set() since the mutex is needed when signalling the condition anyway
Similar changes would be made to queue_push().
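Sketched analogously (again holding the same mutex rather than using nx_atomic_set()), queue_push() might become:
void queue_push(Queue *queue) {
    pthread_mutex_lock(&queue->mutex);
    queue->write = (queue->write + 1) % MAX_QUEUE_SIZE;
    pthread_cond_signal(&queue->not_empty);
    pthread_mutex_unlock(&queue->mutex);
}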
Additionally, you should just use a single mutex (so that the same mutex is always held when accessing read and write), and once the while loops are added to the condition waits there's little compelling reason to use more than one condition variable. If switching to a single condition variable, just signal the condition again after completing a wait:
pthread_mutex_lock(&queue->mutex);
while (queue->read == queue->write) {
    pthread_cond_wait(&queue->cond, &queue->mutex);
    pthread_cond_signal(&queue->cond); /* <-- signal next waiter, if any */
}
ret = &(queue->data[queue->read]);
pthread_mutex_unlock(&queue->mutex);
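With that single condition variable, the signalling side is the same shape for both queue_pop() and queue_push(); for instance, queue_pop() might be sketched as:
void queue_pop(Queue *queue) {
    pthread_mutex_lock(&queue->mutex);
    queue->read = (queue->read + 1) % MAX_QUEUE_SIZE;
    pthread_cond_signal(&queue->cond);  /* one condition covers both "not empty" and "not full" */
    pthread_mutex_unlock(&queue->mutex);
}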
Related
while (1) {
    char message_buffer[SIZE];
    ssize_t message_length = mq_receive(mq_identifier, message_buffer, _mqueue_max_msg_size, NULL);
    if (message_length == -1) { /* error handling... */ }

    pthread_t pt1;
    int ret = pthread_create(&pt1, NULL, handle_message, message_buffer);
    if (ret) { /* error handling ... */ }
}

void *handle_message(void *message) {
    puts((char *) message);
    return NULL;
}
The above example is not an MRE but it is extremely simple:
I've got a main thread with a loop that constantly consumes messages from a message queue. Once a new message is received, it is stored in the local message_buffer buffer. Then, a new thread is spawned to "take care" of said new message, and thus the message buffer's address is passed into handle_message, which the new thread subsequently executes.
The problem
Often, 2 threads will print the same message, even though I can verify with a 100% certainty that the messages in the queue were not the same.
I am not completely certain, but I think I understand why this is happening:
Say that I push 2 different messages to the mqueue and only then begin consuming them.
In the first iteration of the while loop, the first message gets consumed from the queue and saved to message_buffer. A new thread gets spawned and the address of message_buffer is passed to it. But that thread may not be fast enough to print the buffer's contents to the stream before the next message gets consumed (on the next iteration of the loop) and the contents of message_buffer are overwritten. Thus the first and second threads now print the same value.
My question is: what is the most efficient way to solve this? I'm pretty new to parallel programming and threading/pthreads and I'm pretty overwhelmed by the different synchronization primitives.
Mutex trouble
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

while (1) {
    char message_buffer[SIZE];
    pthread_mutex_lock(&m);
    ssize_t message_length = mq_receive(mq_identifier, message_buffer, _mqueue_max_msg_size, NULL);
    pthread_mutex_unlock(&m);
    if (message_length == -1) { /* error handling... */ }

    pthread_t pt1;
    int ret = pthread_create(&pt1, NULL, handle_message, message_buffer);
    if (ret) { /* error handling ... */ }
}

void *handle_message(void *message) {
    char own_buffer[SIZE];
    pthread_mutex_lock(&m);
    strncpy(own_buffer, (char *) message, SIZE);
    pthread_mutex_unlock(&m);
    puts(own_buffer);
    return NULL;
}
I don't think my current mutex implementation is right, as the threads still receive duplicate messages. The main thread can lock the mutex, consume a message into the buffer, unlock the mutex, and spawn a thread, but that thread may still be delayed while the main thread overwrites the buffer again (since the new thread never held the mutex before the buffer was reused), effectively making my current mutex implementation useless. How do I overcome this?
The problem is that you end the loop that contains message_buffer before guaranteeing that the thread has finished with that memory.
while (1) {
    char message_buffer[SIZE];
    ssize_t message_length = mq_receive(...);
    if (message_length == -1) { /* error handling */ }

    pthread_t pt1;
    int ret = pthread_create(&pt1, NULL, handle_message, message_buffer);
    if (ret) { /* error handling */ }

    /****** Can't go beyond here until the thread is done with message_buffer. ******/
}

void *handle_message(void *message) {
    char own_buffer[SIZE];
    strncpy(own_buffer, (char *) message, SIZE);
    /****** Only now can the caller loop back. ******/
    puts(own_buffer);
    return NULL;
}
You could use a semaphore or similar.
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int copied = 0;

while (1) {
    char message_buffer[SIZE];
    ssize_t message_length = mq_receive(...);
    if (message_length == -1) { /* error handling */ }

    pthread_t pt1;
    int ret = pthread_create(&pt1, NULL, handle_message, message_buffer);
    if (ret) { /* error handling */ }

    // Wait until the thread is done with message_buffer.
    pthread_mutex_lock(&mutex);
    while (!copied) pthread_cond_wait(&cond, &mutex);
    copied = 0;
    pthread_mutex_unlock(&mutex);
}

void *handle_message(void *message) {
    char own_buffer[SIZE];
    strncpy(own_buffer, (char *) message, SIZE);

    // Done with the caller's buffer.
    // Signal the caller to continue.
    pthread_mutex_lock(&mutex);
    copied = 1;
    pthread_cond_signal(&cond);
    pthread_mutex_unlock(&mutex);

    puts(own_buffer);
    return NULL;
}
(The added chunks effectively perform semaphore operations. See the last snippet of this answer for a more generic implementation.)
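For reference, a rough sketch of the same handshake with an actual POSIX semaphore (an unnamed sem_t, assuming it is initialised once at startup with sem_init(&copied_sem, 0, 0)):
#include <semaphore.h>

static sem_t copied_sem;        /* sem_init(&copied_sem, 0, 0) once before the loop */

/* In the receiving loop, right after pthread_create(): */
sem_wait(&copied_sem);          /* block until the thread has copied message_buffer */

/* In handle_message(), right after the strncpy(): */
sem_post(&copied_sem);          /* allow the caller to reuse message_buffer */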
But there's a simpler solution: Make the copy before creating the thread.
while (1) {
    char message_buffer[SIZE];
    ssize_t message_length = mq_receive(...);
    if (message_length == -1) { /* error handling */ }

    pthread_t pt1;
    int ret = pthread_create(&pt1, NULL, handle_message, strdup(message_buffer));
    if (ret) { /* error handling */ }
}

void *handle_message(void *message) {
    char *own_buffer = message;
    puts(own_buffer);
    free(own_buffer);
    return NULL;
}
I am trying to create two threads resembling TaskA and TaskB. Both TaskA and TaskB do some kind of computation that is not very interesting for this post. TaskA and TaskB have to be executed 10 times in order to cover the whole array. TaskA has an input AA and an output BB. BB is also the input of TaskB. CC is the output of TaskB. Because BB is written by TaskA and read by TaskB, we need mutexes.
The behavior I would like to achieve is that when TaskA operates on array i, TaskB operates on array i-1 in parallel, where i is the index of the array being processed.
I want to avoid having TaskB wait for TaskA to finish for every i.
The problem here is that I have a deadlock. ThreadA and ThreadB represent TaskA and TaskB. To make it easier I removed all the computations and left only the synchronization instructions. The deadlock is caused because ThreadA signals the condition variable cv[0] before ThreadB is in the state that waits for cv[0].
Do you know any way to remove the deadlock, but without TaskA waiting for TaskB to finish and vice versa? Ideally, when TaskA operates on array i, TaskB should operate on array i-1.
/* Includes */
#include <unistd.h> /* Symbolic Constants */
#include <sys/types.h> /* Primitive System Data Types */
#include <errno.h> /* Errors */
#include <stdio.h> /* Input/Output */
#include <stdlib.h> /* General Utilities */
#include <pthread.h> /* POSIX Threads */
#include <string.h> /* String handling */
#include <semaphore.h> /* Semaphore */
#include <stdint.h>
#define ARRAY_SIZE 2048*2400
#define DEBUG
//#define CHECK_RESULTS
pthread_mutex_t mutex[10];
pthread_cond_t cv[10];
/* prototype for thread routine */
void threadA ( void *ptr );
void threadB ( void *ptr );
struct thread_arg
{
uint32_t *in;
uint32_t *out;
uint32_t ID;
};
int main()
{
pthread_t pthA;
pthread_t pthB;
//Memory allocation
uint32_t *AA = malloc(10*ARRAY_SIZE*sizeof(uint32_t));
uint32_t *BB = malloc(10*ARRAY_SIZE*sizeof(uint32_t));
uint32_t *CC = malloc(10*ARRAY_SIZE*sizeof(uint32_t));
unsigned int j,i;
// THread Arguments
struct thread_arg arguments[2];
arguments[0].in = AA;
arguments[0].out = BB;
arguments[0].ID = 1;
arguments[1].in = BB;
arguments[1].out = CC;
arguments[1].ID = 2;
//Init arguments data
for (j=0;j<10;j++)
{
for (i=0;i<ARRAY_SIZE;i++)
{
AA[j*ARRAY_SIZE+i] = i;
BB[j*ARRAY_SIZE+i] = 0;
CC[j*ARRAY_SIZE+i] = 99 ;
}
}
//Mutex and condition variable init
for (i=0;i<10;i++){
pthread_mutex_init(&mutex[i], NULL);
pthread_cond_init (&cv[i], NULL);
}
pthread_create (&pthA, NULL, (void *) &threadA, (void *) &arguments[0]);
pthread_create (&pthB, NULL, (void *) &threadB, (void *) &arguments[1]);
pthread_join(pthA, NULL);
pthread_join(pthB, NULL);
// Destroy mutexes and CVs
for (i=0;i<10;i++)
{
pthread_mutex_destroy(&mutex[i]);
pthread_cond_destroy(&cv[i]);
}
// Checking results
exit(0);
} /* main() */
void threadA ( void *ptr )
{
int i;
struct thread_arg *arg = (struct thread_arg *) ptr;
for (i=0;i<10;i++)
{
pthread_mutex_lock(&mutex[i]);
printf("TA: LOCK_M%d \n",i);
pthread_cond_signal(&cv[i]);
printf("TA: SIG_CV%d\n",i);
pthread_mutex_unlock(&mutex[i]);
printf("TA: UNL_M%d\n",i);
}
pthread_exit(0); /* exit thread */
}
void threadB ( void *ptr )
{
int i;
struct thread_arg *arg = (struct thread_arg *) ptr;
for (i=0;i<10;i++)
{
pthread_mutex_lock(&mutex[i]);
printf("TB: WAIT_CV%d\n",i,i);
pthread_cond_wait(&cv[i], &mutex[i]);
printf("TB CV%d_PASSED\n",i);
pthread_mutex_unlock(&mutex[i]);
printf("TB UNL_M%d \n",i);
}
pthread_exit(NULL);
}
As WhozCraig commented, a condition variable needs to be paired with a condition over some shared state, known as a predicate. The mutex is used to protect the shared state.
In this example, your shared state could be an integer that contains the highest index of BB[] that ThreadA has produced. ThreadB then waits for this number to reach the index that it is up to reading. In this design, you only need one mutex and one condition variable. The globals would then be:
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
int BB_ready = -1; /* Protected by 'mutex' */
(Using the static PTHREAD_*_INITIALIZER initialisers means that you don't need to bother with pthread_*_init() and pthread_*_destroy()).
The loop in ThreadA would then be:
for (i = 0; i < 10; i++)
{
    /* Process AA[i] into BB[i] here */

    /* Now mark BB[i] as ready */
    pthread_mutex_lock(&mutex);
    printf("TA: LOCK_M%d \n", i);
    BB_ready = i;
    pthread_cond_signal(&cv);
    printf("TA: SIG_CV%d\n", i);
    pthread_mutex_unlock(&mutex);
    printf("TA: UNL_M%d\n", i);
}
..and in ThreadB:
for (i = 0; i < 10; i++)
{
    /* Wait for BB[i] to be ready */
    pthread_mutex_lock(&mutex);
    printf("TB: WAIT_CV%d\n", i);
    while (BB_ready < i)
        pthread_cond_wait(&cv, &mutex);
    printf("TB CV%d_PASSED\n", i);
    pthread_mutex_unlock(&mutex);
    printf("TB UNL_M%d \n", i);

    /* Now process BB[i] into CC[i] here */
}
Notice that pthread_cond_signal() is called whenever the shared state has changed, which allows the other thread to wake up and re-check the state, if it's waiting.
The waiting thread always loops around, checking the state and then waiting on the condition variable if the state isn't ready yet.
Can someone provide some examples/tips/hints on how to solve the following assignment: a resource may be used by 2 types of processes, black and white. While the resource is used by white processes it cannot be used by black processes, and vice versa. Implement access to the resource while avoiding starvation. In an older post I was advised to use a variation on the seqlock algorithm, but I can't figure out how to adapt that algorithm to this assignment.
EDIT: this is the code I've written so far
#include <stdio.h>
#include <pthread.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdlib.h>
struct RW;
struct RW
{
volatile int num_reads_in_progress;
volatile int num_writes;
pthread_cond_t reader_cv;
pthread_cond_t writer_cv;
pthread_mutex_t lock;
};
char *buf;
void signal_next(struct RW *b);
extern char *xx_read(struct RW *);
extern void xx_write(struct RW *, char *);
// Precondition: b->lock must be locked before this function is called
void signal_next(struct RW *b)
{
if (b->num_writes > 0)
{
// if any writes are waiting wake one up
pthread_cond_signal(&b->writer_cv);
}
else
{
// if are no writes pending, wake up all the readers
pthread_cond_broadcast(&b->reader_cv);
}
}
void *ts_read(void *vb);
void *ts_read(void *vb)
{
struct RW *b = vb;
pthread_mutex_lock(&b->lock);
while (b->num_writes > 0)
{
// cond_wait unlocks the mutex, waits to be signaled, then re-acquires the mutex
pthread_cond_wait(&b->reader_cv, &b->lock);
}
// By here, b->num_writes must be 0
b->num_reads_in_progress++;
pthread_mutex_unlock(&b->lock);
buf = xx_read(b);
pthread_mutex_lock(&b->lock);
b->num_reads_in_progress--;
signal_next(b);
pthread_mutex_unlock(&b->lock);
return 0;
}
void *ts_write(void *vb);
void *ts_write(void *vb)
{
struct RW *b = vb;
pthread_mutex_lock(&b->lock);
b->num_writes++;
if (b->num_writes > 1 || b->num_reads_in_progress > 0)
{
// cond_wait unlocks the mutex, waits to be signaled,
// then re-acquires the mutex
pthread_cond_wait(&b->writer_cv, &b->lock);
}
pthread_mutex_unlock(&b->lock);
xx_write(b, buf);
pthread_mutex_lock(&b->lock);
b->num_writes--;
signal_next(b);
pthread_mutex_unlock(&b->lock);
return 0;
}
int main(void)
{
pthread_t white[3];
pthread_t black[3];
struct RW *rw;
rw = malloc(sizeof(struct RW));
rw->num_reads_in_progress = 0;
rw->num_writes = 0;
pthread_mutex_init(&rw->lock, NULL);
pthread_cond_init(&rw->reader_cv, NULL);
pthread_cond_init(&rw->writer_cv, NULL);
int i;
for (i = 0; i < 3; i++)
{
pthread_create(&white[i], NULL, &ts_read, rw);
}
for (i = 0; i < 3; i++)
{
pthread_create(&black[i], NULL, ts_write, rw);
}
for (i = 0; i < 3; i++)
{
pthread_join(white[i], NULL);
}
for (i = 0; i < 3; i++)
{
pthread_join(black[i], NULL);
}
return 0;
}
You need a mutex that locks and unlocks. Basically you can think of a mutex as a boolean value that is either true or false (locked or unlocked, if you prefer).
When a black process accesses the resource, the mutex should be locked. On the other hand, when a white process tries to access it, it should first check the mutex's status. If the mutex is locked, it will have to wait until the mutex is unlocked.
Pseudocode:
unsigned char mutex = 0;

//processBlack tries to access the resource
if (mutex == 1)
    while (mutex != 0);
mutex = 1;
//we now hold the "mutex", do whatever you want
mutex = 0; //do not forget to unlock it

//processWhite tries to access the resource
if (mutex == 1)
    while (mutex != 0);
mutex = 1;
//we now hold the "mutex", do whatever you want
mutex = 0; //do not forget to unlock it
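Note that the pseudocode above is only illustrative: the separate test and set are not atomic, so two processes could still slip in together. With pthreads, the same idea might be sketched with a real mutex, which performs the test-and-acquire atomically (this sketch does not by itself address the starvation requirement):
#include <pthread.h>

static pthread_mutex_t resource_lock = PTHREAD_MUTEX_INITIALIZER;

void use_resource(void)   /* called by both black and white processes */
{
    pthread_mutex_lock(&resource_lock);    /* blocks until the resource is free */
    /* ... use the resource ... */
    pthread_mutex_unlock(&resource_lock);  /* do not forget to unlock it */
}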
If you want to use the seqlock mechanism, take a look here, section 5.7.4:
http://www.makelinux.net/ldd3/chp-5-sect-7
and here for writers example:
http://www.sao.ru/hq/sts/linux/doc/porting_to_26/22818.html
I have a worker thread processing a queue of work items.
//producer
void push_into_queue(char *item) {
    pthread_mutex_lock(&queueMutex);
    if (workQueue.full) {
        // full
    } else {
        add_item_into_queue(item);
        pthread_cond_signal(&queueSignalPush);
    }
    pthread_mutex_unlock(&queueMutex);
}

// consumer1
void* worker(void* arg) {
    while (true) {
        pthread_mutex_lock(&queueMutex);
        while (workQueue.empty)
            pthread_cond_wait(&queueSignalPush, &queueMutex);
        item = workQueue.front; // pop from queue
        add_item_into_list(item);
        // do I need another signal here for thread2?
        pthread_cond_signal(&queueSignalPop);
        pthread_mutex_unlock(&queueMutex);
    }
    return NULL;
}

pthread_create(&thread1, NULL, worker, NULL);
Now I would like to have thread2 consume the data inserted in add_item_into_list() but only if items have been added to the list. Note that the list is permanent and can't be emptied nor freed for the entire duration of the program.
So my question is: do I need another pthread_cond_signal? If yes, where would this signal go, and what would my other worker look like (canonical form)?
I see 2 possible ways of solving the problem:
a. Introduce another condition variable (e.g. signalList) for the list, so that the consumer2 thread would wait for events on it. In this case consumer1 has to signal twice: once on queueSignalPop and once on signalList:
// consumer1
void* worker(void* arg) {
    while (true) {
        // ...
        pthread_cond_signal(&queueSignalPop);
        pthread_cond_signal(&signalList);
        pthread_mutex_unlock(&queueMutex);
    }
    return NULL;
}
b. Use the existing queueSignalPop condition variable inside consumer2 to wait for events, and use broadcast instead of signal inside consumer1. Broadcast means all the threads waiting on the condition variable will wake up:
// consumer1
void* worker(void* arg) {
    while (true) {
        // ...
        pthread_cond_broadcast(&queueSignalPop);
        pthread_mutex_unlock(&queueMutex);
    }
    return NULL;
}

// consumer2
void* worker2(void* arg) {
    while (true) {
        pthread_mutex_lock(&queueMutex);
        while (list.empty)
            pthread_cond_wait(&queueSignalPop, &queueMutex);
        // ...
        pthread_mutex_unlock(&queueMutex);
    }
    return NULL;
}
I would propose going with the first approach, since it better distinguishes the purpose of each condition variable.
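For completeness, consumer2 under approach (a) might be sketched like this (it reuses queueMutex, since consumer1 signals signalList while holding it; a dedicated list mutex would also work):
// consumer2
void* worker2(void* arg) {
    while (true) {
        pthread_mutex_lock(&queueMutex);
        while (list.empty)
            pthread_cond_wait(&signalList, &queueMutex);
        // ... consume the newly added list items ...
        pthread_mutex_unlock(&queueMutex);
    }
    return NULL;
}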
I would like to wake up a pthread from another pthread - but after some time. I know a signal or pthread_cond_signal with pthread_cond_wait can be used to wake another thread, but I can't see a way to schedule this. The situation would be something like:
THREAD 1:
========
while(1)
recv(low priority msg);
dump msg to buffer
THREAD 2:
========
while(1)
recv(high priority msg);
..do a little bit of processing with msg ..
dump msg to buffer
wake(THREAD3, 5-seconds-later); <-- **HOW TO DO THIS? **
//let some msgs collect for at least a 5 sec window.
//i.e.,Don't wake thread3 immediately for every msg rcvd.
THREAD 3:
=========
while(1)
do some stuff ..
Process all msgs in buffer
sleep(60 seconds).
Is there any simple way to schedule a wakeup (short of creating a 4th thread that wakes up every second and decides whether there is a scheduled entry for thread-3 to wake up)? I really don't want to wake up thread-3 frequently if there are only low-priority msgs in the queue. Also, since the messages come in bursts (say 1000 high-priority messages in a single burst), I don't want to wake up thread-3 for every single message. It really slows things down (as there is a bunch of other processing it does every time it wakes up).
I am using an Ubuntu PC.
How about using a pthread_cond_t object, available through the pthread API?
You could share such an object among your threads and let them act on it appropriately.
The resulting code should look like this:
/*
* I lazily chose to make it global.
* You could dynamically allocate the memory for it
* And share the pointer between your threads in
* A data structure through the argument pointer
*/
pthread_cond_t cond_var;
pthread_mutex_t cond_mutex;
int wake_up = 0;
/* To call before creating your threads: */
int err;
if (0 != (err = pthread_cond_init(&cond_var, NULL))) {
/* An error occurred, handle it nicely */
}
if (0 != (err = pthread_mutex_init(&cond_mutex, NULL))) {
/* Error ! */
}
/*****************************************/
/* Within your threads */
void *thread_one(void *arg)
{
int err = 0;
/* Remember you can embed the cond_var
* and the cond_mutex in
* Whatever you get from arg pointer */
/* Some work */
/* Argh ! I want to wake up thread 3 */
pthread_mutex_lock(&cond_mutex);
wake_up = 1; // Tell thread 3 a wake_up rq has been done
pthread_mutex_unlock(&cond_mutex);
if (0 != (err = pthread_cond_broadcast(&cond_var))) {
/* Oops ... Error :S */
} else {
/* Thread 3 should be alright now ! */
}
/* Some work */
pthread_exit(NULL);
return NULL;
}
void *thread_three(void *arg)
{
int err;
/* Some work */
/* Oh, I need to sleep for a while ...
* I'll wait for thread_one to wake me up. */
pthread_mutex_lock(&cond_mutex);
while (!wake_up) {
err = pthread_cond_wait(&cond_var, &cond_mutex);
pthread_mutex_unlock(&cond_mutex);
if (!err || ETIMEDOUT == err) {
/* Woken up or time out */
} else {
/* Oops : error */
/* We might have to break the loop */
}
/* We lock the mutex again before the test */
pthread_mutex_lock(&cond_mutex);
}
/* Since we have acknowledged the wake_up rq
* We set "wake_up" to 0. */
wake_up = 0;
pthread_mutex_unlock(&cond_mutex);
/* Some work */
pthread_exit(NULL);
return NULL;
}
If you want your thread 3 to exit the blocking call to pthread_cond_wait() after a timeout, consider using pthread_cond_timedwait() instead (read the man page carefully: the timeout value you supply is an ABSOLUTE time, not the amount of time you don't want to exceed).
If the timeout expires, pthread_cond_timedwait() will return an ETIMEDOUT error.
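For example, a 5-second timeout would be built as an absolute deadline from the current time (a sketch reusing the cond_var/cond_mutex/wake_up names above; assumes <time.h> and <errno.h> are included):
struct timespec deadline;
clock_gettime(CLOCK_REALTIME, &deadline);  /* CLOCK_REALTIME is the default clock for a cond var */
deadline.tv_sec += 5;                      /* wake up at most 5 seconds from now */

pthread_mutex_lock(&cond_mutex);
while (!wake_up) {
    int err = pthread_cond_timedwait(&cond_var, &cond_mutex, &deadline);
    if (ETIMEDOUT == err)
        break;                             /* deadline reached, proceed anyway */
}
wake_up = 0;
pthread_mutex_unlock(&cond_mutex);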
EDIT: I skipped error checking in the lock/unlock calls; don't forget to handle this potential issue!
EDIT²: I reviewed the code a little bit.
You can have the woken thread do the wait itself. In the waking thread:
pthread_mutex_lock(&lock);
if (!wakeup_scheduled) {
    wakeup_scheduled = 1;
    wakeup_time = time(NULL) + 5;
    pthread_cond_signal(&cond);
}
pthread_mutex_unlock(&lock);
In the waiting thread:
pthread_mutex_lock(&lock);
while (!wakeup_scheduled)
    pthread_cond_wait(&cond, &lock);
pthread_mutex_unlock(&lock);

sleep_until(wakeup_time); /* sleep until the scheduled absolute time */

pthread_mutex_lock(&lock);
wakeup_scheduled = 0;
pthread_mutex_unlock(&lock);
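sleep_until() above is not a standard function; one possible sketch of it uses clock_nanosleep() with an absolute deadline (assumes <time.h> and <errno.h>):
static void sleep_until(time_t when)
{
    struct timespec deadline = { .tv_sec = when, .tv_nsec = 0 };
    /* TIMER_ABSTIME: sleep until the absolute time; retry if interrupted by a signal */
    while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &deadline, NULL) == EINTR)
        ;
}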
Why not just compare the current time to one saved earlier?
time_t last_uncond_wakeup = time(NULL);
time_t last_recv = 0;

while (1)
{
    if (recv())
    {
        // Do things
        last_recv = time(NULL);
    }

    // Possible other things

    time_t now = time(NULL);
    if ((last_recv != 0 && now - last_recv > 5) ||
        (now - last_uncond_wakeup > 60))
    {
        wake(thread3);
        last_uncond_wakeup = now;
        last_recv = 0;
    }
}