I'm working on a project that solves the classic producer/consumer scheduling problem.
Linux openSUSE 42.3 Leap, System V API, C language.
The project consists of three programs: producer, consumer and scheduler.
The scheduler's job is to create 3 semaphores and a shared memory segment holding a buffer (array) that the producers write to and the consumers read from, and to launch n producer and m consumer processes.
Each producer must perform k write cycles to the buffer, and each consumer must perform k read cycles.
3 semaphores are used: mutex, empty and full. The value of the full semaphore is used in the program as an index into the array.
The problem is that, for example, when the buffer size is 3, the producers write 4 portions of data; when the buffer size is 4, they write 5 portions of data (although there should be 4), and so on.
Consumers read normally.
In addition, the program does not behave predictably when calling the get_semVal function.
Please help, I will be very, very grateful for the answer.
producer
#define BUFFER_SIZE 3
#define MY_RAND_MAX 99 // Highest integer for random number generator
#define LOOP 3 //the number of write / read cycles for each process
#define DATA_DIMENSION 4 // size of portion of data for 1 iteration
struct Data {
int buf[DATA_DIMENSION];
};
typedef struct Data buffer_item;
buffer_item buffer[BUFFER_SIZE];
void P(int semid)
{
struct sembuf op;
op.sem_num = 0;
op.sem_op = -1;
op.sem_flg = 0;
semop(semid,&op,1);
}
void V(int semid)
{
struct sembuf op;
op.sem_num = 0;
op.sem_op = +1;
op.sem_flg = 0;
semop(semid,&op,1);
}
void Init(int semid,int index,int value)
{
semctl(semid,index,SETVAL,value);
}
int get_semVal(int sem_id)
{
int value = semctl(sem_id,0,GETVAL,0);
return value;
}
int main()
{
sem_mutex = semget(KEY_MUTEX,1,0);
sem_empty = semget(KEY_EMPTY,1,0);
sem_full = semget(KEY_FULL,1,0);
srand(time(NULL));
const int SIZE = sizeof(buffer[BUFFER_SIZE]);
shm_id = shmget(KEY_SHARED_MEMORY,SIZE, 0);
int i=0;
buffer_item *adr;
do {
buffer_item nextProduced;
P(sem_empty);
P(sem_mutex);
//prepare portion of data
for(int j=0;j<DATA_DIMENSION;j++)
{
nextProduced.buf[j]=rand()%5;
}
adr = (buffer_item*)shmat(shm_id,NULL,0);
int full_value = get_semVal(sem_full);//get index of array
printf("-----%d------\n",full_value-1);//it’s for test the index of array in buffer
// write the generated portion of data by index full_value-1
adr[full_value-1].buf[0] = nextProduced.buf[0];
adr[full_value-1].buf[1] = nextProduced.buf[1];
adr[full_value-1].buf[2] = nextProduced.buf[2];
adr[full_value-1].buf[3] = nextProduced.buf[3];
shmdt(adr);
printf("producer %d produced %d %d %d %d\n", getpid(), nextProduced.buf[0],nextProduced.buf[1],nextProduced.buf[2],nextProduced.buf[3]);
V(sem_mutex);
V(sem_full);
i++;
} while (i<LOOP);
V(sem_empty);
sleep(1);
}
consumer
…
int main()
{
sem_mutex = semget(KEY_MUTEX,1,0);
sem_empty = semget(KEY_EMPTY,1,0);
sem_full = semget(KEY_FULL,1,0);
srand(time(NULL));
const int SIZE = sizeof(buffer[BUFFER_SIZE]);
shm_id = shmget(KEY_SHARED_MEMORY,SIZE,0);
int i=0;
buffer_item *adr;
do
{
buffer_item nextConsumed;
P(sem_full);
P(sem_mutex);
int full_value = get_semVal(sem_full);
adr = (buffer_item*)shmat(shm_id,NULL,0);
for(int i=0;i<BUFFER_SIZE;i++)
{
printf("--%d %d %d %d\n",adr[i].buf[0],adr[i].buf[1],adr[i].buf[2],adr[i].buf[3]);
}
for(int i=0;i<BUFFER_SIZE;i++)
{
buffer[i].buf[0] = adr[i].buf[0];
buffer[i].buf[1] = adr[i].buf[1];
buffer[i].buf[2] = adr[i].buf[2];
buffer[i].buf[3] = adr[i].buf[3];
}
tab(nextConsumed);
nextConsumed.buf[0]=buffer[full_value-1].buf[0];
nextConsumed.buf[1]=buffer[full_value-1].buf[1];
nextConsumed.buf[2]=buffer[full_value-1].buf[2];
nextConsumed.buf[3]=buffer[full_value-1].buf[3];
// Set buffer to 0 since we consumed that item
for(int j=0;j<DATA_DIMENSION;j++)
{
buffer[full_value-1].buf[j]=0;
}
for(int i=0;i<BUFFER_SIZE;i++)
{
adr[i].buf[0]=buffer[i].buf[0];
adr[i].buf[1]=buffer[i].buf[1];
adr[i].buf[2]=buffer[i].buf[2];
adr[i].buf[3]=buffer[i].buf[3];
}
shmdt(adr);
printf("consumer %d consumed %d %d %d %d\n", getpid() ,nextConsumed.buf[0],nextConsumed.buf[1],nextConsumed.buf[2],nextConsumed.buf[3]);
V(sem_mutex);
// increase empty
V(sem_empty);
i++;
} while (i<LOOP);
V(sem_full);
sleep(1);
}
Scheduler
…
struct Data {
int buf[DATA_DIMENSION];
};
typedef struct Data buffer_item;
buffer_item buffer[BUFFER_SIZE];
struct TProcList
{
pid_t processPid;
};
typedef struct TProcList ProcList;
…
ProcList createProcess(char *name)
{
pid_t pid;
ProcList a;
pid = fork();
if (!pid){
kill(getpid(),SIGSTOP);
execl(name,name,NULL);
exit(0);
}
else if(pid){
a.processPid=pid;
}
else
cout<<"error forking"<<endl;
return a;
}
int main()
{
sem_mutex = semget(KEY_MUTEX,1,IPC_CREAT|0600);
sem_empty = semget(KEY_EMPTY,1,IPC_CREAT|0600);
sem_full = semget(KEY_FULL,1,IPC_CREAT|0600);
Init(sem_mutex,0,1);//unlock mutex
Init(sem_empty,0,BUFFER_SIZE);
Init(sem_full,0,0);//unlock empty
const int SIZE = sizeof(buffer[BUFFER_SIZE]);
shm_id = shmget(KEY_SHARED_MEMORY,SIZE,IPC_CREAT|0600);
buffer_item *adr;
adr = (buffer_item*)shmat(shm_id,NULL,0);
for(int i=0;i<BUFFER_SIZE;i++)
{
buffer[i].buf[0]=0;
buffer[i].buf[1]=0;
buffer[i].buf[2]=0;
buffer[i].buf[3]=0;
}
for(int i=0;i<BUFFER_SIZE;i++)
{
adr[i].buf[0] = buffer[i].buf[0];
adr[i].buf[1] = buffer[i].buf[1];
adr[i].buf[2] = buffer[i].buf[2];
adr[i].buf[3] = buffer[i].buf[3];
}
int consumerNumber = 2;
int produserNumber = 2;
ProcList producer_pids[produserNumber];
ProcList consumer_pids[consumerNumber];
for(int i=0;i<produserNumber;i++)
{
producer_pids[i]=createProcess("/home/andrey/build-c-unknown-Debug/c");//create sleeping processes
}
for(int i=0;i<consumerNumber;i++)
{
consumer_pids[i]=createProcess("/home/andrey/build-p-unknown-Debug/p");
}
sleep(3);
for(int i=0;i<produserNumber;i++)
{
kill(producer_pids[i].processPid,SIGCONT);//continue processes
sleep(1);
}
for(int i=0;i<consumerNumber;i++)
{
kill(consumer_pids[i].processPid,SIGCONT);
sleep(1);
}
for(int i=0;i<produserNumber;i++)
{
waitpid(producer_pids[i].processPid,&stat,WNOHANG);//wait
}
for(int i=0;i<consumerNumber;i++)
{
waitpid(consumer_pids[i].processPid,&stat,WNOHANG);
}
shmdt(adr);
semctl(sem_mutex,0,IPC_RMID);
semctl(sem_full,0,IPC_RMID);
semctl(sem_empty,0,IPC_RMID);
}
It is not fun to try and unravel uncommented code someone else has written, so instead, I'll explain a verified working scheme.
(Note that comments should always explain programmer intent or idea, and never what the code does; we can read the code to see what it does. The problem is, we need to understand the programmer's idea/intent first, before we can compare it to the implementation. Without comments, I would need to first read the code to try and guess at the intent, then compare that to the code itself; it's double the work.)
(I suspect OP's underlying problem is trying to use semaphore values as buffer indexes, but didn't pore through all of the code to be 100% certain.)
Let's assume the shared memory structure is something like the following:
struct shared {
sem_t lock; /* Initialized to value 1 */
sem_t more; /* Initialized to 0 */
sem_t room; /* Initialized to MAX_ITEMS */
size_t num_items; /* Initialized to 0 */
size_t next_item; /* Initialized to 0 */
item_type item[MAX_ITEMS];
};
and we have struct shared *mem pointing to the shared memory area.
Note that you should include <limits.h> and verify, at run time, that MAX_ITEMS <= SEM_VALUE_MAX. Otherwise MAX_ITEMS is too large, and this semaphore scheme may fail. (SEM_VALUE_MAX on Linux is usually INT_MAX, so big enough, but it may vary. And if you compile with -O to optimize, a check that holds will be optimized completely away. So it is a very cheap and reasonable check to have.)
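For example, a minimal sketch of that check (MAX_ITEMS is assumed to be the compile-time constant from your header):

#include <limits.h>      /* SEM_VALUE_MAX */
#include <stdio.h>
#include <stdlib.h>

static void check_sem_limits(void)
{
    /* With -O the compiler drops this branch entirely when the check holds,
       because both operands are compile-time constants. */
    if ((long)MAX_ITEMS > (long)SEM_VALUE_MAX) {
        fprintf(stderr, "MAX_ITEMS (%ld) exceeds SEM_VALUE_MAX (%ld).\n",
                (long)MAX_ITEMS, (long)SEM_VALUE_MAX);
        exit(EXIT_FAILURE);
    }
}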
The mem->lock semaphore is used like a mutex. That is, to lock the structure for exclusive access, a process waits on it. When it is done, it posts on it.
Note that while sem_post(&(mem->lock)) will always succeed (ignoring bugs like mem being NULL or pointing to uninitialized memory or having been overwritten with garbage), technically, sem_wait() can be interrupted by a signal delivered to a userspace handler installed without the SA_RESTART flag. This is why I recommend using a static inline helper function instead of sem_wait():
static inline int semaphore_wait(sem_t *const s)
{
int result;
do {
result = sem_wait(s);
} while (result == -1 && errno == EINTR);
return result;
}
static inline int semaphore_post(sem_t *const s)
{
return sem_post(s);
}
In cases where signal delivery should not interrupt waiting on the semaphore, you use semaphore_wait(). If you do want a signal delivery to interrupt waiting on a semaphore, you use sem_wait(); if it returns -1 with errno == EINTR, the operation was interrupted due to signal delivery, and the semaphore wasn't actually decremented. (Many other low-level functions, like read(), write(), send(), recv(), can be interrupted in the exact same way; they can also just return a short count, in case the interruption occurred part way.)
The semaphore_post() is just a wrapper, so that you can use "matching" post and wait operations. That sort of "useless" wrapper does help when reading the code, you see.
The item[] array is used as a circular queue. The num_items indicates the number of items in it. If num_items > 0, the next item to be consumed is item[next_item]. If num_items < MAX_ITEMS, the next item to be produced is item[(next_item + num_items) % MAX_ITEMS].
The % is the modulo operator. Here, because next_item and num_items are always nonnegative, (next_item + num_items) % MAX_ITEMS is always between 0 and MAX_ITEMS - 1, inclusive. This is what makes the buffer circular.
When a producer has constructed a new item, say item_type newitem;, and wants to add it to the shared memory, it basically does the following:
/* Omitted: Initialize and fill in 'newitem' members */
/* Wait until there is room in the buffer */
semaphore_wait(&(mem->room));
/* Get exclusive access to the structure members */
semaphore_wait(&(mem->lock));
mem->item[(mem->next_item + mem->num_items) % MAX_ITEMS] = newitem;
mem->num_items++;
sem_post(&(mem->more));
semaphore_post(&(mem->lock));
The above is often called enqueue, because it appends an item to a queue (which happens to be implemented via a circular buffer).
When a consumer wants to consume an item (item_type nextitem;) from the shared buffer, it does the following:
/* Wait until there are items in the buffer */
semaphore_wait(&(mem->more));
/* Get exclusive access to the structure members */
semaphore_wait(&(mem->lock));
nextitem = mem->item[mem->next_item];
mem->next_item = (mem->next_item + 1) % MAX_ITEMS;
mem->num_items = mem->num_items - 1;
semaphore_post(&(mem->room));
semaphore_post(&(mem->lock));
/* Omitted: Do work on 'nextitem' here. */
This is often called dequeue, because it obtains the next item from the queue.
I would recommend you first write a single-process test case, which enqueues MAX_ITEMS, then dequeues them, and verifies the semaphore values are back to initial values. That is not a guarantee of correctness, but it takes care of the most typical bugs.
In practice, I would personally write the queueing functions as static inline helpers in the same header file that describes the shared memory structure. Pretty much
static inline int shared_get(struct shared *const mem, item_type *const into)
{
int err;
if (!mem || !into)
return errno = EINVAL; /* Set errno = EINVAL, and return EINVAL. */
/* Wait for the next item in the buffer. */
do {
err = sem_wait(&(mem->more));
} while (err == -1 && errno == EINTR);
if (err)
return errno;
/* Exclusive access to the structure. */
do {
err = sem_wait(&(mem->lock));
} while (err == -1 && errno == EINTR);
/* Copy item to caller storage. */
*into = mem->item[mem->next_item];
/* Update queue state. */
mem->next_item = (mem->next_item + 1) % MAX_ITEMS;
mem->num_items--;
/* Account for the newly freed slot. */
sem_post(&(mem->room));
/* Done. */
sem_post(&(mem->lock));
return 0;
}
and
static inline int shared_put(struct shared *const mem, const item_type *const from)
{
int err;
if (!mem || !from)
return errno = EINVAL; /* Set errno = EINVAL, and return EINVAL. */
/* Wait for room in the buffer. */
do {
err = sem_wait(&(mem->room));
} while (err == -1 && errno == EINTR);
if (err)
return errno;
/* Exclusive access to the structure. */
do {
err = sem_wait(&(mem->lock));
} while (err == -1 && errno == EINTR);
/* Copy item to queue. */
mem->item[(mem->next_item + mem->num_items) % MAX_ITEMS] = *from;
/* Update queue state. */
mem->num_items++;
/* Account for the newly filled slot. */
sem_post(&(mem->more));
/* Done. */
sem_post(&(mem->lock));
return 0;
}
but note that I wrote these from memory, and not copy-pasted from my test program, because I want you to learn and not to just copy-paste code from others without understanding (and being suspicious of) it.
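Using those helpers, the single-process test case suggested above could be sketched roughly like this (again untested and written from memory; sem_getvalue() is only used here to verify the final state):

/* Sketch: fill the queue, drain it, and check the semaphores recovered. */
static int shared_selftest(struct shared *const mem)
{
    item_type item = { 0 };
    int value;

    for (size_t i = 0; i < MAX_ITEMS; i++)
        if (shared_put(mem, &item))
            return -1;                    /* enqueue failed unexpectedly */

    for (size_t i = 0; i < MAX_ITEMS; i++)
        if (shared_get(mem, &item))
            return -1;                    /* dequeue failed unexpectedly */

    if (sem_getvalue(&(mem->room), &value) == -1 || value != MAX_ITEMS)
        return -1;                        /* 'room' should be back at MAX_ITEMS */
    if (sem_getvalue(&(mem->more), &value) == -1 || value != 0)
        return -1;                        /* 'more' should be back at 0 */
    return 0;
}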
Why do we need separate counters (next_item, num_items) when we have the semaphores, with corresponding values?
Because we cannot capture the semaphore value at the point where sem_wait() succeeded/continued/stopped blocking.
For example, initially the room semaphore is initialized to MAX_ITEMS, so up to that many producers can run in parallel. Any one of them running sem_getvalue() immediately after sem_wait() will get some later value, not the value or transition that caused sem_wait() to return. (Even with SysV semaphores you cannot obtain the semaphore value that caused wait to return for this process.)
So, instead of indexes or counters to the buffer, we think of the more semaphore as having the value of how many times one can dequeue from the buffer without blocking, and room as having the value of how many times one can enqueue to the buffer without blocking. The lock semaphore grants exclusive access, so that we can modify the shared memory structures (well, next_item and num_items) atomically, without different processes trying to change the values at the same time.
I am not 100% certain that this is the best or optimal pattern, but it is one of the most commonly used ones. It is not as robust as I'd like: for each increment (of one) in num_items, one must post on more exactly once; and for each decrement (of one) in num_items, one must increment next_item by exactly one and post on room exactly once, or the scheme falls apart.
There is one final wrinkle, though:
How do producers indicate they are done?
How would the scheduler tell producers and/or consumers to stop?
My preferred solution is to add a flag into the shared memory structure, say unsigned int state;, with specific bit masks telling the producers and consumers what to do, that is examined immediately after waiting on the lock:
#define STOP_PRODUCING (1 << 0)
#define STOP_CONSUMING (1 << 1)
static inline int shared_get(struct shared *const mem, item_type *const into)
{
int err;
if (!mem || !into)
return errno = EINVAL; /* Set errno = EINVAL, and return EINVAL. */
/* Wait for the next item in the buffer. */
do {
err = sem_wait(&(mem->more));
} while (err == -1 && errno == EINTR);
if (err)
return errno;
/* Exclusive access to the structure. */
do {
err = sem_wait(&(mem->lock));
} while (err == -1 && errno == EINTR);
/* Need to stop consuming? */
if (mem->state & STOP_CONSUMING) {
/* Ensure all consumers see the state immediately */
sem_post(&(mem->more));
sem_post(&(mem->lock));
/* ENOMSG == please stop. */
return errno = ENOMSG;
}
/* Copy item to caller storage. */
*into = mem->item[mem->next_item];
/* Update queue state. */
mem->next_item = (mem->next_item + 1) % MAX_ITEMS;
mem->num_items--;
/* Account for the newly freed slot. */
sem_post(&(mem->room));
/* Done. */
sem_post(&(mem->lock));
return 0;
}
static inline int shared_put(struct shared *const mem, const item_type *const from)
{
int err;
if (!mem || !from)
return errno = EINVAL; /* Set errno = EINVAL, and return EINVAL. */
/* Wait for room in the buffer. */
do {
err = sem_wait(&(mem->room));
} while (err == -1 && errno == EINTR);
if (err)
return errno;
/* Exclusive access to the structure. */
do {
err = sem_wait(&(mem->lock));
} while (err == -1 && errno == EINTR);
/* Time to stop? */
if (mem->state & STOP_PRODUCING) {
/* Ensure all producers see the state immediately */
sem_post(&(mem->lock));
sem_post(&(mem->room));
/* ENOMSG == please stop. */
return errno = ENOMSG;
}
/* Copy item to queue. */
mem->item[(mem->next_item + mem->num_items) % MAX_ITEMS] = *from;
/* Update queue state. */
mem->num_items++;
/* Account for the newly filled slot. */
sem_post(&(mem->more));
/* Done. */
sem_post(&(mem->lock));
return 0;
}
which return ENOMSG to the caller if the caller should stop. When the state is changed, one should of course be holding the lock. When adding STOP_PRODUCING, one should also post on the room semaphore (once) to start a "cascade" so all producers stop; and when adding STOP_CONSUMING, post on the more semaphore (once) to start the consumer stop cascade. (Each of them will post on it again, to ensure each producer/consumer sees the state as soon as possible.)
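For example, the stop request itself could be a small helper along these lines (a sketch; the name shared_stop is mine, not part of the scheme above):

/* Sketch: ask producers and/or consumers to stop. 'flags' is a
   combination of STOP_PRODUCING and/or STOP_CONSUMING. */
static inline void shared_stop(struct shared *const mem, const unsigned int flags)
{
    int err;
    /* Hold the lock while changing the state. */
    do {
        err = sem_wait(&(mem->lock));
    } while (err == -1 && errno == EINTR);
    mem->state |= flags;
    sem_post(&(mem->lock));
    /* Start the wake-up cascades. */
    if (flags & STOP_PRODUCING)
        sem_post(&(mem->room));
    if (flags & STOP_CONSUMING)
        sem_post(&(mem->more));
}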
There are other schemes, though; for example signals (setting a volatile sig_atomic_t flag), but it is generally hard to ensure there are no race windows: a process checking the flag just before it is changed, and then blocking on a semaphore.
In this scheme, it would be good to verify that both MAX_ITEMS + NUM_PRODUCERS <= SEM_VALUE_MAX and MAX_ITEMS + NUM_CONSUMERS <= SEM_VALUE_MAX, so that even during the stop cascades, the semaphore value will not overflow.
I have a writer process which outputs its status at regular intervals as a readable chunk of wchar_t.
I would need to ensure the following properties:
When there's an update, readers shouldn't read partial/corrupted data
The file should be volatile in memory so that when the writer quits, the file is gone
The file content size is variable
Multiple readers could read the file in parallel; it doesn't matter if the content is in sync, as long as each client sees non-partial content
If using truncate and then write, clients should only read the full file and not observe such partial operations
How could I implement such a procfs-like file, outside the procfs filesystem?
I was thinking of using the classic Linux file APIs in C and creating something under /dev/shm by default, but I find points 1 and 5 the hardest to implement effectively.
How could I expose such file?
The typical solution is to create a new file in the same directory, then rename (hardlink) it over the old one.
This way, processes see either an old one or a new one, never a mix; and it only depends on the moment when they open the file.
The Linux kernel takes care of the caching, so if the file is accessed often, it will be in RAM (page cache). The writer must, however, remember to delete the file when it exits.
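A minimal sketch of that replace-by-rename idea (the file names and the helper name are just examples):

/* Write the new contents to a temporary file, then rename() it over the
   visible one. Readers that open 'path' see either the old contents or
   the new contents, never a mixture. */
#include <stdio.h>

static int publish_status(const char *path, const char *tmppath, const char *contents)
{
    FILE *out = fopen(tmppath, "w");
    int failed;

    if (!out)
        return -1;
    failed = (fputs(contents, out) == EOF);
    failed |= (fclose(out) == EOF);       /* write errors can surface at fclose() */
    if (failed) {
        remove(tmppath);
        return -1;
    }
    /* Atomic replacement. */
    if (rename(tmppath, path) == -1) {
        remove(tmppath);
        return -1;
    }
    return 0;
}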
A better approach is to use fcntl()-based advisory record locks (typically over the entire file, i.e. .l_whence = SEEK_SET, .l_start = 0, .l_len = 0).
The writer will grab a write/exclusive lock before truncating and rewriting the contents, and readers a read/shared lock before reading the contents.
This requires cooperation, however, and the writer must be prepared not to be able to get the lock (or grabbing the lock may take an undefined amount of time).
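A sketch of the whole-file locking call both sides would use (assuming fd is an open descriptor to the status file):

#include <fcntl.h>
#include <unistd.h>
#include <errno.h>

/* Lock the whole file: F_WRLCK for the writer, F_RDLCK for readers.
   Unlocking is the same call with F_UNLCK (or closing the descriptor). */
static int lock_whole_file(int fd, short type)
{
    struct flock lk = {
        .l_type = type,        /* F_WRLCK, F_RDLCK, or F_UNLCK */
        .l_whence = SEEK_SET,
        .l_start = 0,
        .l_len = 0,            /* 0 = to end of file, i.e. the whole file */
    };
    int rc;
    do {
        rc = fcntl(fd, F_SETLKW, &lk);   /* blocks until the lock is granted */
    } while (rc == -1 && errno == EINTR);
    return rc;
}

The writer would take F_WRLCK before truncating and rewriting, readers F_RDLCK before reading.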
A Linux-only scheme would be to use atomic replacement (via rename/hardlinking), and file leases.
(When the writer process has an exclusive lease on an open file, it gets a signal whenever another process wants to open that same file (inode, not file name). It has at least a few seconds to downgrade or release the lease, at which point the opener gets access to the contents.)
Basically, the writer process creates an empty status file, and obtains exclusive lease on it. Whenever the writer receives a signal that a reader wants to access the status file, it writes the current status to the file, releases the lease, creates a new empty file in the same directory (same mount suffices) as the status file, obtains an exclusive lease on that one, and renames/hardlinks it over the status file.
If the status file contents do not change all the time, only periodically, then the writer process creates an empty status file, and obtains exclusive lease on it. Whenever the writer receives a signal that a reader wants to access the (empty) status file, it writes the current status to the file, and releases the lease. Then, when the writer process' status is updated, and there is no lease yet, it creates a new empty file in the status file directory, takes an exclusive lease on it, and renames/hardlinks over the status file.
This way, the status file is always updated just before a reader opens it, and only then. If there are multiple readers at the same time, they can open the status file without interruption when the writer releases the lease.
It is important to note that the status information should be collected in a single structure or similar, so that writing it out to the status file is efficient. Leases are automatically broken if not released soon enough (but there are a few seconds at least to react), and the lease is on the inode – file contents – not the file name, so we still need the atomic replacement.
Here's a crude example implementation:
#define _POSIX_C_SOURCE 200809L
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdarg.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <limits.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#define LEASE_SIGNAL (SIGRTMIN+0)
static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;
static int status_changed = 0;
static size_t status_len = 0;
static char *status = NULL;
static pthread_t status_thread;
static char *status_newpath = NULL;
static char *status_path = NULL;
static int status_fd = -1;
static int status_errno = 0;
char *join2(const char *src1, const char *src2)
{
const size_t len1 = (src1) ? strlen(src1) : 0;
const size_t len2 = (src2) ? strlen(src2) : 0;
char *dst;
dst = malloc(len1 + len2 + 1);
if (!dst) {
errno = ENOMEM;
return NULL;
}
if (len1 > 0)
memcpy(dst, src1, len1);
if (len2 > 0)
memcpy(dst+len1, src2, len2);
dst[len1+len2] = '\0';
return dst;
}
static void *status_worker(void *payload __attribute__((unused)))
{
siginfo_t info;
sigset_t mask;
int err, num;
/* This thread blocks all signals except LEASE_SIGNAL. */
sigfillset(&mask);
sigdelset(&mask, LEASE_SIGNAL);
err = pthread_sigmask(SIG_BLOCK, &mask, NULL);
if (err)
return (void *)(intptr_t)err;
/* Mask for LEASE_SIGNAL. */
sigemptyset(&mask);
sigaddset(&mask, LEASE_SIGNAL);
/* This thread can be canceled at any cancellation point. */
pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
while (1) {
num = sigwaitinfo(&mask, &info);
if (num == -1 && errno != EINTR)
return (void *)(intptr_t)errno;
/* Ignore all but the lease signals related to the status file. */
if (num != LEASE_SIGNAL || info.si_signo != LEASE_SIGNAL || info.si_fd != status_fd)
continue;
/* We can be canceled at this point safely. */
pthread_testcancel();
/* Block cancelability for a sec, so that we maintain the mutex correctly. */
pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, NULL);
pthread_mutex_lock(&status_lock);
status_changed = 0;
/* Write the new status to the file. */
if (status && status_len > 0) {
const char *ptr = status;
const char *const end = status + status_len;
ssize_t n;
while (ptr < end) {
n = write(status_fd, ptr, (size_t)(end - ptr));
if (n > 0) {
ptr += n;
} else
if (n != -1) {
if (!status_errno)
status_errno = EIO;
break;
} else
if (errno != EINTR) {
if (!status_errno)
status_errno = errno;
break;
}
}
}
/* Close and release lease. */
close(status_fd);
status_fd = -1;
/* After we release the mutex, we can be safely canceled again. */
pthread_mutex_unlock(&status_lock);
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
pthread_testcancel();
}
}
static int start_status_worker(void)
{
sigset_t mask;
int result;
pthread_attr_t attrs;
/* This thread should block LEASE_SIGNAL signals. */
sigemptyset(&mask);
sigaddset(&mask, LEASE_SIGNAL);
result = pthread_sigmask(SIG_BLOCK, &mask, NULL);
if (result)
return errno = result;
/* Create the worker thread. */
pthread_attr_init(&attrs);
pthread_attr_setstacksize(&attrs, 2*PTHREAD_STACK_MIN);
result = pthread_create(&status_thread, &attrs, status_worker, NULL);
pthread_attr_destroy(&attrs);
if (result)
return errno = result;
/* Ready. */
return 0;
}
int set_status(const char *format, ...)
{
va_list args;
char *new_status = NULL;
int len;
if (!format)
return errno = EINVAL;
va_start(args, format);
len = vasprintf(&new_status, format, args);
va_end(args);
if (len < 0)
return errno = EINVAL;
pthread_mutex_lock(&status_lock);
free(status);
status = new_status;
status_len = len;
status_changed++;
/* Do we already have a status file prepared? */
if (status_fd != -1 || !status_newpath) {
pthread_mutex_unlock(&status_lock);
return 0;
}
/* Prepare the status file. */
do {
status_fd = open(status_newpath, O_WRONLY | O_CREAT | O_CLOEXEC, 0666);
} while (status_fd == -1 && errno == EINTR);
if (status_fd == -1) {
pthread_mutex_unlock(&status_lock);
return 0;
}
/* In case of failure, do cleanup. */
do {
/* Set lease signal. */
if (fcntl(status_fd, F_SETSIG, LEASE_SIGNAL) == -1)
break;
/* Get exclusive lease on the status file. */
if (fcntl(status_fd, F_SETLEASE, F_WRLCK) == -1)
break;
/* Replace status file with the new, leased one. */
if (rename(status_newpath, status_path) == -1)
break;
/* Success. */
pthread_mutex_unlock(&status_lock);
return 0;
} while (0);
if (status_fd != -1) {
close(status_fd);
status_fd = -1;
}
unlink(status_newpath);
pthread_mutex_unlock(&status_lock);
return 0;
}
int main(int argc, char *argv[])
{
char *line = NULL;
size_t size = 0;
ssize_t len;
if (argc != 2 || !strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
const char *argv0 = (argc > 0 && argv[0]) ? argv[0] : "(this)";
fprintf(stderr, "\n");
fprintf(stderr, "Usage: %s [ -h | --help ]\n", argv0);
fprintf(stderr, " %s STATUS-FILE\n", argv0);
fprintf(stderr, "\n");
fprintf(stderr, "This program maintains a pseudofile-like status file,\n");
fprintf(stderr, "using the contents from standard input.\n");
fprintf(stderr, "Supply an empty line to exit.\n");
fprintf(stderr, "\n");
return EXIT_FAILURE;
}
status_path = join2(argv[1], "");
status_newpath = join2(argv[1], ".new");
unlink(status_path);
unlink(status_newpath);
if (start_status_worker()) {
fprintf(stderr, "Cannot start status worker thread: %s.\n", strerror(errno));
return EXIT_FAILURE;
}
if (set_status("Empty\n")) {
fprintf(stderr, "Cannot create initial empty status: %s.\n", strerror(errno));
return EXIT_FAILURE;
}
while (1) {
len = getline(&line, &size, stdin);
if (len < 1)
break;
line[strcspn(line, "\n")] = '\0';
if (line[0] == '\0')
break;
set_status("%s\n", line);
}
pthread_cancel(status_thread);
pthread_join(status_thread, NULL);
if (status_fd != -1)
close(status_fd);
unlink(status_path);
unlink(status_newpath);
return EXIT_SUCCESS;
}
Save the above as server.c, then compile using e.g.
gcc -Wall -Wextra -O2 server.c -lpthread -o server
This implements a status server, storing each line from standard input to the status file if necessary. Supply an empty line to exit. For example, to use the file status in the current directory, just run
./server status
Then, if you use another terminal window to examine the directory, you see it has a file named status (with typically zero size). But, cat status shows you its contents; just like procfs/sysfs pseudofiles.
Note that the status file is only updated if necessary, and only for the first reader/accessor after status changes. This keeps writer/server overhead and I/O low, even if the status changes very often.
The above example program uses a worker thread to catch the lease-break signals. This is because pthread mutexes cannot be locked or released safely in a signal handler (pthread_mutex_lock() etc. are not async-signal safe). The worker thread maintains its cancelability, so that it won't be canceled when it holds the mutex; if canceled during that time, it will be canceled after it releases the mutex. It is careful that way.
Also, the temporary replacement file name is not random; it is just the status file name with .new appended at the end. Anywhere on the same mount would work fine.
As long as other threads also block the lease break signal, this works fine in multithreaded programs, too. (If you create other threads after the worker thread, they'll inherit the correct signal mask from the main thread; start_status_worker() sets the signal mask for the calling thread.)
I do trust the approach in the program, but there may be bugs (and perhaps even thinkos) in this implementation. If you find any, please comment or edit.
I pass a struct to pthread_create() which contains a char*, and I lock in both main and the thread with a mutex to protect this string, because when the second thread is created the string changes, and otherwise the first thread would use the second string and not the first. Here is the code:
main.c
while( th_num < th_size )
{
pthread_mutex_lock(&lock);
received = 0;
/* Read the desired readable size */
if( read(newsock, &size, sizeof(size)) < 0 )
{ perror("Read"); exit(1); }
/* Read all data */
while( received < size )
{
if( (nread = read(newsock, buffer + received, size - received)) < 0 )
{ perror("Read"); exit(1); }
received += nread;
}
printf("Received string: %s\n",buffer);
Q->receiver = (char*) malloc(sizeof(char)*strlen(buffer)+1);
strncpy(Q->receiver, buffer, strlen(buffer)+1);
if( (err = pthread_create(&thread_server[th_num], NULL, thread_start, (void*) Q)) == true )
{ show_error("pthread_create", err); }
/* -------------------------------------------------- */
th_num++;
pthread_mutex_unlock(&lock);
usleep(500);
}
pthread_server.c
pthread_mutex_lock(&lock);
/*
do some stuff here
*/
pthread_mutex_unlock(&lock);
The program works fine, but the problem is that it only works if I put in the usleep(500). My guess is that the thread can't lock the mutex in time, so it needs the sleep to get it right. Is there a way to do it without usleep()?
Leaving aside that I don't understand why you need to call pthread_create() inside a mutual-exclusion section, your problem is:
you use threads, but the flow of your program is close to sequential because of the large mutual-exclusion section.
Let X be a generic thread in your program.
Without the usleep(500);, when thread X finishes it releases the mutex with pthread_mutex_unlock(&lock);, but immediately afterwards thread X reacquires the lock, so no one else can enter the mutual-exclusion section.
Now I don't know what your shared data is, so I can only suggest:
1) Reduce the mutual-exclusion section; only hold the lock while you access the shared data;
2) Rethink your program structure.
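For example, one way to avoid holding the lock across pthread_create() is to give each thread its own private copy of the data it needs. A sketch adapting your loop body (the struct, strdup() use and ownership rules are my assumptions, not your code):

/* Give each thread its own argument block, so nothing is shared between
   the main loop and the new thread except what truly must be shared. */
struct thread_arg {
    char *receiver;          /* private copy of the received string */
};

struct thread_arg *arg = malloc(sizeof *arg);
if (!arg) { perror("malloc"); exit(1); }
arg->receiver = strdup(buffer);             /* the thread owns this copy */
if (!arg->receiver) { perror("strdup"); exit(1); }

if ((err = pthread_create(&thread_server[th_num], NULL, thread_start, arg)) != 0)
    show_error("pthread_create", err);
/* No mutex needed around the string: the thread frees arg->receiver and
   arg when it is done with them. */

With per-thread copies, the mutex only has to cover data that really is shared between threads, and the usleep() becomes unnecessary.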
I'm trying to write a program that backs up multiple files to a folder called .backups. It creates one thread for each file or subdirectory, and that thread is responsible for the copy operation. However, it's not working correctly. Sometimes files are never copied, sometimes they are copied but end up with 0 bytes, and sometimes it works correctly. It seems completely random and I have no idea what's causing it. Can anyone help me figure it out?
// Copies the file from source to destination and returns number of bytes written
ssize_t copy_file(char* from, char *to)
{
const int BUFFER_SIZE = 4096;
char buffer[BUFFER_SIZE];
ssize_t n;
ssize_t written = 0;
FILE* file_from = fopen(from, "r");
FILE* file_to = fopen(to, "w");
if (file_from == NULL || file_to == NULL)
return -1;
while ((n = fread(buffer, 1, BUFFER_SIZE, file_from)) > 0) {
if (fwrite(buffer, sizeof(char), n, file_to) != n)
return -1;
written += n;
}
return written;
}
// Thread responsible for handling the backup of a single file or subdirectory
void* backup_thread(void* arg)
{
struct backup_info* info = (struct backup_info*) arg;
ssize_t written;
written = copy_file(info->file, info->destination);
int rc = pthread_detach(pthread_self());
if (rc != 0)
exit(EXIT_FAILURE);
free(info);
return NULL;
}
EDIT: Also, this is how I am creating each of the threads.
struct backup_info* info = malloc(sizeof(struct backup_info));
if ((rc = pthread_create(&thread_id, NULL, backup_thread, info)) != 0)
fprintf(stderr, "pthread_create() failed (%d): %s", rc, strerror(rc));
How does the main thread exit?
If it just returns from main(), then that is the same as calling exit(), and will result in all the other threads being unceremoniously killed in the middle of whatever they happen to be doing.
If instead you call pthread_exit() explicitly in main(), then the detached threads will be allowed to finish before the process exits.
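A minimal sketch of that pattern:

#include <pthread.h>

int main(void)
{
    /* ... set up and pthread_create() the detached backup threads ... */

    /* Instead of returning (which would kill the whole process), let the
       detached threads run to completion; the process exits when the
       last thread terminates. */
    pthread_exit(NULL);
}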
Note also that the exit(EXIT_FAILURE) in backup_thread() has the same problem - if that error case fires, it will tear down the entire process immediately. pthread_exit() may be better used here as well.
I spotted something wrong that may be causing your problem. You never call fclose() on any of your files. That will eventually lead to using up all of your file descriptors (which are shared among your threads). I don't know if that is the only thing wrong, but you should make the fix and see what happens.
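A sketch of copy_file() with that fix applied (the fclose() calls and the cleanup on the error paths are the additions; otherwise the structure is yours):

// Copies the file from source to destination and returns the number of
// bytes written, closing both streams on every path.
ssize_t copy_file(char *from, char *to)
{
    enum { BUFFER_SIZE = 4096 };
    char buffer[BUFFER_SIZE];
    size_t n;
    ssize_t written = 0;

    FILE *file_from = fopen(from, "r");
    FILE *file_to = fopen(to, "w");
    if (file_from == NULL || file_to == NULL) {
        if (file_from) fclose(file_from);
        if (file_to) fclose(file_to);
        return -1;
    }
    while ((n = fread(buffer, 1, BUFFER_SIZE, file_from)) > 0) {
        if (fwrite(buffer, 1, n, file_to) != n) {
            written = -1;
            break;
        }
        written += (ssize_t)n;
    }
    fclose(file_from);
    if (fclose(file_to) == EOF)   /* buffered write errors surface here */
        written = -1;
    return written;
}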
I'm new to using the pthread library in C and I have an assignment for my class to write a simple program using them. The basic description of the program is it takes 1 or more input files containing website names and 1 output file name. I then need to create 1 thread per input file to read in the website names and push them onto a queue. Then I need to create a couple of threads to pull those names off of the queue, find their IP Address, and then write that information out to the output file. The command line arguments are expected as follows:
./multi-lookup [one or more input files] [single output file name]
My issue is this. Whenever I run the program with only 1 thread pushing information to the output file, everything works properly. When I make it two threads, the program hangs and none of my testing "printf" statements are even printed. My best guess is that deadlock is occurring somehow and that I'm not using my mutexes properly, but I can't figure out how to fix it. Please help!
If you need any information that I'm not providing then just let me know. Sorry for the lack of comments in the code.
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <pthread.h>
#include "util.h"
#include "queue.h"
#define STRING_SIZE 1025
#define INPUTFS "%1024s"
#define USAGE "<inputFilePath> <outputFilePath>"
#define NUM_RESOLVERS 2
queue q;
pthread_mutex_t locks[2];
int requestors_finished;
void* requestors(void* input_file);
void* resolvers(void* output_file);
int main(int argc, char* argv[])
{
FILE* inputfp = NULL;
FILE* outputfp = NULL;
char errorstr[STRING_SIZE];
pthread_t requestor_threads[argc - 2];
pthread_t resolver_threads[NUM_RESOLVERS];
int return_code;
requestors_finished = 0;
if(queue_init(&q, 10) == QUEUE_FAILURE)
fprintf(stderr, "Error: queue_init failed!\n");
if(argc < 3)
{
fprintf(stderr, "Not enough arguments: %d\n", (argc - 1));
fprintf(stderr, "Usage:\n %s %s\n", argv[0], USAGE);
return 1;
}
pthread_mutex_init(&locks[0], NULL);
pthread_mutex_init(&locks[1], NULL);
int i;
for(i = 0; i < (argc - 2); i++)
{
inputfp = fopen(argv[i+1], "r");
if(!inputfp)
{
sprintf(errorstr, "Error Opening Input File: %s", argv[i]);
perror(errorstr);
break;
}
return_code = pthread_create(&(requestor_threads[i]), NULL, requestors, inputfp);
if(return_code)
{
printf("ERROR: return code from pthread_create() is %d\n", return_code);
exit(1);
}
}
outputfp = fopen(argv[i+1], "w");
if(!outputfp)
{
sprintf(errorstr, "Errord opening Output File: %s", argv[i+1]);
perror(errorstr);
exit(1);
}
for(i = 0; i < NUM_RESOLVERS; i++)
{
return_code = pthread_create(&(resolver_threads[i]), NULL, resolvers, outputfp);
if(return_code)
{
printf("ERROR: return code from pthread_create() is %d\n", return_code);
exit(1);
}
}
for(i = 0; i < (argc - 2); i++)
pthread_join(requestor_threads[i], NULL);
requestors_finished = 1;
for(i = 0; i < NUM_RESOLVERS; i++)
pthread_join(resolver_threads[i], NULL);
pthread_mutex_destroy(&locks[0]);
pthread_mutex_destroy(&locks[1]);
return 0;
}
void* requestors(void* input_file)
{
char* hostname = (char*) malloc(STRING_SIZE);
FILE* input = input_file;
while(fscanf(input, INPUTFS, hostname) > 0)
{
while(queue_is_full(&q))
usleep((rand()%100));
if(!queue_is_full(&q))
{
pthread_mutex_lock(&locks[0]);
if(queue_push(&q, (void*)hostname) == QUEUE_FAILURE)
fprintf(stderr, "Error: queue_push failed on %s\n", hostname);
pthread_mutex_unlock(&locks[0]);
}
hostname = (char*) malloc(STRING_SIZE);
}
printf("%d\n", queue_is_full(&q));
free(hostname);
fclose(input);
pthread_exit(NULL);
}
void* resolvers(void* output_file)
{
char* hostname;
char ipstr[INET6_ADDRSTRLEN];
FILE* output = output_file;
int is_empty = queue_is_empty(&q);
//while(!queue_is_empty(&q) && !requestors_finished)
while((!requestors_finished) || (!is_empty))
{
while(is_empty)
usleep((rand()%100));
pthread_mutex_lock(&locks[0]);
hostname = (char*) queue_pop(&q);
pthread_mutex_unlock(&locks[0]);
if(dnslookup(hostname, ipstr, sizeof(ipstr)) == UTIL_FAILURE)
{
fprintf(stderr, "DNSlookup error: %s\n", hostname);
strncpy(ipstr, "", sizeof(ipstr));
}
pthread_mutex_lock(&locks[1]);
fprintf(output, "%s,%s\n", hostname, ipstr);
pthread_mutex_unlock(&locks[1]);
free(hostname);
is_empty = queue_is_empty(&q);
}
pthread_exit(NULL);
}
Although I'm not familiar with your "queue.h" library, you need to pay attention to the following:
When you check whether your queue is full or empty you are not holding the mutex, which means the following scenario can happen:
Some requestor thread (call it thread1) checks that the queue is not full, and just before it executes pthread_mutex_lock(&locks[0]); (that is, after the if(!queue_is_full(&q))) thread1 gets context-switched out.
Other requestor threads fill the queue up, and when our thread1 finally gets hold of the mutex, it will try to insert into an already full queue. If your queue implementation crashes when one tries to insert more elements into an already full queue, thread1 will never unlock the mutex and you'll have a deadlock.
Another scenario:
Some resolver thread runs first; requestors_finished is initially 0, so (!requestors_finished) || (!is_empty) is initially true.
But because the queue is still empty, is_empty is true.
This thread will reach while(is_empty) usleep((rand()%100)); and sleep forever, because is_empty is never updated inside that loop; and because you pthread_join this thread, your program will never terminate.
The general idea to remember is that when you access some resource that is not atomic and might be accessed by other threads you need to make sure you're the only one performing actions on this resource.
Using a mutex is OK, but you should keep in mind that you cannot anticipate when a context switch will occur, so if you want to check e.g. whether the queue is empty, you should do so while holding the mutex, and not unlock it until you're finished with the queue; otherwise there's no guarantee that it will still be empty when the next line executes.
You might also want to read more about the producer-consumer problem.
To help you know (and control) when the consumer (resolver) threads should run and when the producer threads produce, consider using condition variables, as in the sketch below.
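A minimal sketch of a blocking push/pop built on one mutex and two condition variables (it reuses your queue type and queue_* function names from queue.h, but the condition-variable names and the exact semantics are assumptions on my part):

#include <pthread.h>

pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t not_full = PTHREAD_COND_INITIALIZER;
pthread_cond_t not_empty = PTHREAD_COND_INITIALIZER;

void blocking_push(queue *q, void *item)
{
    pthread_mutex_lock(&qlock);
    while (queue_is_full(q))                 /* re-check after every wakeup */
        pthread_cond_wait(&not_full, &qlock);
    queue_push(q, item);                     /* return status ignored for brevity */
    pthread_cond_signal(&not_empty);         /* wake one waiting consumer */
    pthread_mutex_unlock(&qlock);
}

void *blocking_pop(queue *q)
{
    void *item;
    pthread_mutex_lock(&qlock);
    while (queue_is_empty(q))
        pthread_cond_wait(&not_empty, &qlock);
    item = queue_pop(q);
    pthread_cond_signal(&not_full);          /* wake one waiting producer */
    pthread_mutex_unlock(&qlock);
    return item;
}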
Some misc. stuff:
pthread_t requestor_threads[argc - 2]; uses a VLA, and not in a good way - think about what happens if I pass no parameters to your program. Either decide on some maximum and #define it, or create the array dynamically after having checked the validity of the input.
IMHO the requestors threads should open the file themselves
There might be some more problems, but start by fixing these.