Segmentation fault directly after a printf in schedule process function - C

My round robin scheduler takes an input text file and parses it to create processes on an "upcoming" queue. When the computer time reaches a process's arrival time, the process is moved from the upcoming queue into the round robin queue and scheduled on a computer core. However, when I go to schedule a process from the round robin queue, it segfaults directly after a printf. I have no clue how to fix it; the exact point of the fault is in the sched_proc function, right after the printf.
I've tried printing before and after, and I've tried not removing the process from the round robin queue and just passing the head (that gives very odd output, because the same process ends up scheduled on multiple cores since it is never taken out of the queue). Nothing is working.
input.txt:
process ID | Service Time | Arrival Time
t1 20 5
t2 30 10
t3 40 10
t4 50 10
t5 60 20
t6 45 30
t7 90 31
t9 80 32
t10 85 33
t11 80 34
t12 50 40
t13 60 50
t14 67 55
t15 70 57
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct core{
struct process* p; // pointer to the process currently running on this core
int proc_time; // cumulative time this process has been running on the core.
int busy; // either 0 or 1, 0 if there is no process, 1 if there is process running on the core
};
// virtual computer struct
struct computer
{
struct core cores[4]; //this computer has 4 cores
long time; // computer time in millisecond
};
// struct to store process information
struct process
{
char * process_ID;
int arrival_time; // when this process arrives (e.g., being created)
int service_time; // the amount of time this process needs to run on a core to finish
int io; // boolean io value (C89/90 does not have a bool type, so use an int to represent a boolean).
};
// one link in a queue that the scheduler uses to chain process structs,
// this is a standard struct to create linked list which is generally leveraged to implement queue
struct node
{
struct process* p; // the process pointed by this link
struct node *next; // the pointer to the next link
};
//head for the processes queue
struct node* head;
//tail for the processes queue
struct node* tail;
int proc_num; //number of processes in the queue, waiting to be scheduled and not running
int quantum; // the amount of time in milliseconds a process can run on a core before being swapped out
//struct for computer
struct computer computer;
//QUEUE STRUCT AND LOGIC
typedef struct Queue Queue;
struct Queue {
struct node* head;
struct node* tail;
unsigned int process_num;
};
struct node* create_node(struct process* p) {
struct node* node = malloc(sizeof(node));
if (node) {
node->p = p;
node->next = NULL;
}
return node;
}
Queue* create_queue (void) {
Queue* queue = malloc(sizeof(Queue));
if (queue) {
queue->head = NULL;
queue->tail = NULL;
queue->process_num = 0;
}
return queue;
}
void clear_queue (Queue* queue) {
struct node *current, *next;
current = queue->head;
while (current != NULL) {
next = current->next;
free(current->p->process_ID);
free(current->p);
free(current);
current = next;
}
}
void delete_queue (Queue* queue) {
if (queue) {
clear_queue(queue);
free(queue);
}
}
void print_queue (Queue* queue) {
for (struct node* current = queue->head; current != NULL; current = current->next) {
printf("Process: %s, Arrival Time: %d, Service Time: %d, IO: %d\n", current->p->process_ID,
current->p->arrival_time, current->p->service_time, current->p->io);
}
}
void add_last_queue (Queue* queue, struct node* to_add) {
if (queue->head == NULL) {
queue->head = to_add;
queue->tail = to_add;
} else {
queue->tail->next = to_add;
queue->tail = to_add;
}
queue->process_num++;
}
struct process* remove_first_queue (Queue* queue) {
struct process* data = NULL;
if (queue->head) {
struct node* temp = queue->head;
if (queue->head->next) {
queue->head = queue->head->next;
} else {
queue->head = NULL;
queue->tail = NULL;
}
data = temp->p;
free(temp);
queue->process_num--;
if (queue->process_num == 1) {
queue->tail = queue->head;
}
return data;
}
}
void rotate_queue_left (Queue* queue) {
if (queue) {
if (queue->head) {
queue->tail->next = queue->head;
queue->head = queue->head->next;
queue->tail = queue->tail->next;
queue->tail->next = NULL;
}
}
}
//END QUEUE STRUCT AND LOGIC
Queue* future_proc;
Queue* round_robin;
void read_file(void)
{
int i,i2;
FILE* file = fopen("input.txt", "r");
char line[90];
char name[100];
char service_time[3];
char arrival_time[3];
fgets(line, sizeof(line), file);
while (fgets(line, sizeof(line), file)) {
i=0;
while(line[i]!=' '&&i<90){name[i]=line[i];i++;}
if(i>90)break;
name[i]=0;
i2=++i;
while(line[i]!=' '&&i<90){service_time[i-i2]=line[i];i++;}
if(i>90)break;
service_time[i]=0;
i2=++i;
while(line[i]!=' '&&i<90){arrival_time[i-i2]=line[i];i++;}
if(i>90)break;
arrival_time[i]=0;
/* add your code here, you are to create the upcoming processes queue here.
essentially create a node for each process and chain them in a queue.
note this queue is *not* the process queue used for round robin scheduling
*/
//BUILD FUTURE PROCESS QUEUE
struct process* process;
process = malloc(sizeof(struct process));
process->process_ID=malloc(sizeof(50));
strcpy(process->process_ID, name);
process->arrival_time = atoi(arrival_time);
process->service_time = atoi(service_time);
process->io = 0;
struct node* to_add = create_node(process);
add_last_queue(future_proc, to_add);
}
fclose(file);
return;
}
//this function call simulates one millisecond of time on the computer
void run_one_step(void)
{
int i;
computer.time++;
printf("Processing all 4 cores, current Computer time=%lu \n",computer.time);
for(i=0;i<4;i++)
{
if(computer.cores[i].busy)
{
computer.cores[i].p->service_time--; // deduct the remaining service time of the running process by one millisecond
computer.cores[i].proc_time++; // increment the running time for the process by one millisecond in current quantum
printf("Core[%d]: %s, service_time= %d,been on core for: %d \n",i,computer.cores[i].p->process_ID,computer.cores[i].p->service_time,computer.cores[i].proc_time);
// you need to swap out or terminate a process if it uses up the current quantum,
// or finishes all its service time. The code for this should be located in the main()
// function, not here.
// Also if your code is done right, the following warning messages should never print.
if(computer.cores[i].proc_time>quantum)
printf("WARNING: Process on Core[%d] should not run longer than quantum\n",i);
if(computer.cores[i].p->service_time<0)
printf("WARNING: Process on core[%d] stayed longer than its service time.\n",i);
}
}
}
void run_one_step_p3(void)
{
int rndm,i;
computer.time++;
printf("Processing all 4 cores, current Computer time=%lu \n",computer.time);
for(i=0;i<4;i++)
{
if(computer.cores[i].busy)
{
if(computer.cores[i].p->io==0)
{
computer.cores[i].p->service_time--;
// with 10% probability, generate an io event
rndm=rand()%10+1;
if(rndm==10)computer.cores[i].p->io=1;
}
computer.cores[i].proc_time++;
printf("Core[%d]: process %s, service_time= %d,been on core for: %d \n",i,computer.cores[i].p->process_ID,computer.cores[i].p->service_time,computer.cores[i].proc_time);
// you need to swap out or terminate a process if it uses up the current quantum, has an i/o event;
// or finishes all its service time. The code for this should be located in the main()
// function, not here.
// Also if your code is done right, the following warning messages should never print.
if(computer.cores[i].p->io==1)
printf("WARNING: Process on core[%d] has io trigerred, please remove from core, reset io signal and place it back in queue\n",i);
if(computer.cores[i].proc_time>quantum)
printf("WARNING: Process on Core[%d] should not run longer than quantum\n",i);
if(computer.cores[i].p->service_time<0)
printf("WARNING: Process on core[%d] stayed longer than its service time.\n",i);
}
}
}
//NOTE: you must free struct node after taking a link off the round robin queue, and scheduling the respective
// process to run on the core. Make sure you free the struct node to avoid memory leak.
void sched_proc(struct process* p,int core_id)
{
if(computer.cores[core_id].busy==0)
{
printf("Process[%s] with service_time %d has been added to core %d\n",p->process_ID,p->service_time,core_id);
computer.cores[core_id].busy=1;
computer.cores[core_id].p=p;
computer.cores[core_id].proc_time=0;
}
else printf("ERROR: must call remove_proc to remove current process before adding another to the core.\n");
}
// This handles removing a process from a core, and either discarding the process if its service_time is <=0
// or adding it to the back of the round robin queue
void remove_proc(int core_id)
{
printf("Process[%s] at core %d has been removed from core with remaining service_time=%d\n",
computer.cores[core_id].p->process_ID,core_id,computer.cores[core_id].p->service_time);
// if the process has finished all its service time, terminate and clean up
if(computer.cores[core_id].p->service_time<=0)
{
computer.cores[core_id].busy=0;
// free up allocated memory for process ID and struct upon termination of a process
free(computer.cores[core_id].p->process_ID);
free(computer.cores[core_id].p);
computer.cores[core_id].proc_time=0;
}
// the process needs to run for more time, put it back into the queue for future scheduling
else
{
computer.cores[core_id].proc_time=0;
// reinsert back to the queue
if(round_robin->tail==NULL)
{
// in case queue is empty, i.e. all nodes struct were freed and there are no processes in the queue, this will become the first one
round_robin->tail=round_robin->head=malloc(sizeof(struct node));
round_robin->head->p=computer.cores[core_id].p;
round_robin->head->next=NULL;
round_robin->process_num++;
computer.cores[core_id].busy=0;
}
else
{
round_robin->tail->next = malloc(sizeof(struct node));
round_robin->tail=tail->next;
round_robin->tail->p=computer.cores[core_id].p;
round_robin->tail->next=NULL;
round_robin->process_num++;
computer.cores[core_id].busy=0;
}
}
}
// a demo running 4 processes until they're finished. The scheduling is done explicitly, not using
// a scheduling algorithm. This is just to demonstrate how processes will be scheduled. In main()
// you need to write a generic scheduling algorithm for arbitrary number of processes.
void demo(void)
{
int i;
struct process *p0,*p1,*p2,*p3;
p0=malloc(sizeof(struct process));
p1=malloc(sizeof(struct process));
p2=malloc(sizeof(struct process));
p3=malloc(sizeof(struct process));
p0->process_ID=malloc(sizeof(50));//you can assume process ID will never exceed 50 characters
p1->process_ID=malloc(sizeof(50));
p2->process_ID=malloc(sizeof(50));
p3->process_ID=malloc(sizeof(50));
strcpy(p0->process_ID,"first");
strcpy(p1->process_ID,"Second");
strcpy(p2->process_ID,"Third");
strcpy(p3->process_ID,"Fourth");
//assign arrival time
p0->arrival_time=0;
p1->arrival_time=0;
p2->arrival_time=0;
p3->arrival_time=0;
//assign service time
p0->service_time=16;
p1->service_time=17;
p2->service_time=19;
p3->service_time=21;
p0->io = 0;
p1->io = 0;
p2->io = 0;
p3->io = 0;
// Queue* queue = create_queue();
// add_last_queue(queue, create_node(p0));
// add_last_queue(queue, create_node(p1));
// add_last_queue(queue, create_node(p2));
// add_last_queue(queue, create_node(p3));
// printf("\nDemo queue:\n");
// print_queue(queue);
//
// remove_first_queue(queue);
// remove_first_queue(queue);
// remove_first_queue(queue);
// remove_first_queue(queue);
// printf("After removing:");
// print_queue(queue);
// we will skip queue construction here because it's just 4 processes.
// you must use the round robin queue for the scheduling algorithm for generic cases where many processes
// exist and may need more than one quantum to finish
// xx 4 processes are waiting to be scheduled. No queue is built in demo for simplicity.
// in your generic algorithm, you should create actual queues, and proc_num should be the number of processes whose
// arrival time has come, and are waiting in the round robin queue to be scheduled.
proc_num=4;
//schedule process to each core
sched_proc(p0,0);
sched_proc(p1,1);
sched_proc(p2,2);
sched_proc(p3,3);
for(i=0;i<16;i++)run_one_step();
remove_proc(0);
run_one_step();
remove_proc(1);
run_one_step();
run_one_step();
remove_proc(2);
run_one_step();
remove_proc(3);
sched_proc(head->p,0);
//NOTE: you must free struct node after scheduling the process. The demo code is not doing it here
// for simplification, but you have to do it in your code or you will have memory leakage
//head==tail since it was the only one added now to remove it we just make pointer pointing to NULL
head=NULL;
tail=NULL;
run_one_step();
remove_proc(0);
printf("DONE\n");
}
void init(void)
{
quantum=20;
future_proc = create_queue();
round_robin = create_queue();
// head=tail=NULL;
}
int main(void)
{
init();
// printf("\t*******Starting Demo*******\n");
//demo();
// printf("\t*******Reading Input*******\n");
//
// printf("Start file read:\n");
// printf("End file read.\n");
/* your code goes here for part2. In part 2, you create one node for each process, and put them on an
* 'upcoming process' queue first. Then your code calls run_one_step(), for each process whose arrival time
* has come, take the node off the 'upcoming process' queue, and place it on round robin queue. For each
* process that's selected to run on a core, take the node off round robin queue.
*
* Repeat run_one_step() until all processes finish. Please handle memory allocation/deallocation properly so there's no leak
*/
read_file();
printf("\nBuilt Queue After File Read:\n");
print_queue(future_proc);
printf("\nProcess Num: %d\n", future_proc->process_num);
while (future_proc->process_num > 0 /*|| round_robin->process_num > 0*/) {
while (future_proc->process_num > 0 && computer.time == future_proc->head->p->arrival_time) {
struct node* to_add = create_node(remove_first_queue(future_proc));
add_last_queue(round_robin, to_add);
printf("Process %s added at time %ld\n", to_add->p->process_ID, computer.time);
for (int i = 0; i < 4; i++) {
if (computer.cores[i].busy == 1) {
//the process exceeded its quantum or has finished
if (computer.cores[i].proc_time > quantum || computer.cores[i].p->service_time <= 0) {
remove_proc(i);
}
}
if (computer.cores[i].busy == 0) { //the core is not busy
sched_proc(round_robin->head->p, i);
remove_first_queue(round_robin);
}
}
}
run_one_step();
}
printf("\nAfter adding at each step to round_robin queue:\n");
print_queue(round_robin);
/* After part 2 is done, you clean up everything, e.g., freeing up all memory allocated,
* reset queues to empty etc.
* Then restart for part 3: read input file for all processes, initialize queues,
* run processes using run_one_step_p3() so random i/o event can happen at each step on each core,
* until all processes finish. Remember to clean up again at the end!
*/
return 0;
}

I'm putting this in an answer simply because I can't format a comment, but this is a partial answer anyway. When I run this program in gdb, it gets so far and then:
Program received signal SIGSEGV, Segmentation fault.
main () at scheduler.c:443
443 sched_proc(round_robin->head->p, i);
(gdb) backtrace
#0 main () at scheduler.c:443
As you can see, gdb is saying you crashed in the call to sched_proc(). Inspecting the value of round_robin, we can see that round_robin itself is a valid object, but its pointers are still set to NULL:
(gdb) print *round_robin
$1 = {head = 0x0, tail = 0x0, process_num = 0}
(gdb) quit
As to why that is, you may have to think a bit or learn to use a debugger to step through the code (an IDE will make this easier).
Edit: here's a hint. If you add a printf in the for loop there, you can see the head and tail are nulled out on the second iteration:
for (int i = 0; i < 4; i++) {
printf("i = %i, round_robin->head, tail: %p, %p\n", i, round_robin->head, round_robin->tail);
Output:
i = 0, round_robin->head, tail: 0x55555555b520, 0x55555555b520
Process[t1] with service_time 20 has been added to core 0
i = 1, round_robin->head, tail: (nil), (nil)
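For illustration only, here is a minimal sketch of how the inner loop could guard against an empty round robin queue, reusing the question's globals (round_robin, computer, quantum) and helpers as-is; this only avoids the NULL dereference and deliberately leaves the rest of the scheduling logic to the poster:
for (int i = 0; i < 4; i++) {
    if (computer.cores[i].busy == 1 &&
        (computer.cores[i].proc_time > quantum || computer.cores[i].p->service_time <= 0)) {
        remove_proc(i); // swap out or terminate, as before
    }
    // only schedule when a process is actually waiting in the queue
    if (computer.cores[i].busy == 0 && round_robin->head != NULL) {
        struct process* next = remove_first_queue(round_robin);
        sched_proc(next, i);
    }
}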

There may well be other problems, but this allocation is the wrong size.
// struct node* node = malloc(sizeof(node)); // size of a pointer
struct node* node = malloc(sizeof *node); // size of what `node` points to.
I also suggest:
// Queue* queue = malloc(sizeof(Queue));
Queue* queue = malloc(sizeof *queue);
Or in general:
// ptr = malloc(sizeof(ptr_type));
ptr = malloc(sizeof *ptr);

Related

Dividing task to N threads in accessing a queue in C

How do you delegate tasks for N threads such that the workload is evenly distributed?
Say we have a queue
[the] -> [quick] -> [brown] -> [fox] -> [jumps] -> [over] -> [the] -> [lazy] -> [dog]
And we have N threads to split up the workload of dequeuing the queue and printing the words, so that each word is printed by only one thread.
Here's my attempt (Updated, fixed null printing):
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#define threadCount 8 // Set number of threads
pthread_t* thread;
pthread_mutex_t lock;
//========= Setup for Queue =========
struct node
{
char *key;
struct node *next;
};
struct Q
{
struct node *front, *rear;
};
struct node* newNode(char *key)
{
struct node* temp = (struct node*)malloc(sizeof(struct node));
temp->key = key;
temp->next = NULL;
return temp;
}
struct Q* q;
void enqueue(char* key)
{
struct node* temp = newNode(key);
if(q->rear == NULL)
{
q->front = q->rear = temp;
return;
}
q->rear->next = temp;
q->rear = temp;
}
char* dequeue()
{
if (q->front == NULL)
{
return NULL;
}
struct node* temp = q->front;
char *key = temp->key;
q->front = q->front->next;
if(q->front == NULL)
{
q->rear = NULL;
}
free(temp);
return key;
}
//========= Setup for Queue =========
void *run(void* arg)
{
int id = *(int*)arg;
char* node;
while(q->front != NULL)
{
pthread_mutex_lock(&lock);
node = dequeue();
pthread_mutex_unlock(&lock);
if(node == NULL)
{
return NULL;
}
printf("Thread %d: %s\n", id, node);
}
return 0;
}
int main()
{
q = (struct Q*)malloc(sizeof(struct Q));
q->front = NULL;
q->rear = NULL;
enqueue("the");
enqueue("quick");
enqueue("brown");
enqueue("fox");
enqueue("jumps");
enqueue("over");
enqueue("the");
enqueue("lazy");
enqueue("dog");
thread = malloc(sizeof(pthread_t)*threadCount);
// Should output lines be only N-1 due to how the id is generated?
for(int id = 0; id < threadCount; id++)
{
pthread_create(&thread[id], NULL, (void *) run, &id);
}
for(int id = 0; id < threadCount; id++)
{
pthread_join(thread[id], NULL);
}
free(thread);
free(q);
return 0;
}
Here is my unresolved problem:
Sometimes there are lines that print Thread N, but according to how I implemented it (see main), the maximum thread number output should be N-1.
Lock the mutex
Take an item from the queue
Unlock the mutex
Do the work with the item
You don't want to hold the mutex while doing the work (e.g. printf) because then only one thread can do work at a time - you aren't really using multiple threads.
The reason you get (null) printed is that your code checks q->front != NULL while it hasn't locked the mutex. Then by the time it locks the mutex, the queue is empty. The solution is that the thread should dequeue, and then after it dequeues and unlocks the mutex, check whether it dequeued NULL.
You can't expect the work to be done in the "correct" order when using threads. You use threads when you don't care about the order.
If some threads don't print any words, that's normal - your "work" is very quick and you have too many threads for the amount of work. It's understandable that some threads don't get any work because all the work is already done by the time they start up. You can fix this for the demonstration by putting a call like sleep(1) before or after the printf, to slow the thread down.
Sometimes your thread IDs are wrong. When each thread starts, it accesses the id variable in main. The pthread_create function doesn't wait for the thread to read the variable. main sets id to 0, and it starts a thread, and it sets id to 1, and it starts a thread, ..., and it sets id to N and stops looping. When does the thread read the variable? No idea, could be any time, so the number it reads could be higher than the right number.
You can fix this by either making a new variable per thread to hold its ID (with new or just with an array of 8 separate ints) or you can use a little trick, and cast the ID to a pointer since it doesn't have to be a real pointer:
// No &. Just take the number and use that number as a pointer.
// vvvvvvvv
pthread_create(&thread[id], NULL, (void *) run, (void *)id);
// ^^^^^^^^
// don't know why you wrote (void *) here
in the run function:
int id = (int)arg;
// no *. This isn't a pointer to an int, it's just an int hiding in a pointer variable
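Putting both fixes together, a rough sketch of how the worker and the thread creation could look (the for(;;) shape and the long loop variable are my choices for a clean int-to-pointer cast, not the original code):
// sketch: dequeue under the lock, test for NULL outside it,
// and recover the id from the pointer value itself
void *run(void *arg)
{
    int id = (int)(long)arg;
    char *word;
    for (;;)
    {
        pthread_mutex_lock(&lock);
        word = dequeue();              // returns NULL once the queue is drained
        pthread_mutex_unlock(&lock);
        if (word == NULL)
            return NULL;
        printf("Thread %d: %s\n", id, word);   // the work happens outside the lock
    }
}
// ... and in main():
for (long id = 0; id < threadCount; id++)
    pthread_create(&thread[id], NULL, run, (void *)id);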

C - How To Close All Threads In A ThreadPool When Queue Is Empty And Won't Receive Any More Data

Right now, I have an unbounded synchronous queue that enqueues data and sends a dequeueReady signal. The main thread will spawn a thread pool that will start to dequeue from this queue. However, there will come a point when the queue is closed and no more data will be coming in, at that point, I want all the threads to wake up and terminate.
Here is my queue implementation:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "unbounded_queue.h"
struct Node { // These are private and not meant to be accessed by client code.
void *data;
struct Node *next;
};
struct Queue *initQueue() {
struct Queue *queue = (struct Queue *) malloc(sizeof(struct Queue));
if (queue == NULL) {
perror("Init Queue Memory Allocation Failed:");
exit(2);
}
queue->head = queue->tail = NULL;
queue->queueSize = 0;
queue->jobComplete = false;
pthread_mutex_init(&queue->lock, NULL);
pthread_cond_init(&queue->dequeueReady, NULL);
return queue;
}
void *enqueue(struct Queue *queue, void *item, size_t itemSize) {
pthread_mutex_lock(&queue->lock); // FIXME: CHECK RETURN VALUES OF ALL THIS! IF IT FAILS,
struct Node *tempNode = (struct Node *) malloc(sizeof(struct Node));
tempNode->data = malloc(itemSize); // Don't forget your null terminator
memcpy(tempNode->data, item, itemSize);
tempNode->next = NULL;
if (isEmpty(queue)) {
queue->head = queue->tail = tempNode; // If empty, head and tail point to the same thing.
pthread_mutex_unlock(&queue->lock);
return tempNode->data;
}
queue->tail->next = tempNode; // tail points to old tempNode. This says old tempNode now points to new tempNode
queue->tail = tempNode; // Current tail also needs to point to new tempNode
queue->queueSize++;
pthread_cond_signal(&queue->dequeueReady);
pthread_mutex_unlock(&queue->lock); // FIXME: Check return value
return item;
}
void *dequeue(struct Queue *queue) {
pthread_mutex_lock(&queue->lock); // FIXME: CHECK RETURN VALUES OF ALL THIS! IF IT FAILS,
while (isEmpty(queue)) {
if (queue->jobComplete) {
pthread_mutex_unlock(&queue->lock);
return NULL;
}
pthread_cond_wait(&queue->dequeueReady, &queue->lock);
}
struct Node *tempNode = queue->head; // Get node to dequeue
char *data = tempNode->data; // Need to store the data so we can use it later. Client is responsible for freeing this because it points to dynamic memory. This could be improved so the client doesn't have to do anything, but it's fine for now.
queue->head = queue->head->next;
if (queue->head == NULL) {
queue->tail = NULL;
}
queue->queueSize--;
free(tempNode);
pthread_mutex_unlock(&queue->lock); // FIXME: CHECK RETURN VALUES OF ALL THIS! IF IT FAILS,
return data;
}
void jobComplete(struct Queue *queue) {
pthread_mutex_lock(&queue->lock);
queue->jobComplete = true;
pthread_cond_broadcast(&queue->dequeueReady);
pthread_mutex_unlock(&queue->lock);
return;
}
bool isEmpty(struct Queue *queue) {
return (queue->head == NULL);
}
From the main thread, I am creating the thread pool and starting up the threads by dequeuing from the queue. However, eventually, some of the threads will be waiting for a dequeue signal that will never come. Hence, I created the jobComplete function. Once the main thread realizes that queue1 is empty, that means no more data will be coming in and we can call jobComplete and stop.
Here is how I am spawning the worker threads:
while (!isEmpty(queue1)) {
// The main thread here will call a function that enqueues data to queue1 and queue2.
// Data from queue1 is dequeued here and used.
while (!isEmpty(queue2)) { // While the queue is not empty, keep dequeuing and spawn a new worker thread.
for (int x = 0; x < threadNum; x++) {
pthread_create(&threads[x], NULL, wrapFile, dequeue(queue2));
}
for (int x = 0; x < threadNum; x++) {
if (isEmpty(queue1)) { // If queue1 is empty, that means nothing else will be added to queue2.
jobComplete(queue2);
}
pthread_join(threads[x], NULL); // Wait for thread to finish.
}
}
}
When I try to run this, it won't detect that the queue is empty, and therefore jobComplete won't run. I don't know how to join the threads that are done and broadcast to all the other sleeping threads that they should terminate. As of right now, the queue waits forever for more data to be enqueued, but I know more data won't be enqueued because queue1 is empty at this point.
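For what it's worth, the usual shape for this kind of shutdown, sketched against the queue above, is to let each worker loop on dequeue() until it returns NULL, which only happens once jobComplete() has been called on an empty queue. The worker function, the long-running loop, and where jobComplete is called are assumptions here, not the poster's code:
// sketch only: long-lived workers that drain the queue and exit on NULL
void *worker(void *arg) {
    struct Queue *queue = arg;
    void *item;
    while ((item = dequeue(queue)) != NULL) {
        // do the per-item work here (e.g. what wrapFile does), then release the copy made in enqueue
        free(item);
    }
    return NULL;
}
// main thread:
for (int x = 0; x < threadNum; x++)
    pthread_create(&threads[x], NULL, worker, queue2);
// ... enqueue everything to queue2 ...
jobComplete(queue2);      // wakes every worker blocked in pthread_cond_wait
for (int x = 0; x < threadNum; x++)
    pthread_join(threads[x], NULL);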

Trouble with Linked Lists and pthreads

I'm having some trouble understanding exactly what I'm doing wrong here.
I have a program, written in C, which adds elements to a queue from multiple threads. From my debugging, I can see that the mutex locks are working as intended, but every time the worker thread adds an element to the linked list, it is lost by the time the next thread comes around.
For example, if thread 1 comes in first, the queue will have element 1 in slot 0.
Next, thread 2 comes in and appends itself, which works correctly (printing out the contents of the queue before the method returns shows both threads 1 and 2 in the queue). However, the next time a thread calls pushToQueue, the queue only has one element (the original element) inside, and the new thread appends itself to what it believes is the only element in the queue.
Here is my simplified code; the function schedule() is called by a thread in another file:
//Global Variables
struct node* readyQueue = NULL;
pthread_mutex_t readyLock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t computeReadyCond = PTHREAD_COND_INITIALIZER;
void pushToQueue(struct node* inthread) {
struct node* cur = inthread;
pthread_mutex_lock(&readyLock);
if (findInReadyQueue(cur->tid) != NULL) {
pthread_mutex_unlock(&readyLock);
return;
}
if (readyQueue == NULL) {
// No list exists, so we must create it.
readyQueue = cur;
readyQueue->next = NULL;
readyQueue->prev = NULL;
}
else {
// append the current thread to the end
struct node* tmp = readyQueue;
struct node* prev = NULL;
while (tmp != NULL) {
prev = tmp;
tmp = tmp->next;
}
prev->next = cur;
cur->next = NULL;
}
pthread_cond_signal(&computeReadyCond); //Signal other threads that the queue is ready
pthread_mutex_unlock(&readyLock);
}
//Calling method (called from a pthread in another class)
int schedule(float currentTime, int tid, int remainingTime, int tprio) {
// update global time
global_currentTime = currentTime;
// create a new structure for this thread to put in the queue
struct node curThread;
curThread.tid = tid;
curThread.remainingTime = remainingTime;
curThread.tprio = tprio;
curThread.calledAt = currentTime;
curThread.state = NEW;
if (curThread.remainingTime <= 0) {
removeFromQueue(tid);
return global_currentTime;
}
// push the thread to its queue based on scheduler rules
pushToQueue(&curThread);
waitForCompute(tid);
printf("wait thread has joined from thread %d\n", tid);
return global_currentTime;
}
void waitForCompute(int input) {
int tid = input;
pthread_mutex_lock(&readyLock);
while (readyQueue->tid != tid) {
pthread_cond_wait(&computeReadyCond, &readyLock);
}
pthread_mutex_unlock(&readyLock);
}
thanks in advance for any help
You pass the address of a local variable to pushToQueue from schedule, which you never should.
int schedule(float currentTime, int tid, int remainingTime, int tprio) {
// ...
// create a new structure for this thread to put in the queue
struct node curThread;
// ...
pushToQueue(&curThread);
When schedule returns, that memory area is deallocated. You set the global variable struct node* readyQueue to this memory address in the pushToQueue method and add items to readyQueue, but readyQueue points to a variable on the stack, which is discarded when the function returns.
It can happen that the same memory address is always reused for struct node curThread; in the schedule method, so readyQueue always points to a node struct that is recreated each time schedule is called.
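A sketch of one way around it, allocating the node on the heap so it outlives the call (the error handling and the free() on the early return are my additions, not the original code):
int schedule(float currentTime, int tid, int remainingTime, int tprio) {
    global_currentTime = currentTime;
    // allocate the node on the heap so it survives after schedule() returns
    struct node *curThread = malloc(sizeof *curThread);
    if (curThread == NULL)
        return -1;                      // error handling is only sketched
    curThread->tid = tid;
    curThread->remainingTime = remainingTime;
    curThread->tprio = tprio;
    curThread->calledAt = currentTime;
    curThread->state = NEW;
    if (curThread->remainingTime <= 0) {
        free(curThread);                // never entered the queue
        removeFromQueue(tid);
        return global_currentTime;
    }
    pushToQueue(curThread);             // the queue now owns this memory
    waitForCompute(tid);
    return global_currentTime;
}
Whoever later removes the node from the queue then becomes responsible for freeing it.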

reader priority using semaphores

I am hitting a wall with a project I am working on and I was wondering if anyone can provide me with some help. I'll try to simplify my code so you aren't staring at 100+ lines of gibberish. I am pretty sure only the first code block is relevant; the others are my system calls in sys.c, included for reference.
I am supposed to create a program that simulates a reader priority implementation of process synchronization using my own semaphores. When I run it, I reach a deadlock whenever a reader gets to the critical section. I don't know what I did wrong.
An example output is:
Writer 0 wrote- 6
writer 1 wrote- 4
Writer 2 wrote- 2
Reader 0 read- 2
and then it freezes.
From what I can tell, the critical section semaphore is never getting released.
I believe the problem is with my program, and not with my semaphore or its wait and signal operations, but I included them below for reference.
Thanks in advance.
My attempt so far is as follows:
I left out my initialization and memory mapping; this is the basic structure.
Notes:
RWwait and RWsignal are wrapper functions which successfully make system calls to my semaphore operations.
csMutex controls access to the critical section.
nrMutex controls the queue of readers waiting for the critical section.
Both mutex values are initially 1.
*number is a pointer to a number which both readers and writers have access to.
int i;
//create writers
for(i=0; i < writers; i++){
if (fork()==0){
while(1){
RWwait(csMutex); //wait for the critical section and lock
*number = rand() % 10;
printf("Writer %d wrote- %d\n", i, *number);
RWsignal(csMutex);//unlock critical section
}
}
}
int nr = 0; //number of readers
//create readers
for(i=0; i < readers; i++){
if (fork()==0){
while(1){
RWwait(nrMutex);
nr++;
if (nr == 1)
RWwait(csMutex);
RWsignal(nrMutex);
printf("Reader %d read- %d\n", i, *number);
RWwait(nrMutex);
nr--;
if (nr == 0)
RWsignal(csMutex);
RWsignal(nrMutex);
}
}
}
THESE ARE MY SYSTEM CALLS AND STRUCTURES IN sys.c
AGAIN, JUST FOR REFERENCE
struct ProcQ {
struct task_struct *ts;
struct ProcQ *next;
};
and
struct RW_Sem {
int value;
char *type;
//Front and back nodes for the queue
struct ProcQ *front;
struct ProcQ *back;
};
Wait Operation:
asmlinkage long sys_RW_wait(struct RW_Sem *sem){
spin_lock(&sem_lock); //locks the program
sem->value -= 1; //decrement value
if (sem->value < 0){ //insert into queue
struct ProcQ *node; //create a new node for the queue
node = (struct ProcQ*)kmalloc(sizeof(struct ProcQ), GFP_KERNEL);
node->ts = current; //assign this process to task_struct
node->next = NULL; //assign the next node to null
if(sem->front == NULL){ //if the process queue is empty
sem->front = node;
sem->back = node;
}
else{ //if the queue is NOT empty
sem->back->next = node;
sem->back = node;
}
set_current_state(TASK_INTERRUPTIBLE); //sleep my child
spin_unlock(&sem_lock); //unlock
schedule();
}
else{ //queue bypass
spin_unlock(&sem_lock);
}
return 0;
}
Signal Operation:
asmlinkage long sys_RW_signal(struct RW_Sem *sem){
spin_lock(&sem_lock); //locks the program
sem->value += 1; //increment value
if(sem->value <= 0){ //wake up process, otherwise bypass
struct ProcQ *dqProc; //temporary node pointer for signaled process
struct task_struct *wake; //temp for task struct to wake
dqProc = sem->front;
if (dqProc != NULL) {
wake = dqProc->ts;
if(sem->front==sem->back){ //if only item in queue
sem->front = NULL;
sem->back = NULL;
}
else{
sem->front = dqProc->next;
}
wake_up_process(wake); //wake up!
kfree(dqProc); //free that space
}
}
spin_unlock(&sem_lock); //unlock
return 0; //success!
}
I would say in your reader:
nr++;
if (nr == 1)
RWwait(csMutex);
is a race condition.
Similarly I would not do:
if (nr == 0)
RWsignal(csMutex);
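One thing worth checking: nr is an ordinary local variable, and after fork() every reader process gets its own private copy, so the first-reader/last-reader logic cannot coordinate across processes. The classic reader-priority entry and exit sections, with the count kept in the same shared mapping as *number (that placement is an assumption about the omitted mmap setup), look like:
/* sketch: nr lives in shared memory, e.g.
 *   int *nr = mmap(NULL, sizeof *nr, PROT_READ | PROT_WRITE,
 *                  MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 * initialized to 0 before forking */
RWwait(nrMutex);            /* protect the reader count */
(*nr)++;
if (*nr == 1)
    RWwait(csMutex);        /* first reader locks out writers */
RWsignal(nrMutex);

printf("Reader %d read- %d\n", i, *number);   /* critical section */

RWwait(nrMutex);
(*nr)--;
if (*nr == 0)
    RWsignal(csMutex);      /* last reader lets writers back in */
RWsignal(nrMutex);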

How to implement thread safe queues

I have used a multithreading library before in Python, but this is the first time I am trying threading in C. I want to create a pool of workers. In turn, these workers are supposed to push to or pop from a queue. The following code is not quite there yet, but it is what I have done so far:
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#define NUMTHREADS 20 /* number of threads to create */
typedef struct node node;
typedef struct queue queue;
struct node {
char *name;
node *next;
};
struct queue {
node *head;
node *tail;
};
/* pop: remove and return first name from a queue */
char *pop(queue *q)
{
if (q->head == NULL)
return NULL;
char *name = q->head->name;
node *tmp = q->head;
q->head = q->head->next;
free(tmp);
return name;
}
/* push: add name to the end of the queue */
int push(queue *q, char *name)
{
node *new = malloc(sizeof(node));
if (new == NULL)
return -1;
new->name = name;
new->next = NULL;
if (q->tail != NULL)
q->tail->next = new;
q->tail = new;
if (q->head == NULL) /* first value */
q->head = new;
return 0;
}
/* printname: get a name from the queue, and print it. */
void *printname(void *sharedQ)
{
queue *q = (queue *) sharedQ;
char *name = pop(q);
if (name == NULL)
pthread_exit(NULL);
printf("%s\n",name);
pthread_exit(NULL);
}
int main()
{
size_t i;
int rc;
pthread_t threads[NUMTHREADS];
char *names[] = {
"yasar",
"arabaci",
"osman",
"ahmet",
"mehmet",
"zeliha"
};
queue *q = malloc(sizeof(queue));
q->head = NULL;
q->tail = NULL;
/* number of elements in the array */
size_t numelems = sizeof(names) / sizeof(char *);
for (i = 0; i < numelems; i++) /* push each name */
push(q, names[i]);
for (i = 0; i < NUMTHREADS; i++) { /* fire up threads */
rc = pthread_create(&threads[i], NULL, printname,
(void *)q);
if (rc) {
printf("Error, return code from pthread is %d\n", rc);
exit(-1);
}
}
pthread_exit(NULL);
}
I tried the above code, and it always printed each name exactly once. It didn't skip any names or print the same name twice. On the other hand, I am not sure how thread safe this queue implementation is. So my question is: is this a thread-safe queue? If not, why not? And how do I make it thread safe?
The code is not thread safe.
The push and pop functions are not thread safe. In the code, the push is only being executed by a single thread, so it doesn't matter, but the pops are being executed by multiple threads.
1. char *name = q->head->name;
2. node *tmp = q->head;
3. q->head = q->head->next;
4. free(tmp);
Imagine thread A executes up to and including line 2. Thread B then executes up to and including line 4. Thread A resumes execution. It finds that q->head has already been free()ed.
Now, this so far discusses logical issues.
However, there are physical issues to consider.
Imagine we had a locking mechanism whereby threads could synchronize their behaviour, such that only one thread at a time could execute the code in the lines 1 to 4, e.g. a mutex, which is an object only one thread can 'hold' at a time, and where attempting to get the mutex blocks the thread until the holding thread releases.
0. get mutex
1. char *name = q->head->name;
2. node *tmp = q->head;
3. q->head = q->head->next;
4. free(tmp);
5. release mutex
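In C with pthreads, that sketch might translate to a small wrapper around the question's pop() (the lock name and the wrapper approach are illustrative, not from the original code):
#include <pthread.h>
static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
/* thread-safe version of pop(): steps 1-4 run while holding the mutex */
char *pop_locked(queue *q)
{
    pthread_mutex_lock(&qlock);      /* 0. get mutex      */
    char *name = pop(q);             /* 1-4. original pop */
    pthread_mutex_unlock(&qlock);    /* 5. release mutex  */
    return name;
}
push() would need to take the same lock, so that pushes and pops never interleave.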
We would still have a problem, in that the writes performed by any given CPU core (not thread) are visible immediately only to threads on that core, not to threads on other cores.
It is not enough merely to synchronize execution; at the same time, we must also ensure that the writes performed by a core become visible to other cores.
(Un)fortunately, all modern synchronization methods also perform this write flushing (e.g. when you get a mutex, you also flush all writes to memory). I say unfortunately because you don't -always- need this behaviour and it is harmful to performance.
It is not thread-safe since multiple threads may modify the pointers in the linked list at the same time, potentially corrupting it.
Here you have an answer for a very similar question:
Multiple-writer thread-safe queue in C
There you can see how to make the queue thread-safe.
