Round-robin scheduler C implementation for Multipath TCP / Linux kernel - c

I am trying to understand the round-robin scheduler code for Multipath TCP. It is available here:
https://github.com/multipath-tcp/mptcp/blob/mptcp_v0.95/net/mptcp/mptcp_rr.c
I have had a lot of difficulty with it since there is no documentation.
Here is the code:
/* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */
#include <linux/module.h>
#include <net/mptcp.h>
static unsigned char num_segments __read_mostly = 1;
module_param(num_segments, byte, 0644);
MODULE_PARM_DESC(num_segments, "The number of consecutive segments that are part of a burst");
static bool cwnd_limited __read_mostly = 1;
module_param(cwnd_limited, bool, 0644);
MODULE_PARM_DESC(cwnd_limited, "if set to 1, the scheduler tries to fill the congestion-window on all subflows");
struct rrsched_priv {
unsigned char quota;
};
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
/* Is the sub-socket sk available to send the skb? */
static bool mptcp_rr_is_available(const struct sock *sk, const struct sk_buff *skb,
bool zero_wnd_test, bool cwnd_test)
{
const struct tcp_sock *tp = tcp_sk(sk);
unsigned int space, in_flight;
/* Set of states for which we are allowed to send data */
if (!mptcp_sk_can_send(sk))
return false;
/* We do not send data on this subflow unless it is
* fully established, i.e. the 4th ack has been received.
*/
if (tp->mptcp->pre_established)
return false;
if (tp->pf)
return false;
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) {
/* If SACK is disabled, and we got a loss, TCP does not exit
* the loss-state until something above high_seq has been acked.
* (see tcp_try_undo_recovery)
*
* high_seq is the snd_nxt at the moment of the RTO. As soon
* as we have an RTO, we won't push data on the subflow.
* Thus, snd_una can never go beyond high_seq.
*/
if (!tcp_is_reno(tp))
return false;
else if (tp->snd_una != tp->high_seq)
return false;
}
if (!tp->mptcp->fully_established) {
/* Make sure that we send in-order data */
if (skb && tp->mptcp->second_packet &&
tp->mptcp->last_end_data_seq != TCP_SKB_CB(skb)->seq)
return false;
}
if (!cwnd_test)
goto zero_wnd_test;
in_flight = tcp_packets_in_flight(tp);
/* Not even a single spot in the cwnd */
if (in_flight >= tp->snd_cwnd)
return false;
/* Now, check if what is queued in the subflow's send-queue
* already fills the cwnd.
*/
space = (tp->snd_cwnd - in_flight) * tp->mss_cache;
if (tp->write_seq - tp->snd_nxt > space)
return false;
zero_wnd_test:
if (zero_wnd_test && !before(tp->write_seq, tcp_wnd_end(tp)))
return false;
return true;
}
/* Are we not allowed to reinject this skb on tp? */
static int mptcp_rr_dont_reinject_skb(const struct tcp_sock *tp, const struct sk_buff *skb)
{
/* If the skb has already been enqueued in this sk, try to find
* another one.
*/
return skb &&
/* Has the skb already been enqueued into this subsocket? */
mptcp_pi_to_flag(tp->mptcp->path_index) & TCP_SKB_CB(skb)->path_mask;
}
/* We just look for any subflow that is available */
static struct sock *rr_get_available_subflow(struct sock *meta_sk,
struct sk_buff *skb,
bool zero_wnd_test)
{
const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
struct sock *sk = NULL, *bestsk = NULL, *backupsk = NULL;
struct mptcp_tcp_sock *mptcp;
/* Answer data_fin on same subflow!!! */
if (meta_sk->sk_shutdown & RCV_SHUTDOWN &&
skb && mptcp_is_data_fin(skb)) {
mptcp_for_each_sub(mpcb, mptcp) {
sk = mptcp_to_sock(mptcp);
if (tcp_sk(sk)->mptcp->path_index == mpcb->dfin_path_index &&
mptcp_rr_is_available(sk, skb, zero_wnd_test, true))
return sk;
}
}
/* First, find the best subflow */
mptcp_for_each_sub(mpcb, mptcp) {
struct tcp_sock *tp;
sk = mptcp_to_sock(mptcp);
tp = tcp_sk(sk);
if (!mptcp_rr_is_available(sk, skb, zero_wnd_test, true))
continue;
if (mptcp_rr_dont_reinject_skb(tp, skb)) {
backupsk = sk;
continue;
}
bestsk = sk;
}
if (bestsk) {
sk = bestsk;
} else if (backupsk) {
/* It has been sent on all subflows once - let's give it a
* chance again by restarting its pathmask.
*/
if (skb)
TCP_SKB_CB(skb)->path_mask = 0;
sk = backupsk;
}
return sk;
}
/* Returns the next segment to be sent from the mptcp meta-queue.
* (chooses the reinject queue if any segment is waiting in it, otherwise,
* chooses the normal write queue).
Sets *@reinject to 1 if the returned segment comes from the
* reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk,
* and sets it to -1 if it is a meta-level retransmission to optimize the
* receive-buffer.
*/
static struct sk_buff *__mptcp_rr_next_segment(const struct sock *meta_sk, int *reinject)
{
const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
struct sk_buff *skb = NULL;
*reinject = 0;
/* If we are in fallback-mode, just take from the meta-send-queue */
if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping)
return tcp_send_head(meta_sk);
skb = skb_peek(&mpcb->reinject_queue);
if (skb)
*reinject = 1;
else
skb = tcp_send_head(meta_sk);
return skb;
}
static struct sk_buff *mptcp_rr_next_segment(struct sock *meta_sk,
int *reinject,
struct sock **subsk,
unsigned int *limit)
{
const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
struct sock *choose_sk = NULL;
struct mptcp_tcp_sock *mptcp;
struct sk_buff *skb = __mptcp_rr_next_segment(meta_sk, reinject);
unsigned char split = num_segments;
unsigned char iter = 0, full_subs = 0;
/* As we set it, we have to reset it as well. */
*limit = 0;
if (!skb)
return NULL;
if (*reinject) {
*subsk = rr_get_available_subflow(meta_sk, skb, false);
if (!*subsk)
return NULL;
return skb;
}
retry:
/* First, we look for a subflow who is currently being used */
mptcp_for_each_sub(mpcb, mptcp) {
struct sock *sk_it = mptcp_to_sock(mptcp);
struct tcp_sock *tp_it = tcp_sk(sk_it);
struct rrsched_priv *rr_p = rrsched_get_priv(tp_it);
if (!mptcp_rr_is_available(sk_it, skb, false, cwnd_limited))
continue;
iter++;
/* Is this subflow currently being used? */
if (rr_p->quota > 0 && rr_p->quota < num_segments) {
split = num_segments - rr_p->quota;
choose_sk = sk_it;
goto found;
}
/* Or, it's totally unused */
if (!rr_p->quota) {
split = num_segments;
choose_sk = sk_it;
}
/* Or, it must then be fully used */
if (rr_p->quota >= num_segments)
full_subs++;
}
/* All considered subflows have a full quota, and we considered at
* least one.
*/
if (iter && iter == full_subs) {
/* So, we restart this round by setting quota to 0 and retry
* to find a subflow.
*/
mptcp_for_each_sub(mpcb, mptcp) {
struct sock *sk_it = mptcp_to_sock(mptcp);
struct tcp_sock *tp_it = tcp_sk(sk_it);
struct rrsched_priv *rr_p = rrsched_get_priv(tp_it);
if (!mptcp_rr_is_available(sk_it, skb, false, cwnd_limited))
continue;
rr_p->quota = 0;
}
goto retry;
}
found:
if (choose_sk) {
unsigned int mss_now;
struct tcp_sock *choose_tp = tcp_sk(choose_sk);
struct rrsched_priv *rr_p = rrsched_get_priv(choose_tp);
if (!mptcp_rr_is_available(choose_sk, skb, false, true))
return NULL;
*subsk = choose_sk;
mss_now = tcp_current_mss(*subsk);
*limit = split * mss_now;
if (skb->len > mss_now)
rr_p->quota += DIV_ROUND_UP(skb->len, mss_now);
else
rr_p->quota++;
return skb;
}
return NULL;
}
static struct mptcp_sched_ops mptcp_sched_rr = {
.get_subflow = rr_get_available_subflow,
.next_segment = mptcp_rr_next_segment,
.name = "roundrobin",
.owner = THIS_MODULE,
};
static int __init rr_register(void)
{
BUILD_BUG_ON(sizeof(struct rrsched_priv) > MPTCP_SCHED_SIZE);
if (mptcp_register_scheduler(&mptcp_sched_rr))
return -1;
return 0;
}
static void rr_unregister(void)
{
mptcp_unregister_scheduler(&mptcp_sched_rr);
}
module_init(rr_register);
module_exit(rr_unregister);
MODULE_AUTHOR("Christoph Paasch");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ROUNDROBIN MPTCP");
MODULE_VERSION("0.89");
Please help me with this part of the code. I don't understand what it does; can anyone help me understand it?
struct rrsched_priv {
unsigned char quota;
};
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}

The Multipath TCP scheduler is responsible for choosing the subflow on which each packet will be forwarded and transmitted. In the round-robin case this choice simply cycles through the available subflows, and the code below ties together the Linux kernel TCP stack (struct tcp_sock) and the MPTCP stack (net/mptcp.h).
The following structure holds the scheduler's per-subflow state. quota is the number of segments the subflow has already consumed in the current round: the scheduler keeps feeding a subflow until its quota reaches num_segments, then moves on to the next one.
struct rrsched_priv {
unsigned char quota;
};
At the next step, rrsched_get_priv() maps a subflow's tcp_sock to that per-subflow state. Each subflow's mptcp control block reserves a small scratch area, mptcp_sched[] (MPTCP_SCHED_SIZE bytes), for whatever scheduler module is currently loaded; the function simply casts the address of that area to a struct rrsched_priv pointer and returns it. The BUILD_BUG_ON() in rr_register() checks at compile time that struct rrsched_priv actually fits in the scratch area.
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
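To make the quota mechanism concrete, here is a small user-space simulation of the selection loop in mptcp_rr_next_segment() (a sketch with invented names; it ignores the availability and congestion-window checks the real scheduler performs, and it counts one quota unit per segment rather than DIV_ROUND_UP(skb->len, mss)):
#include <stdio.h>

#define NUM_SEGMENTS 2   /* burst size, like the num_segments module param */
#define NUM_SUBFLOWS 3

struct rrsched_priv { unsigned char quota; };

static struct rrsched_priv subflow[NUM_SUBFLOWS];

/* Pick the next subflow the way mptcp_rr_next_segment() does:
 * prefer one that is mid-burst, else an unused one, and reset all
 * quotas once every subflow has sent a full burst. */
static int pick_subflow(void)
{
    int i, choose, full;

retry:
    choose = -1;
    full = 0;
    for (i = 0; i < NUM_SUBFLOWS; i++) {
        if (subflow[i].quota > 0 && subflow[i].quota < NUM_SEGMENTS)
            return i;              /* currently mid-burst: keep using it */
        if (subflow[i].quota == 0)
            choose = i;            /* totally unused: candidate */
        else
            full++;                /* quota >= NUM_SEGMENTS: exhausted */
    }
    if (full == NUM_SUBFLOWS) {    /* everyone is full: start a new round */
        for (i = 0; i < NUM_SUBFLOWS; i++)
            subflow[i].quota = 0;
        goto retry;
    }
    return choose;
}

int main(void)
{
    for (int seg = 0; seg < 12; seg++) {
        int s = pick_subflow();
        subflow[s].quota++;        /* one segment sent on that subflow */
        printf("segment %2d -> subflow %d\n", seg, s);
    }
    return 0;
}
Running it shows bursts of NUM_SEGMENTS segments rotating over the subflows, which is exactly the behaviour quota exists to track.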
References
[Round-Robin Scheduler]: http://progmp.net/progmp.html#/dissertation_round_robin
[tcp_sock]: https://docs.huihoo.com/doxygen/linux/kernel/3.7/structtcp__sock.html
[Quota]: https://tuprints.ulb.tu-darmstadt.de/7709/13/Dissertation_Froemmgen.pdf
[MPTCP Scheduler]: http://progmp.net/

Related

C: dereferencing pointer to a struct has wrong values [mqtt-c, pointers]

I suspect this is a basic issue and I'm just having a moment of dumbness.
I'm trying to make a very simple MQTT client with the MQTT-C library as part of a testing tool for another solution. When I receive a subscribed message I see a weird issue: inside the mqtt-c library (just before the call through a function pointer, which happens to be my subscriber callback) everything looks fine, but right after the call into my function, dereferencing the pointer shows a struct with completely wrong values inside. If I go one step up in the call stack, back into mqtt-c, the value inspector (set to dereference a uint32_t value at precisely the pointer's address) again shows the correct structure.
The structure whose pointer mqtt-c passes to the callback is allocated on the stack inside a function in the library. I don't think the thread could change, which would invalidate the stack. I have also included mqtt.h, so my function should understand the structure's layout.
I will paste some relevant code snippets; any help will be appreciated, including general advice.
ssize_t __mqtt_recv(struct mqtt_client *client)
{
struct mqtt_response response;
ssize_t mqtt_recv_ret = MQTT_OK;
MQTT_PAL_MUTEX_LOCK(&client->mutex);
/* read until there is nothing left to read, or there was an error */
while(mqtt_recv_ret == MQTT_OK) {
/* read in as many bytes as possible */
ssize_t rv, consumed;
struct mqtt_queued_message *msg = NULL;
rv = mqtt_pal_recvall(client->socketfd, client->recv_buffer.curr, client->recv_buffer.curr_sz, 0);
if (rv < 0) {
/* an error occurred */
client->error = (enum MQTTErrors)rv;
MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
return rv;
} else {
client->recv_buffer.curr += rv;
client->recv_buffer.curr_sz -= (unsigned long)rv;
}
/* attempt to parse */
consumed = mqtt_unpack_response(&response, client->recv_buffer.mem_start, (size_t) (client->recv_buffer.curr - client->recv_buffer.mem_start));
if (consumed < 0) {
client->error = (enum MQTTErrors)consumed;
MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
return consumed;
} else if (consumed == 0) {
/* if curr_sz is 0 then the buffer is too small to ever fit the message */
if (client->recv_buffer.curr_sz == 0) {
printf("receive buff sz is zero??\n");
client->error = MQTT_ERROR_RECV_BUFFER_TOO_SMALL;
MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
return MQTT_ERROR_RECV_BUFFER_TOO_SMALL;
}
/* just need to wait for the rest of the data */
MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
return MQTT_OK;
}
switch (response.fixed_header.control_type) {
//(...)
case MQTT_CONTROL_PUBLISH:
/* stage response, none if qos==0, PUBACK if qos==1, PUBREC if qos==2 */
if (response.decoded.publish.qos_level == 1) {
rv = __mqtt_puback(client, response.decoded.publish.packet_id);
if (rv != MQTT_OK) {
client->error = (enum MQTTErrors)rv;
mqtt_recv_ret = rv;
break;
}
} else if (response.decoded.publish.qos_level == 2) {
/* check if this is a duplicate */
if (mqtt_mq_find(&client->mq, MQTT_CONTROL_PUBREC, &response.decoded.publish.packet_id) != NULL) {
break;
}
rv = __mqtt_pubrec(client, response.decoded.publish.packet_id);
if (rv != MQTT_OK) {
client->error = (enum MQTTErrors)rv;
mqtt_recv_ret = rv;
break;
}
}
/* call publish callback */
printf("address: %d; size: %d\n", (uint32_t) &response.decoded.publish, response.decoded.publish.application_message_size);
//all ok here.
client->publish_response_callback(&client->publish_response_callback_state, &response.decoded.publish);
break;
//(...)
{
/* we've handled the response, now clean the buffer */
void* dest = (unsigned char*)client->recv_buffer.mem_start;
void* src = (unsigned char*)client->recv_buffer.mem_start + consumed;
size_t n = (size_t) (client->recv_buffer.curr - client->recv_buffer.mem_start - consumed);
memmove(dest, src, n);
client->recv_buffer.curr -= consumed;
client->recv_buffer.curr_sz += (unsigned long)consumed;
}
}
/* In case there was some error handling the (well formed) message, we end up here */
MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
return mqtt_recv_ret;
}
struct mqtt_response {
/** @brief The mqtt_fixed_header of the deserialized packet. */
struct mqtt_fixed_header fixed_header;
/**
 * @brief A union of the possible responses from the broker.
 *
 * @note The fixed_header contains the control type. This control type corresponds to the
 * member of this union that should be accessed. For example if
 * fixed_header#control_type == \c MQTT_CONTROL_PUBLISH then
 * decoded#publish should be accessed.
 */
union {
struct mqtt_response_connack connack;
struct mqtt_response_publish publish;
struct mqtt_response_puback puback;
struct mqtt_response_pubrec pubrec;
struct mqtt_response_pubrel pubrel;
struct mqtt_response_pubcomp pubcomp;
struct mqtt_response_suback suback;
struct mqtt_response_unsuback unsuback;
struct mqtt_response_pingresp pingresp;
} decoded;
};
static void netReqHandler(void) {
int sockfd = myOpenSocket(ipaddr, port);//open_nb_socket(ipaddr, port);
mqtt_init(&client, sockfd, sendbuf, buffersizes, receivebuf, buffersizes, publish_callback);
printf("client error is %d\n", (&client)->error);
void publish_callback(void** unused, struct mqtt_response_publish *published) {
printf("app msg size : %d, addr: %d\n", published->application_message_size, (uint32_t) published);
char *nullTerminatedMessage = malloc(published->application_message_size + 1);
strncpy(nullTerminatedMessage, published->application_message, published->application_message_size);
nullTerminatedMessage[published->application_message_size] = '\0';
//(...)
}

Is it possible to use a POSIX message queue to transfer data between threads?

Is it possible to transfer data between threads, producer/consumer style, using a POSIX message queue?
I need to transfer arrays of 5000 doubles each from a producer thread to a consumer thread for processing.
Is a POSIX message queue designed for such a purpose?
POSIX message queues are absolutely the wrong tool for that.
All you actually need is a buffer, a couple of counters or pointers, a mutex, and a couple of condition variables:
static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t buffer_more = PTHREAD_COND_INITIALIZER;
static pthread_cond_t buffer_room = PTHREAD_COND_INITIALIZER;
/* Pointer and counters are volatile, since different threads may change them
whenever they hold the buffer_lock. */
static double * volatile buffer_data = NULL;
static volatile size_t buffer_size = 0;
static volatile size_t buffer_next = 0; /* First/next buffered value */
static volatile size_t buffer_ends = 0; /* First unused slot in buffer */
/* Optional flag to indicate no more data to be produced or consumed */
static volatile int buffer_done = 0;
/* Helper function to repack the buffer; caller must hold buffer_lock. */
static inline void buffer_repack_locked(void)
{
if (buffer_ends > buffer_next) {
if (buffer_next > 0) {
memmove(buffer_data, buffer_data + buffer_next,
(buffer_ends - buffer_next) * sizeof buffer_data[0]);
buffer_ends -= buffer_next;
buffer_next = 0;
}
} else {
buffer_next = 0;
buffer_ends = 0;
}
}
To grow the buffer (at any point), you use
static int buffer_resize(size_t new_size)
{
pthread_mutex_lock(&buffer_lock);
/* First, repack the buffer to start of the area. */
buffer_repack_locked();
/* Do not lose any data, however. */
if (new_size < buffer_ends)
new_size = buffer_ends;
/* Reallocate. */
void *new_data = realloc(buffer_data, new_size * sizeof buffer_data[0]);
if (!new_data) {
/* Not enough memory to reallocate; old data still exists. */
pthread_mutex_unlock(&buffer_lock);
return -1;
}
/* Success. */
buffer_data = new_data;
buffer_size = new_size;
/* Wake up any waiters waiting on room in the buffer, just to be sure. */
pthread_cond_broadcast(&buffer_room);
pthread_mutex_unlock(&buffer_lock);
return 0;
}
Producer or producers add a block of data to the buffer using
static void buffer_add(const double *data, size_t count)
{
pthread_mutex_lock(&buffer_lock);
buffer_repack_locked();
while (count > 0) {
if (buffer_ends >= buffer_size) {
/* Buffer is full. Wait for more room, repack, retry. */
pthread_cond_wait(&buffer_room, &buffer_lock);
buffer_repack_locked();
continue;
}
/* How much can we add? */
size_t size = buffer_size - buffer_ends;
if (size > count)
size = count;
memmove(buffer_data + buffer_ends, data, size * sizeof buffer_data[0]);
buffer_ends += size;
/* Wake up a consumer waiting on more data */
pthread_cond_signal(&buffer_more);
/* Update to reflect the data already added */
data += size;
count -= size;
}
/* All data added. */
pthread_mutex_unlock(&buffer_lock);
}
Similarly, consumers get data from the buffer using
static size_t buffer_get(double *data, size_t min_size, size_t max_size)
{
size_t size, have = 0;
/* Make sure min and max size are in the right order. */
if (max_size < min_size) {
size = max_size;
max_size = min_size;
min_size = size;
}
pthread_mutex_lock(&buffer_lock);
while (1) {
/* No more data incoming? */
if (buffer_done) {
pthread_mutex_unlock(&buffer_lock);
return have;
}
/* Buffer empty? */
if (buffer_next >= buffer_ends) {
pthread_cond_wait(&buffer_more, &buffer_lock);
continue;
}
/* How much can we grab? */
size = buffer_ends - buffer_next;
if (have + size > max_size)
size = max_size - have;
memmove(data, buffer_data + buffer_next,
size * sizeof buffer_data[0]);
buffer_next += size;
/* Wake up a waiter for empty room in the buffer. */
pthread_cond_signal(&buffer_room);
/* Update to reflect the data we just copied out. */
data += size;
have += size;
/* Enough data to return? */
if (have >= min_size) {
pthread_mutex_unlock(&buffer_lock);
return have;
}
}
}
While this does copy the data around quite a bit, it allows both producers and consumers to work on their own data in any size "chunks" they wish.
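For example, a single producer and a single consumer could drive it like this (a minimal sketch under the declarations above; thread counts, chunk sizes, and the work done are made up):
#include <pthread.h>
#include <stdio.h>

static void *producer(void *arg)
{
    (void)arg;
    double chunk[500];
    for (int i = 0; i < 10; i++) {
        for (int j = 0; j < 500; j++)
            chunk[j] = i * 500 + j;
        buffer_add(chunk, 500);          /* blocks while the buffer is full */
    }
    /* Signal end of data. */
    pthread_mutex_lock(&buffer_lock);
    buffer_done = 1;
    pthread_cond_broadcast(&buffer_more);
    pthread_mutex_unlock(&buffer_lock);
    return NULL;
}

static void *consumer(void *arg)
{
    (void)arg;
    double block[1000];
    size_t n;
    /* Ask for at least 1 and at most 1000 values per call. */
    while ((n = buffer_get(block, 1, 1000)) > 0)
        printf("consumed %zu values\n", n);
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    if (buffer_resize(2000) != 0)        /* give the buffer some room first */
        return 1;
    pthread_create(&p, NULL, producer, NULL);
    pthread_create(&c, NULL, consumer, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}
Note that buffer_get() as written checks buffer_done before draining the buffer, so values still queued when the flag is set can be dropped; drain the buffer before honouring the flag if that matters to you.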
If your producers and consumers work on matrices, or other "packetized" data of some maximum size, it makes sense to use singly-linked lists of preallocated packets of data, and not a linear buffer:
struct data_packet {
struct data_packet *next;
size_t size; /* Maximum size of data */
size_t used; /* Or rows, cols if a matrix */
double data[];
};
struct data_queue {
pthread_mutex_t lock;
pthread_cond_t more;
pthread_cond_t room;
struct data_packet *queue;
struct data_packet *unused;
unsigned long produced; /* Optional, just information */
unsigned long consumed; /* Optional, just information */
volatile int done; /* Set when no more to be produced */
};
static void free_data_packets(struct data_packet *root)
{
while (root) {
struct data_packet *curr = root;
root = root->next;
curr->next = NULL;
curr->size = 0;
free(curr);
}
}
To initialize a data queue, we also need to generate some empty packets in it. This must be done before any threads start working with the queue:
/* Returns the count of data packets actually created,
or 0 if an error occurs (with errno set).
*/
size_t data_queue_init(struct data_queue *q,
const size_t size,
const size_t count)
{
if (!q) {
errno = EINVAL;
return 0;
}
pthread_mutex_init(&(q->lock), NULL);
pthread_cond_init(&(q->more), NULL);
pthread_cond_init(&(q->room), NULL);
q->queue = NULL;
q->unused = NULL;
q->produced = 0;
q->consumed = 0;
q->done = 0;
/* Makes no sense to request no data packets. */
if (count < 1) {
errno = EINVAL;
return 0;
}
/* Create a chain of empty packets of desired size. */
struct data_packet *curr, *unused = NULL;
size_t have = 0;
while (have < count) {
curr = malloc( sizeof (struct data_packet)
+ size * sizeof curr->data[0]);
if (!curr)
break;
curr->next = unused;
curr->size = size;
curr->used = 0;
unused = curr;
have++;
}
if (!have) {
errno = ENOMEM;
return 0;
}
/* Attach chain to data queue; done. */
q->unused = unused;
return have;
}
Producers grab a free packet from the data queue:
struct data_packet *data_queue_get_unused(struct data_queue *q)
{
/* Safety check. */
if (!q) {
errno = EINVAL;
return NULL;
}
pthread_mutex_lock(&(q->lock));
while (!q->done) {
struct data_packet *curr = q->unused;
/* No unused data packets free? */
if (!curr) {
pthread_cond_wait(&(q->room), &(q->lock));
continue;
}
/* Detach and clear. */
q->unused = curr->next;
curr->next = NULL;
curr->used = 0;
/* Successful. */
pthread_mutex_unlock(&(q->lock));
return curr;
}
/* Done is set. */
pthread_mutex_unlock(&(q->lock));
errno = 0;
return NULL;
}
The above may return NULL, when an error occurs (errno will be set to a nonzero error), or when the done flag is set (errno will be zero).
The producer must remember to set the used field to reflect the amount of data it produced in the packet. (It must not exceed size, though.)
The producer can work on the data packet as they wish; it is their "own", and no locking is needed.
When the producer has completed the packet, they append it to the data queue:
int data_queue_append(struct data_queue *q, struct data_packet *p)
{
/* Safety check. */
if (!q || !p) {
errno = EINVAL;
return -1;
}
p->next = NULL;
pthread_mutex_lock(&(q->lock));
/* Append to queue. */
struct data_packet *prev = q->queue;
if (!prev) {
q->queue = p;
} else {
while (prev->next)
prev = prev->next;
prev->next = p;
}
q->produced++;
/* Wake up a waiter for a new packet. */
pthread_cond_signal(&(q->more));
/* Done. */
pthread_mutex_unlock(&(q->lock));
return 0;
}
Similarly, a consumer grabs the next packet from the queue,
struct data_packet *data_queue_get(struct data_queue *q)
{
/* Safety check. */
if (!q) {
errno = EINVAL;
return NULL;
}
pthread_mutex_lock(&(q->lock));
while (1) {
struct data_packet *curr = q->queue;
/* No data produced yet? */
if (!curr) {
/* If the done flag is set, we're done. */
if (q->done) {
pthread_mutex_unlock(&(q->lock));
errno = 0;
return NULL;
}
/* Wait for a signal on 'more'. */
pthread_cond_wait(&(q->more), &(q->lock));
continue;
}
/* Detach and done. */
q->queue = curr->next;
curr->next = NULL;
q->consumed++;
pthread_mutex_unlock(&(q->lock));
return curr;
}
}
and freely works on it. Note that the above does not examine the done flag unless the queue is empty.
When it has completed the work on the packet, the consumer returns it to the unused queue:
int data_queue_append_unused(struct data_queue *q, struct data_packet *p)
{
/* Safety check */
if (!q || !p) {
errno = EINVAL;
return -1;
}
/* Clear it. */
p->used = 0;
pthread_mutex_lock(&(q->lock));
/* Prepend to unused queue. */
p->next = q->unused;
q->unused = p;
/* Signal a waiter that a new packet is available. */
pthread_cond_signal(&(q->room));
/* Done. */
pthread_mutex_unlock(&(q->lock));
return 0;
}
This approach allows one or more consumers and one or more producers to work on their own packets at their own pace, without holding any locks while doing so, and without copying the data itself around. However, the packet size and the number of packets concurrently being worked on are limited.
The queue must be initialized with an unused-packet count of at least the total number of producers and consumers; I prefer about twice that, to maximize throughput when the time taken by each varies a bit. The above does, however, allow removing empty packets from the unused queue, and/or appending new empty packets to it, at any point in time. (When appending, remember to signal on the data queue's room condition variable.)
Finally, note that the produced and consumed counts refer to the queue itself. If you want consumed to reflect the number of packets already consumed, you can move the q->consumed++ from data_queue_get() to data_queue_append_unused() instead.
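For completeness, the intended packet life cycle looks like this on each side (a sketch under the declarations above; the "work" done on the data is made up, and the queue is assumed to have been created beforehand with something like data_queue_init(&q, 5000, 8)):
/* Producer side: grab an empty packet, fill it, append it. */
static void produce_one(struct data_queue *q)
{
    struct data_packet *p = data_queue_get_unused(q);
    if (!p)
        return;                      /* done flag set (errno == 0) or error */
    for (size_t i = 0; i < p->size; i++)
        p->data[i] = (double)i;      /* fill with whatever was produced */
    p->used = p->size;               /* must not exceed p->size */
    data_queue_append(q, p);
}

/* Consumer side: grab a filled packet, use it, recycle it. */
static void consume_one(struct data_queue *q)
{
    struct data_packet *p = data_queue_get(q);
    if (!p)
        return;                      /* queue drained and done flag set */
    double sum = 0.0;
    for (size_t i = 0; i < p->used; i++)
        sum += p->data[i];           /* work on our own packet, no locks held */
    printf("packet sum: %f\n", sum);
    data_queue_append_unused(q, p);  /* recycle the packet */
}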
It will work, but be aware that the absolute maximum message size is 16 MB (HARD_MSGSIZEMAX) since Linux 3.5, and was 1 MB before that. The default message size limit is only 8 KB though, so you need to set it when you call mq_open() or your 5000 doubles won't fit in one message.
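If you do use a POSIX message queue anyway, the larger message size must be requested at creation time, along these lines (a sketch; the queue name and depth are arbitrary, and the system limit in /proc/sys/fs/mqueue/msgsize_max may need raising first):
#include <mqueue.h>
#include <fcntl.h>
#include <sys/stat.h>

#define COUNT 5000

/* Request messages big enough for one array of 5000 doubles.
 * Link with -lrt on older glibc. */
static mqd_t open_double_queue(void)
{
    struct mq_attr attr = {
        .mq_flags   = 0,
        .mq_maxmsg  = 4,                       /* queue depth */
        .mq_msgsize = COUNT * sizeof(double),  /* 40000 bytes > 8 KiB default */
    };
    return mq_open("/doubles", O_CREAT | O_RDWR, 0600, &attr);
}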
A message queue is meant to transfer data between processes. Since threads are part of the same process, there is no need to send data first to the kernel and then receive it back: all global data is already visible to every thread. Signalling mechanisms such as mutexes and condition variables are required to synchronize the availability of that data between threads.

Getting a segmentation fault (core dumped) error in Ubuntu

I am trying to build a program which functions as an assembler: it gets file names as command-line arguments and translates them to machine code.
The program compiles just fine and runs OK with one file name, but when I try to run it with several, the error appears after the first iteration.
I think there might be something wrong with the Clear() function (which flushes all the data allocated in the previous iteration), but I am not sure why. Note that this is partial code, but as I said, the program runs fine unless several files are used.
struct symbolStruct { // a structure which is used to absorb info about a tag, its place in memory and related flags
char *name;
int place;
unsigned int isEntry : 1;
unsigned int isData : 1;
unsigned int isExternal : 1;
struct symbolStruct *next;
};
typedef struct { // a structure which is used to absorb info about the operand structure of an instruction line
unsigned int numOfOperands : 2;
unsigned int addrMethSou : 2;
unsigned int addrMethDest : 2;
unsigned int operation : 4;
unsigned int extraWords : 2;
char *firstOperand;
char *secondOperand;
} OperandType;
typedef struct {
unsigned int row : WORD_SIZE;
} int15;
struct MachineCode { // a structure which is used to absorb machine code lines, and their location in the assembly file
unsigned int row : WORD_SIZE;
unsigned int line;
OperandType *structure;
struct MachineCode *next;
};
struct DataCode { // a structure which is used to absorb data and string elements (signed numbers and ascii characters)
unsigned int row : WORD_SIZE;
struct DataCode *next;
};
struct Operation { /* the main operation structure, contains pointers to all used lists, the ic and dc counters, the
current line number which is dealt with and the error flag. */
unsigned int ic;
unsigned int dc;
struct symbolStruct *externHead; // a pointer to a linked list of extern tags used in the assembly file, and their locations
struct symbolStruct *symbolHead; // a pointer to a linked list of all tags
struct DataCode *dataHead; // a pointer to a linked list of all data/string elements
struct MachineCode *machineHead; // a pointer to a linked list of all machine code rows
int linenumber;
unsigned int errorflag : 1; // raised in case of an error which triggered a warning
};
#include "header.h"
void FirstRun(struct Operation*, char *);
void DataUpdate(struct symbolStruct*,int);
void SecondRun(struct Operation *, char *);
void Clear(struct Operation *);
int main(int argc, char *argv[]) {
int i;
struct Operation programCore = {0,0,NULL,NULL,NULL,NULL,0,0};
for(i=1;i<argc;i++) {
char *fn = argv[i];
FirstRun(&programCore,fn);
DataUpdate(programCore.symbolHead,programCore.ic+INSTRUCTION_OFFSET);
SecondRun(&programCore,fn);
Clear(&programCore);
programCore.symbolHead = programCore.externHead = programCore.dataHead = programCore.machineHead = NULL;
}
if(argc < 2) {
fprintf(stderr,"No files selected.\n");
}
return 0;
}
/*Used to empty the linked lists and allocated memory after the program has finished one iteration. */
void Clear(struct Operation *programCore) {
/*f(pointer name) is there to hold a pointer to the allocated memory which is about to be flushed. */
struct MachineCode *machineHead = programCore->machineHead, *fMachineHead;
struct DataCode *dataHead = programCore->dataHead, *fDataHead;
struct symbolStruct *externHead = programCore->externHead, *fExternHead;
struct symbolStruct *symbolHead = programCore->symbolHead, *fSymbolHead;
while(machineHead != NULL) {
fMachineHead = machineHead;
machineHead = machineHead->next;
if(fMachineHead->structure != NULL) {
if(fMachineHead->structure->numOfOperands == 2)
free(fMachineHead->structure->secondOperand);
if(fMachineHead->structure->numOfOperands > 0)
free(fMachineHead->structure->firstOperand);
free(fMachineHead->structure);
}
free(fMachineHead);
}
while(dataHead != NULL) {
fDataHead = dataHead;
dataHead = dataHead->next;
free(fDataHead);
}
while(externHead != NULL) {
fExternHead = externHead;
externHead = externHead->next;
free(fExternHead->name);
free(fExternHead);
}
while(symbolHead != NULL) {
fSymbolHead = symbolHead;
symbolHead = symbolHead->next;
free(fSymbolHead->name);
free(fSymbolHead);
}
programCore->ic = programCore->dc = programCore->linenumber = programCore->errorflag = 0;
}
You free the nodes, but you never set the linked-list heads in the context struct (programCore) to NULL. I suspect you are then using pointers to freed memory blocks.
This line only copies the pointer:
struct MachineCode *machineHead = programCore->machineHead;
The while() loop does not clear programCore->machineHead.
To fix it, iterate directly on the head:
while(programCore->machineHead != NULL)
{
...
}
Well, by getting rid of
if(fMachineHead->structure->numOfOperands == 2)
free(fMachineHead->structure->secondOperand);
if(fMachineHead->structure->numOfOperands > 0)
free(fMachineHead->structure->firstOperand);
I have managed to solve the error, but now I am getting a new one -
main.c:242:13: error: request for member ‘symbolHead’ in something not a structure or union
main.c:242:38: error: request for member ‘externHead’ in something not a structure or union
main.c:243:13: error: request for member ‘dataHead’ in something not a structure or union
main.c:244:13: error: request for member ‘machineHead’ in something not a structure or union
Referring to the next line -
programCore.symbolHead = programCore.externHead = programCore.dataHead = programCore.machineHead = NULL;
Is there a problem with the way I wrote that? (Obviously yes, but I just don't see it).
Changed the Clear() function again and it seems to be working fine now.
/*Used to empty the linked lists and allocated memory after the program has finished one iteration. */
void Clear(struct Operation *programCore) {
/*f(pointer name) is there to hold a pointer to the allocated memory which is about to be flushed. */
struct MachineCode *machineRowPointer = programCore->machineHead, *fMachineRow;
struct DataCode *dataRowPointer = programCore->dataHead, *fDataRow;
struct symbolStruct *externSymbolPointer = programCore->externHead, *fExtern;
struct symbolStruct *symbolPointer = programCore->symbolHead, *fSymbol;
if(machineRowPointer != NULL) {
while(machineRowPointer != NULL) {
if(machineRowPointer->structure != NULL)
free(machineRowPointer->structure);
fMachineRow = machineRowPointer;
machineRowPointer = machineRowPointer->next;
free(fMachineRow);
}
programCore->machineHead = NULL;
}
if(dataRowPointer != NULL) {
while(dataRowPointer != NULL) {
fDataRow = dataRowPointer;
dataRowPointer = dataRowPointer->next;
free(fDataRow);
}
programCore->dataHead = NULL;
}
if(externSymbolPointer != NULL) {
while(externSymbolPointer != NULL) {
fExtern = externSymbolPointer;
externSymbolPointer = externSymbolPointer->next;
free(fExtern->name);
free(fExtern);
}
programCore->externHead = NULL;
}
if(symbolPointer != NULL) {
while(symbolPointer != NULL) {
fSymbol = symbolPointer;
symbolPointer = symbolPointer->next;
free(fSymbol->name);
free(fSymbol);
}
programCore->symbolHead = NULL;
}
programCore->ic = programCore->dc = programCore->linenumber = programCore->errorflag = 0;
}

How to make a FIFO buffer with can_frame structure inside?

At the moment I am working on a project in which a few processors communicate with each other over a CAN bus. The main controller (a BeagleBone) controls the other devices over that bus. Using the Linux SocketCAN framework I wrote a process that reads the CAN messages sent by the other devices, and now I want to put the messages I receive into a FIFO buffer and then process them.
So I need to write a FIFO buffer holding can_frame structures.
For example:
struct can_buffer {
struct can_frame *frames;
int head;
int tail;
int size;
};
can_buffer new_can_buffer (size_t capacity)
{
can_buffer rb = malloc(sizeof(struct can_buffer));
if (rb) {
/* One byte is used for detecting the full condition. */
rb->size = capacity + 1;
rb->frames = malloc(rb->size * sizeof(struct can_frame));
if (rb->frames)
can_buffer_reset(rb);
else {
free(rb);
return 0;
}
}
return rb;
}
size_t can_buffer_size(const struct can_buffer *rb)
{
return rb->size;
}
size_t can_buffer_capacity(const struct can_buffer *rb)
{
return can_buffer_buffer_size(rb) - 1;
}
size_t can_buffer_free(const struct can_buffer *rb)
{
if (rb->head >= rb->tail)
return can_buffer_capacity(rb) - (rb->head - rb->tail);
else
return rb->tail - rb->head - 1;
}
int can_buffer_is_full(const struct can_buffer *rb)
{
return can_buffer_free(rb) == 0;
}
int can_buffer_is_empty(const struct can_buffer *rb)
{
return can_buffer_free(rb) ==can_buffer_capacity(rb);
}
void can_buffer_reset(can_buffer rb)
{
rb->head = rb->tail = 0;
}
.........
........
/* Add message to the end of the queue. */
void can_buffer_push(struct can_buffer *cb, struct can_frame *frame)
{
memcpy(&cb->frames[cb->tail], frame, sizeof(struct can_frame));
cb->tail = (cb->tail + 1) % cb->size;
}
/* Retrieve message from the start of the queue. */
can_frame *can_buffer_pop(struct can_buffer *cb)
{
struct can_frame *frame;
memcpy(frame, &cb->frames[cb->head], sizeof(struct can_frame));
cb->head = (cb->head + 1) % cb->size;
return frame;
}
But I cannot get it to work. I think the problem is that every element stored in the buffer is itself a structure (with int, char, etc. members), but I do not know how to solve this issue.
How can I make a FIFO buffer that stores can_frame structures?
I need to write this in C.
In main I call:
can_buffer can_buff;
can_buff = new_can_buffer(100);
can_buffer_push(can_buff,frame);
where frame is the can_frame I received and can_buff is the FIFO buffer.
Well, you have incompletely modified the ringbuf routines. Specifically, you don't allocate enough space for the structures here:
if (rb) {
/* One byte is used for detecting the full condition. */
rb->size = capacity + 1;
rb->frames = malloc(rb->size);
if (rb->frames)
ringbuf_reset(rb);
else {
free(rb);
return 0;
}
}
The malloc needs to be
rb->frames = malloc(rb->size * sizeof(struct can_frame));
And then you should update the ringbuf_reset() call on the next line to your renamed can_buffer_reset().
Addendum:
I just noticed that you also need to update your ringbuf_reset() function to set rb->head = rb->tail = 0.
Addendum 2:
Referencing the newly added code, can_buffer_pop() will not work correctly: it doesn't check whether a message exists, and it copies through an uninitialized pointer instead of into allocated memory for the popped message.
There is also a typo in can_buffer_capacity().
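For illustration, a fixed pop could copy into caller-provided storage and report whether a message was available (a sketch; the changed signature is mine, not the OP's, and it assumes the same one-reserved-slot full/empty scheme as the push):
/* Pop the oldest frame into *out. Returns 1 on success, 0 if the
 * buffer is empty (head has caught up with tail). */
int can_buffer_pop(struct can_buffer *cb, struct can_frame *out)
{
    if (cb->head == cb->tail)
        return 0;
    memcpy(out, &cb->frames[cb->head], sizeof(struct can_frame));
    cb->head = (cb->head + 1) % cb->size;
    return 1;
}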
Editorial:
I would strongly suggest writing a simple test program that executes these functions. It's frustrating but will catch a number of these small gotchas.

How to synchronize the following scenario

Here's a minimal example of what I'm trying to do. Right now you'll see, if you run this, that the example takes a little more than 10 seconds to complete. It should take less than 2 seconds. The problem is that there is a race condition: the loop takes too long, and the SetEvent occurs before the WaitForSingleObject call can pick it up. What would be nice is if the event could trigger but WaitForSingleObject could still somehow know it had triggered.
What happens here is that data is generated, which could take a long time. That data is then sent across the network, which could take even longer. So I want to queue data to be sent and then go about my merry way while another thread picks the data up and sends it out. That way I can queue infinitely until I have nothing left to do, and then the main thread joins the sending thread until it has finished sending out all the network data.
#include <stdio.h>
#include <windows.h>
#include <unistd.h>
#define PIPE_NAME "\\\\.\\pipe\\testpipe"
void copy_protocol_buffer(struct protocol_buffer *in, struct protocol_buffer *out);
DWORD PipeClientStartSendBufferThread(struct client_pipe_settings *pipe_settings, LPDWORD lpThreadId);
DWORD InitializeClientPipeSettings(struct client_pipe_settings *pipe_settings);
void initialize_protocol_buffer(struct protocol_buffer *protocol_buffer);
struct protocol_buffer {
size_t length;
size_t location;
int data_type;
char *data;
struct protocol_buffer *next;
};
struct client_pipe_settings {
HANDLE hPipe;
LPCTSTR name;
DWORD pipe_timeout;
HANDLE write_event;
struct protocol_buffer *fifo;
HANDLE send_thread;
};
DWORD WINAPI PipeClientSendThread(LPVOID client_pipe_settings_object) {
struct client_pipe_settings *pipe_settings = (struct client_pipe_settings *)client_pipe_settings_object;
struct protocol_buffer *buf = NULL;
while(1) {
WaitForSingleObject(pipe_settings->write_event, 10000);
if (buf == NULL) {
buf = pipe_settings->fifo;
} else {
struct protocol_buffer *fifo_protocol_buffer = buf->next;
free(buf);
buf = fifo_protocol_buffer;
if(buf->length == 0)
//signal to quit
return 0;
}
//Send data over the network
Sleep(500);
}
return 0;
}
DWORD PipeQueueBuffer(struct client_pipe_settings *pipe_settings, struct protocol_buffer *buf)
{
struct protocol_buffer *out_protocol_buffer = (struct protocol_buffer *)malloc(sizeof *buf);
if(out_protocol_buffer == NULL)
exit(1);
copy_protocol_buffer(buf, out_protocol_buffer);
if (pipe_settings->fifo == NULL) {
pipe_settings->fifo = out_protocol_buffer;
}
else
{
struct protocol_buffer *last_protocol_buffer = pipe_settings->fifo;
while(last_protocol_buffer->next != NULL)
{
last_protocol_buffer = last_protocol_buffer->next;
}
last_protocol_buffer->next = out_protocol_buffer;
}
if(!SetEvent(pipe_settings->write_event))
return GetLastError();
return ERROR_SUCCESS;
}
int main(void) {
struct client_pipe_settings pipe_settings;
InitializeClientPipeSettings(&pipe_settings);
DWORD dwThreadId = 0;
PipeClientStartSendBufferThread(&pipe_settings, &dwThreadId);
//Generate data which could take a while
Sleep(1000);
struct protocol_buffer buf;
initialize_protocol_buffer(&buf);
buf.length = 5;
buf.data = (char *)malloc(5);
buf.data[0] = 'b';
buf.data[1] = 'l';
buf.data[2] = 'a';
buf.data[3] = 'h';
buf.data[4] = '\0';
PipeQueueBuffer(&pipe_settings, &buf);
Sleep(100);
PipeQueueBuffer(&pipe_settings, &buf);
buf.length = 0;
PipeQueueBuffer(&pipe_settings, &buf);
WaitForSingleObject(pipe_settings.send_thread, 100000);
}
DWORD InitializeClientPipeSettings(struct client_pipe_settings *pipe_settings)
{
pipe_settings->write_event = CreateEvent(NULL, 0, 0, NULL);
if(pipe_settings->write_event == NULL)
return GetLastError();
pipe_settings->hPipe = INVALID_HANDLE_VALUE;
pipe_settings->fifo = NULL;
pipe_settings->send_thread = NULL;
return ERROR_SUCCESS;
}
DWORD PipeClientStartSendBufferThread(struct client_pipe_settings *pipe_settings, LPDWORD lpThreadId)
{
HANDLE h = CreateThread(NULL, 0, PipeClientSendThread, pipe_settings, 0, lpThreadId);
if(h == NULL)
return GetLastError();
pipe_settings->send_thread = h;
return ERROR_SUCCESS;
}
void copy_protocol_buffer(struct protocol_buffer *in, struct protocol_buffer *out) {
out->data_type = in->data_type;
out->length = in->length;
out->location = in->location;
out->next = in->next;
out->data = (char*)malloc(in->length);
if (out->data == NULL) {
exit(1);
}
memcpy(out->data, in->data, in->length);
}
void initialize_protocol_buffer(struct protocol_buffer *protocol_buffer)
{
protocol_buffer->data = NULL;
protocol_buffer->length = 0;
protocol_buffer->location = 0;
protocol_buffer->next = NULL;
protocol_buffer->data_type = 0;
}
Your mechanism is simply wrong. It isn't about a SetEvent that comes too early.
If the event is set, it might have been set "more than once". PipeClientSendThread should wait for the event, and when the event is set it should send all elements that have reached the queue. Your code posts 3 elements into the queue, but the event is set once, the thread runs and sends only one element at a time, and so the remaining elements are only sent when the timeout is reached.
You also have a massive problem: the queue must be protected by a critical section or a mutex. You modify and loop over elements in the queue while another thread is also reading and modifying it.
Use a critical section and a std::queue... this will also help you get rid of the free/malloc bookkeeping.
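In plain C, matching the question's structures, the shape of that fix might look like this (a sketch; queue_lock and pop_all() are invented names, and error handling is omitted):
static CRITICAL_SECTION queue_lock;  /* InitializeCriticalSection(&queue_lock) at startup */

/* Detach the whole queued chain in one short critical section. */
static struct protocol_buffer *pop_all(struct client_pipe_settings *ps)
{
    struct protocol_buffer *head;
    EnterCriticalSection(&queue_lock);
    head = ps->fifo;
    ps->fifo = NULL;
    LeaveCriticalSection(&queue_lock);
    return head;
}

DWORD WINAPI PipeClientSendThread(LPVOID arg)
{
    struct client_pipe_settings *ps = (struct client_pipe_settings *)arg;
    for (;;) {
        /* Auto-reset event: it stays signaled if set again while we drain,
         * so no timeout is needed. */
        WaitForSingleObject(ps->write_event, INFINITE);
        struct protocol_buffer *buf = pop_all(ps);
        while (buf) {
            struct protocol_buffer *next = buf->next;
            int quit = (buf->length == 0);   /* zero length marks shutdown */
            /* ... send buf over the network here ... */
            free(buf->data);
            free(buf);
            if (quit)
                return 0;
            buf = next;
        }
    }
}
PipeQueueBuffer() would then append to ps->fifo inside the same critical section before calling SetEvent().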
