At the moment I am working on a project in which a few processors communicate with each other over a CAN bus. The main controller (a BeagleBone) controls the other devices using the CAN bus. Using the Linux SocketCAN framework I wrote a process that reads the CAN messages sent by the other devices, and now I want to put the messages I receive into a FIFO buffer and then process them.
So I need to write a FIFO buffer that holds can_frame structures.
For example:
struct can_buffer {
    struct can_frame *frames;
    int head;
    int tail;
    int size;
};
can_buffer new_can_buffer(size_t capacity)
{
    can_buffer rb = malloc(sizeof(struct can_buffer));
    if (rb) {
        /* One byte is used for detecting the full condition. */
        rb->size = capacity + 1;
        rb->frames = malloc(rb->size * sizeof(struct can_frame));
        if (rb->frames)
            can_buffer_reset(rb);
        else {
            free(rb);
            return 0;
        }
    }
    return rb;
}
size_t can_buffer_size(const struct can_buffer *rb)
{
    return rb->size;
}

size_t can_buffer_capacity(const struct can_buffer *rb)
{
    return can_buffer_buffer_size(rb) - 1;
}

size_t can_buffer_free(const struct can_buffer *rb)
{
    if (rb->head >= rb->tail)
        return can_buffer_capacity(rb) - (rb->head - rb->tail);
    else
        return rb->tail - rb->head - 1;
}

int can_buffer_is_full(const struct can_buffer *rb)
{
    return can_buffer_free(rb) == 0;
}

int can_buffer_is_empty(const struct can_buffer *rb)
{
    return can_buffer_free(rb) == can_buffer_capacity(rb);
}

void can_buffer_reset(can_buffer rb)
{
    rb->head = rb->tail = 0;
}
.........
........
/* Add message to the end of the queue. */
void can_buffer_push(struct can_buffer *cb, struct can_frame *frame)
{
    memcpy(&cb->frames[cb->tail], frame, sizeof(struct can_frame));
    cb->tail = (cb->tail + 1) % cb->size;
}

/* Retrieve message from the start of the queue. */
can_frame *can_buffer_pop(struct can_buffer *cb)
{
    struct can_frame *frame;
    memcpy(frame, &cb->frames[cb->head], sizeof(struct can_frame));
    cb->head = (cb->head + 1) % cb->size;
    return frame;
}
But I cannot get it to work. I think the problem is that every can_frame stored inside is itself a structure (containing an int, chars, etc.), but I do not know how to solve this issue.
How can I make a FIFO buffer that can store the can_frame structure inside?
I need to write this in the C language.
In main I call:

can_buffer can_buff;
can_buff = new_can_buffer(100);
can_buffer_push(can_buff, frame);

where frame is the can_frame I received and can_buff is the FIFO buffer.
Well, you have incompletely modified the ringbuf routines. Specifically, you don't allocate enough space for the structures here:
if (rb) {
    /* One byte is used for detecting the full condition. */
    rb->size = capacity + 1;
    rb->frames = malloc(rb->size);
    if (rb->frames)
        ringbuf_reset(rb);
    else {
        free(rb);
        return 0;
    }
}
The malloc needs to be
rb->frames = malloc(rb->size * sizeof(struct can_frame));
And then you should update the ringbuf_reset() call on the next line to your renamed can_buffer_reset().
Addendum:
I just noticed that you also need to update your ringbuf_reset() function to rb->head = rb->tail = 0
Addendum 2:
Referencing the newly added code, can_buffer_pop() will not work correctly: it doesn't check whether a message exists, and it doesn't allocate memory for the popped message.
There is also a typo in can_buffer_capacity().
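For example, a minimal sketch of a corrected pop, copying into caller-provided storage instead of returning a pointer to memory that was never allocated (the output-parameter signature is my suggestion, not your original API):

/* Sketch: pop into caller-provided storage.
 * Returns 0 on success, -1 if the buffer is empty. */
int can_buffer_pop(struct can_buffer *cb, struct can_frame *out)
{
    if (can_buffer_is_empty(cb))
        return -1;                 /* nothing to pop */
    memcpy(out, &cb->frames[cb->head], sizeof(struct can_frame));
    cb->head = (cb->head + 1) % cb->size;
    return 0;
}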
Editorial:
I would strongly suggest writing a simple test program that executes these functions. It's frustrating but will catch a number of these small gotchas.
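Something as small as this would do, assuming the can_buffer typedef from your elided code and the pop() signature sketched above (can_dlc is the classic struct can_frame field name from linux/can.h):

#include <linux/can.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    can_buffer cb = new_can_buffer(4);
    struct can_frame in, out;

    if (!cb)
        return 1;

    memset(&in, 0, sizeof(in));
    in.can_id = 0x123;
    in.can_dlc = 2;
    in.data[0] = 0xAB;
    in.data[1] = 0xCD;

    can_buffer_push(cb, &in);
    if (can_buffer_pop(cb, &out) == 0)
        printf("popped id 0x%X dlc %u data %02X %02X\n",
               (unsigned)out.can_id, (unsigned)out.can_dlc,
               out.data[0], out.data[1]);
    else
        printf("buffer unexpectedly empty\n");
    return 0;
}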
Related
I suspect this is a basic issue; I'm just having a moment of dumbness.
I'm trying to make a very simple MQTT client with the MQTT-C library as part of a testing tool for my other solution. When I receive a subscribed message I have a weird issue: inside the mqtt-c library (just before a call to a function pointer, which happens to be my subscriber callback) everything seems OK, but right after the call to my function, dereferencing the pointer shows a struct with completely wrong values inside. If I go one step up in the call stack, back to mqtt, the values inspector (set to dereference a uint32_t value, precisely the pointer's address) again shows the correct structure.
The structure, a pointer to which mqtt passes to the callback, is allocated on the stack inside a function in the mqtt library. I don't think the thread could change, which would invalidate the stack. I have also included mqtt.h, which means that my function should understand the structure.
I will paste some relevant code snippets; any help will be appreciated, including general advice.
ssize_t __mqtt_recv(struct mqtt_client *client)
{
    struct mqtt_response response;
    ssize_t mqtt_recv_ret = MQTT_OK;

    MQTT_PAL_MUTEX_LOCK(&client->mutex);

    /* read until there is nothing left to read, or there was an error */
    while (mqtt_recv_ret == MQTT_OK) {
        /* read in as many bytes as possible */
        ssize_t rv, consumed;
        struct mqtt_queued_message *msg = NULL;

        rv = mqtt_pal_recvall(client->socketfd, client->recv_buffer.curr, client->recv_buffer.curr_sz, 0);
        if (rv < 0) {
            /* an error occurred */
            client->error = (enum MQTTErrors)rv;
            MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
            return rv;
        } else {
            client->recv_buffer.curr += rv;
            client->recv_buffer.curr_sz -= (unsigned long)rv;
        }

        /* attempt to parse */
        consumed = mqtt_unpack_response(&response, client->recv_buffer.mem_start, (size_t) (client->recv_buffer.curr - client->recv_buffer.mem_start));
        if (consumed < 0) {
            client->error = (enum MQTTErrors)consumed;
            MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
            return consumed;
        } else if (consumed == 0) {
            /* if curr_sz is 0 then the buffer is too small to ever fit the message */
            if (client->recv_buffer.curr_sz == 0) {
                printf("receive buff sz is zero??\n");
                client->error = MQTT_ERROR_RECV_BUFFER_TOO_SMALL;
                MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
                return MQTT_ERROR_RECV_BUFFER_TOO_SMALL;
            }
            /* just need to wait for the rest of the data */
            MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
            return MQTT_OK;
        }

        switch (response.fixed_header.control_type) {
            //(...)
            case MQTT_CONTROL_PUBLISH:
                /* stage response, none if qos==0, PUBACK if qos==1, PUBREC if qos==2 */
                if (response.decoded.publish.qos_level == 1) {
                    rv = __mqtt_puback(client, response.decoded.publish.packet_id);
                    if (rv != MQTT_OK) {
                        client->error = (enum MQTTErrors)rv;
                        mqtt_recv_ret = rv;
                        break;
                    }
                } else if (response.decoded.publish.qos_level == 2) {
                    /* check if this is a duplicate */
                    if (mqtt_mq_find(&client->mq, MQTT_CONTROL_PUBREC, &response.decoded.publish.packet_id) != NULL) {
                        break;
                    }
                    rv = __mqtt_pubrec(client, response.decoded.publish.packet_id);
                    if (rv != MQTT_OK) {
                        client->error = (enum MQTTErrors)rv;
                        mqtt_recv_ret = rv;
                        break;
                    }
                }
                /* call publish callback */
                printf("address: %d; size: %d\n", (uint32_t) &response.decoded.publish, response.decoded.publish.application_message_size);
                //all ok here.
                client->publish_response_callback(&client->publish_response_callback_state, &response.decoded.publish);
                break;
            //(...)
        }

        {
            /* we've handled the response, now clean the buffer */
            void *dest = (unsigned char*)client->recv_buffer.mem_start;
            void *src  = (unsigned char*)client->recv_buffer.mem_start + consumed;
            size_t n = (size_t) (client->recv_buffer.curr - client->recv_buffer.mem_start - consumed);
            memmove(dest, src, n);
            client->recv_buffer.curr -= consumed;
            client->recv_buffer.curr_sz += (unsigned long)consumed;
        }
    }

    /* In case there was some error handling the (well formed) message, we end up here */
    MQTT_PAL_MUTEX_UNLOCK(&client->mutex);
    return mqtt_recv_ret;
}
struct mqtt_response {
    /** @brief The mqtt_fixed_header of the deserialized packet. */
    struct mqtt_fixed_header fixed_header;

    /**
     * @brief A union of the possible responses from the broker.
     *
     * @note The fixed_header contains the control type. This control type corresponds to the
     *       member of this union that should be accessed. For example if
     *       fixed_header.control_type == \c MQTT_CONTROL_PUBLISH then
     *       decoded.publish should be accessed.
     */
    union {
        struct mqtt_response_connack connack;
        struct mqtt_response_publish publish;
        struct mqtt_response_puback puback;
        struct mqtt_response_pubrec pubrec;
        struct mqtt_response_pubrel pubrel;
        struct mqtt_response_pubcomp pubcomp;
        struct mqtt_response_suback suback;
        struct mqtt_response_unsuback unsuback;
        struct mqtt_response_pingresp pingresp;
    } decoded;
};
static void netReqHandler(void) {
    int sockfd = myOpenSocket(ipaddr, port); //open_nb_socket(ipaddr, port);
    mqtt_init(&client, sockfd, sendbuf, buffersizes, receivebuf, buffersizes, publish_callback);
    printf("client error is %d\n", (&client)->error);
    /* ... */
}

void publish_callback(void** unused, struct mqtt_response_publish *published) {
    printf("app msg size : %d, addr: %d\n", published->application_message_size, (uint32_t) published);
    char *nullTerminatedMessage = malloc(published->application_message_size + 1);
    strncpy(nullTerminatedMessage, published->application_message, published->application_message_size);
    nullTerminatedMessage[published->application_message_size] = '\0';
    /* ... */
}
Is it possible to transfer data between threads, producer/consumer style, using a POSIX message queue?
I need to transfer an array of doubles with 5000 elements from a producer thread to a consumer thread for processing.
Is a POSIX message queue designed for such a purpose?
POSIX message queues are absolutely the wrong tool for that.
All you actually need is a buffer, a couple of counters or pointers, a mutex, and a couple of condition variables:
static pthread_mutex_t buffer_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  buffer_more = PTHREAD_COND_INITIALIZER;
static pthread_cond_t  buffer_room = PTHREAD_COND_INITIALIZER;

/* Pointer and counters are volatile, since different threads may change them
   whenever they hold the buffer_lock. */
static double * volatile buffer_data = NULL;
static volatile size_t buffer_size = 0;
static volatile size_t buffer_next = 0; /* First/next buffered value */
static volatile size_t buffer_ends = 0; /* First unused slot in buffer */

/* Optional flag to indicate no more data to be produced or consumed */
static volatile int buffer_done = 0;

/* Helper function to repack the buffer; caller must hold buffer_lock. */
static inline void buffer_repack_locked(void)
{
    if (buffer_ends > buffer_next) {
        if (buffer_next > 0) {
            memmove(buffer_data, buffer_data + buffer_next,
                    (buffer_ends - buffer_next) * sizeof buffer_data[0]);
            buffer_ends -= buffer_next;
            buffer_next = 0;
        }
    } else {
        buffer_next = 0;
        buffer_ends = 0;
    }
}
To grow the buffer (at any point), you use
static int buffer_resize(size_t new_size)
{
    pthread_mutex_lock(&buffer_lock);

    /* First, repack the buffer to the start of the area. */
    buffer_repack_locked();

    /* Do not lose any data, however. */
    if (new_size < buffer_ends)
        new_size = buffer_ends;

    /* Reallocate. */
    void *new_data = realloc(buffer_data, new_size * sizeof buffer_data[0]);
    if (!new_data) {
        /* Not enough memory to reallocate; old data still exists. */
        pthread_mutex_unlock(&buffer_lock);
        return -1;
    }

    /* Success. */
    buffer_data = new_data;
    buffer_size = new_size;

    /* Wake up any waiters waiting on room in the buffer, just to be sure. */
    pthread_cond_broadcast(&buffer_room);
    pthread_mutex_unlock(&buffer_lock);
    return 0;
}
Producer or producers add a block of data to the buffer using
static void buffer_add(const double *data, size_t count)
{
    pthread_mutex_lock(&buffer_lock);
    buffer_repack_locked();

    while (count > 0) {
        if (buffer_ends >= buffer_size) {
            /* Buffer is full. Wait for more room, repack, retry. */
            pthread_cond_wait(&buffer_room, &buffer_lock);
            buffer_repack_locked();
            continue;
        }

        /* How much can we add? */
        size_t size = buffer_size - buffer_ends;
        if (size > count)
            size = count;

        memmove(buffer_data + buffer_ends, data, size * sizeof buffer_data[0]);
        buffer_ends += size;

        /* Wake up a consumer waiting on more data */
        pthread_cond_signal(&buffer_more);

        /* Update to reflect the data already added */
        data += size;
        count -= size;
    }

    /* All data added. */
    pthread_mutex_unlock(&buffer_lock);
}
Similarly, consumers get data from the buffer using
static size_t buffer_get(double *data, size_t min_size, size_t max_size)
{
    size_t size, have = 0;

    /* Make sure min and max size are in the right order. */
    if (max_size < min_size) {
        size = max_size;
        max_size = min_size;
        min_size = size;
    }

    pthread_mutex_lock(&buffer_lock);
    while (1) {

        /* No more data incoming? */
        if (buffer_done) {
            pthread_mutex_unlock(&buffer_lock);
            return have;
        }

        /* Buffer empty? */
        if (buffer_next >= buffer_ends) {
            pthread_cond_wait(&buffer_more, &buffer_lock);
            continue;
        }

        /* How much can we grab? */
        size = buffer_ends - buffer_next;
        if (have + size > max_size)
            size = max_size - have;

        memmove(data, buffer_data + buffer_next,
                size * sizeof buffer_data[0]);
        buffer_next += size;

        /* Account for the data just copied out. */
        data += size;
        have += size;

        /* Wake up a waiter for empty room in the buffer. */
        pthread_cond_signal(&buffer_room);

        /* Enough data to return? */
        if (have >= min_size) {
            pthread_mutex_unlock(&buffer_lock);
            return have;
        }
    }
}
While this does copy the data around quite a bit, it allows both producers and consumers to work on their own data in any size "chunks" they wish.
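For instance, a rough way to wire this up for the original question, one producer pushing 5000 doubles and one consumer draining them in chunks (the thread scaffolding here is my sketch, not part of the scheme itself):

static void *producer(void *arg)
{
    double block[5000];
    for (size_t i = 0; i < 5000; i++)
        block[i] = (double)i;

    buffer_add(block, 5000);

    /* Signal completion under the lock, and wake any waiting consumer. */
    pthread_mutex_lock(&buffer_lock);
    buffer_done = 1;
    pthread_cond_broadcast(&buffer_more);
    pthread_mutex_unlock(&buffer_lock);
    return NULL;
}

static void *consumer(void *arg)
{
    double chunk[512];
    size_t n;

    /* Request at least 1 and at most 512 doubles per call. */
    while ((n = buffer_get(chunk, 1, 512)) > 0) {
        /* ... process n doubles in chunk[] ... */
    }
    return NULL;
}

int main(void)
{
    pthread_t prod, cons;

    buffer_resize(1024);   /* initial capacity; growth is explicit */
    pthread_create(&prod, NULL, producer, NULL);
    pthread_create(&cons, NULL, consumer, NULL);
    pthread_join(prod, NULL);
    pthread_join(cons, NULL);
    free(buffer_data);
    return 0;
}

Note that buffer_get() honours buffer_done before checking for remaining data, so the flag should only be raised once everything has been added, as the producer above does.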
If your producers and consumers work on matrices, or other "packetized" data of some maximum size, it makes sense to use singly-linked lists of preallocated packets of data, and not a linear buffer:
struct data_packet {
    struct data_packet *next;
    size_t size;   /* Maximum size of data */
    size_t used;   /* Or rows, cols if a matrix */
    double data[];
};

struct data_queue {
    pthread_mutex_t lock;
    pthread_cond_t more;
    pthread_cond_t room;
    struct data_packet *queue;
    struct data_packet *unused;
    unsigned long produced; /* Optional, just information */
    unsigned long consumed; /* Optional, just information */
    volatile int done;      /* Set when no more to be produced */
};

static void free_data_packets(struct data_packet *root)
{
    while (root) {
        struct data_packet *curr = root;
        root = root->next;
        curr->next = NULL;
        curr->size = 0;
        free(curr);
    }
}
To initialize a data queue, we also need to generate some empty packets in it. This must be done before any threads start working with the queue:
/* Returns the count of data packets actually created,
   or 0 if an error occurs (with errno set).
*/
size_t data_queue_init(struct data_queue *q,
                       const size_t size,
                       const size_t count)
{
    if (!q) {
        errno = EINVAL;
        return 0;
    }

    pthread_mutex_init(&(q->lock), NULL);
    pthread_cond_init(&(q->more), NULL);
    pthread_cond_init(&(q->room), NULL);
    q->queue = NULL;
    q->unused = NULL;
    q->produced = 0;
    q->consumed = 0;
    q->done = 0;

    /* Makes no sense to request no data packets. */
    if (count < 1) {
        errno = EINVAL;
        return 0;
    }

    /* Create a chain of empty packets of desired size. */
    struct data_packet *curr, *unused = NULL;
    size_t have = 0;
    while (have < count) {
        curr = malloc( sizeof (struct data_packet)
                     + size * sizeof curr->data[0]);
        if (!curr)
            break;
        curr->next = unused;
        curr->size = size;
        curr->used = 0;
        unused = curr;
        have++;
    }
    if (!have) {
        errno = ENOMEM;
        return 0;
    }

    /* Attach chain to data queue; done. */
    q->unused = unused;
    return have;
}
Producers grab a free packet from the data queue:
struct data_packet *data_queue_get_unused(struct data_queue *q)
{
    /* Safety check. */
    if (!q) {
        errno = EINVAL;
        return NULL;
    }

    pthread_mutex_lock(&(q->lock));
    while (!q->done) {
        struct data_packet *curr = q->unused;

        /* No unused data packets free? */
        if (!curr) {
            pthread_cond_wait(&(q->room), &(q->lock));
            continue;
        }

        /* Detach and clear. */
        q->unused = curr->next;
        curr->next = NULL;
        curr->used = 0;

        /* Successful. */
        pthread_mutex_unlock(&(q->lock));
        return curr;
    }

    /* Done is set. */
    pthread_mutex_unlock(&(q->lock));
    errno = 0;
    return NULL;
}
The above may return NULL, when an error occurs (errno will be set to a nonzero error), or when the done flag is set (errno will be zero).
The producer must remember to set the used field to reflect the amount of data it produced in the packet. (It must not exceed size, though.)
The producer can work on the data packet as they wish; it is their "own", and no locking is needed.
When the producer has completed the packet, they append it to the data queue:
int data_queue_append(struct data_queue *q, struct data_packet *p)
{
    /* Safety check. */
    if (!q || !p) {
        errno = EINVAL;
        return -1;
    }

    p->next = NULL;

    pthread_mutex_lock(&(q->lock));

    /* Append to queue. */
    struct data_packet *prev = q->queue;
    if (!prev) {
        q->queue = p;
    } else {
        while (prev->next)
            prev = prev->next;
        prev->next = p;
    }
    q->produced++;

    /* Wake up a waiter for a new packet. */
    pthread_cond_signal(&(q->more));

    /* Done. */
    pthread_mutex_unlock(&(q->lock));
    return 0;
}
Similarly, a consumer grabs the next packet from the queue,
struct data_packet *data_queue_get(struct data_queue *q)
{
    /* Safety check. */
    if (!q) {
        errno = EINVAL;
        return NULL;
    }

    pthread_mutex_lock(&(q->lock));
    while (1) {
        struct data_packet *curr = q->queue;

        /* No data produced yet? */
        if (!curr) {
            /* If the done flag is set, we're done. */
            if (q->done) {
                pthread_mutex_unlock(&(q->lock));
                errno = 0;
                return NULL;
            }
            /* Wait for a signal on 'more'. */
            pthread_cond_wait(&(q->more), &(q->lock));
            continue;
        }

        /* Detach and done. */
        q->queue = curr->next;
        curr->next = NULL;
        q->consumed++;

        pthread_mutex_unlock(&(q->lock));
        return curr;
    }
}
and freely works on it. Note that the above does not examine the done flag unless the queue is empty.
When it has completed the work on the packet, it returns it to the unused list:
int data_queue_append_unused(struct data_queue *q, struct data_packet *p)
{
    /* Safety check */
    if (!q || !p) {
        errno = EINVAL;
        return -1;
    }

    /* Clear it. */
    p->used = 0;

    pthread_mutex_lock(&(q->lock));

    /* Prepend to unused queue. */
    p->next = q->unused;
    q->unused = p;

    /* Signal a waiter that a new packet is available. */
    pthread_cond_signal(&(q->room));

    /* Done. */
    pthread_mutex_unlock(&(q->lock));
    return 0;
}
This approach allows one or more consumers and one or more producers to work on their own packets at their own pace, without holding any locks while doing so, and without copying the data itself around. However, the packet size and the number of packets concurrently being worked on are limited.
The queue must be initialized with unused packet count at least the total number of producers and consumers; I prefer about twice that, to maximize throughput when the time taken by each varies a bit. The above, however, does allow removal of empty packets from the unused queue, and/or appending new empty packets to the unused queue, at any point in time. (When appending, remember to signal on the data queue room condition variable.)
Finally, note that the produced and consumed counts refer to the queue itself. If you want consumed to reflect the number of packets already consumed, you can move the q->consumed++ from data_queue_get() to data_queue_append_unused() instead.
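A rough usage sketch under the above definitions (the thread wiring, packet counts, and the final done-flag handshake are mine, purely for illustration):

static struct data_queue q;

static void *producer(void *arg)
{
    for (int i = 0; i < 100; i++) {
        struct data_packet *p = data_queue_get_unused(&q);
        if (!p)
            break;                      /* done flag set, or error */
        for (size_t j = 0; j < p->size; j++)
            p->data[j] = (double)j;
        p->used = p->size;              /* producer must set 'used' */
        data_queue_append(&q, p);
    }
    return NULL;
}

static void *consumer(void *arg)
{
    struct data_packet *p;
    while ((p = data_queue_get(&q)) != NULL) {
        /* ... process p->used doubles in p->data[] ... */
        data_queue_append_unused(&q, p);
    }
    return NULL;
}

int main(void)
{
    pthread_t prod, cons;

    if (data_queue_init(&q, 5000, 4) == 0)  /* 4 packets of 5000 doubles */
        return 1;
    pthread_create(&prod, NULL, producer, NULL);
    pthread_create(&cons, NULL, consumer, NULL);
    pthread_join(prod, NULL);

    /* Signal completion and wake all waiters. */
    pthread_mutex_lock(&q.lock);
    q.done = 1;
    pthread_cond_broadcast(&q.more);
    pthread_cond_broadcast(&q.room);
    pthread_mutex_unlock(&q.lock);

    pthread_join(cons, NULL);
    return 0;
}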
It will work, but be aware that the absolute maximum message size is 16 MB (HARD_MSGSIZEMAX) since Linux 3.5, and was 1 MB before that. The default message size limit is only 8 KB though, so you need to set it when you call mq_open() or your 5000 doubles won't fit in one message.
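If you do go the message queue route anyway, the size has to be requested explicitly. A sketch (the queue name /doubles is arbitrary; raising mq_msgsize above the 8 KB default may require adjusting /proc/sys/fs/mqueue/msgsize_max or privileges, and you link with -lrt):

#include <mqueue.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
    struct mq_attr attr = { 0 };
    attr.mq_maxmsg  = 4;                      /* messages queued at once */
    attr.mq_msgsize = 5000 * sizeof(double);  /* 40000 bytes per message */

    mqd_t mq = mq_open("/doubles", O_CREAT | O_RDWR, 0600, &attr);
    if (mq == (mqd_t)-1) {
        perror("mq_open");
        return 1;
    }
    mq_close(mq);
    return 0;
}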
A message queue is meant to transfer data between processes. Since threads are part of the same process, there is no need to send data first to the kernel and then receive it back: all global data is visible to all threads. Signalling mechanisms such as mutexes and condition variables are required to synchronize the availability of data between threads.
I am trying to understand the round-robin code for Multipath TCP scheduling. It is available here:
https://github.com/multipath-tcp/mptcp/blob/mptcp_v0.95/net/mptcp/mptcp_rr.c
I have had a lot of difficulty with it since there is no documentation.
Here is the code:
/* MPTCP Scheduler module selector. Highly inspired by tcp_cong.c */

#include <linux/module.h>
#include <net/mptcp.h>

static unsigned char num_segments __read_mostly = 1;
module_param(num_segments, byte, 0644);
MODULE_PARM_DESC(num_segments, "The number of consecutive segments that are part of a burst");

static bool cwnd_limited __read_mostly = 1;
module_param(cwnd_limited, bool, 0644);
MODULE_PARM_DESC(cwnd_limited, "if set to 1, the scheduler tries to fill the congestion-window on all subflows");

struct rrsched_priv {
    unsigned char quota;
};

static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
    return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
/* Is the sub-socket sk available to send the skb? */
static bool mptcp_rr_is_available(const struct sock *sk, const struct sk_buff *skb,
                                  bool zero_wnd_test, bool cwnd_test)
{
    const struct tcp_sock *tp = tcp_sk(sk);
    unsigned int space, in_flight;

    /* Set of states for which we are allowed to send data */
    if (!mptcp_sk_can_send(sk))
        return false;

    /* We do not send data on this subflow unless it is
     * fully established, i.e. the 4th ack has been received.
     */
    if (tp->mptcp->pre_established)
        return false;

    if (tp->pf)
        return false;

    if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) {
        /* If SACK is disabled, and we got a loss, TCP does not exit
         * the loss-state until something above high_seq has been acked.
         * (see tcp_try_undo_recovery)
         *
         * high_seq is the snd_nxt at the moment of the RTO. As soon
         * as we have an RTO, we won't push data on the subflow.
         * Thus, snd_una can never go beyond high_seq.
         */
        if (!tcp_is_reno(tp))
            return false;
        else if (tp->snd_una != tp->high_seq)
            return false;
    }

    if (!tp->mptcp->fully_established) {
        /* Make sure that we send in-order data */
        if (skb && tp->mptcp->second_packet &&
            tp->mptcp->last_end_data_seq != TCP_SKB_CB(skb)->seq)
            return false;
    }

    if (!cwnd_test)
        goto zero_wnd_test;

    in_flight = tcp_packets_in_flight(tp);
    /* Not even a single spot in the cwnd */
    if (in_flight >= tp->snd_cwnd)
        return false;

    /* Now, check if what is queued in the subflow's send-queue
     * already fills the cwnd.
     */
    space = (tp->snd_cwnd - in_flight) * tp->mss_cache;

    if (tp->write_seq - tp->snd_nxt > space)
        return false;

zero_wnd_test:
    if (zero_wnd_test && !before(tp->write_seq, tcp_wnd_end(tp)))
        return false;

    return true;
}

/* Are we not allowed to reinject this skb on tp? */
static int mptcp_rr_dont_reinject_skb(const struct tcp_sock *tp, const struct sk_buff *skb)
{
    /* If the skb has already been enqueued in this sk, try to find
     * another one.
     */
    return skb &&
        /* Has the skb already been enqueued into this subsocket? */
        mptcp_pi_to_flag(tp->mptcp->path_index) & TCP_SKB_CB(skb)->path_mask;
}
/* We just look for any subflow that is available */
static struct sock *rr_get_available_subflow(struct sock *meta_sk,
                                             struct sk_buff *skb,
                                             bool zero_wnd_test)
{
    const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
    struct sock *sk = NULL, *bestsk = NULL, *backupsk = NULL;
    struct mptcp_tcp_sock *mptcp;

    /* Answer data_fin on same subflow!!! */
    if (meta_sk->sk_shutdown & RCV_SHUTDOWN &&
        skb && mptcp_is_data_fin(skb)) {
        mptcp_for_each_sub(mpcb, mptcp) {
            sk = mptcp_to_sock(mptcp);

            if (tcp_sk(sk)->mptcp->path_index == mpcb->dfin_path_index &&
                mptcp_rr_is_available(sk, skb, zero_wnd_test, true))
                return sk;
        }
    }

    /* First, find the best subflow */
    mptcp_for_each_sub(mpcb, mptcp) {
        struct tcp_sock *tp;

        sk = mptcp_to_sock(mptcp);
        tp = tcp_sk(sk);

        if (!mptcp_rr_is_available(sk, skb, zero_wnd_test, true))
            continue;

        if (mptcp_rr_dont_reinject_skb(tp, skb)) {
            backupsk = sk;
            continue;
        }

        bestsk = sk;
    }

    if (bestsk) {
        sk = bestsk;
    } else if (backupsk) {
        /* It has been sent on all subflows once - let's give it a
         * chance again by restarting its pathmask.
         */
        if (skb)
            TCP_SKB_CB(skb)->path_mask = 0;
        sk = backupsk;
    }

    return sk;
}
/* Returns the next segment to be sent from the mptcp meta-queue.
 * (chooses the reinject queue if any segment is waiting in it, otherwise,
 * chooses the normal write queue).
 * Sets *@reinject to 1 if the returned segment comes from the
 * reinject queue. Sets it to 0 if it is the regular send-head of the meta-sk,
 * and sets it to -1 if it is a meta-level retransmission to optimize the
 * receive-buffer.
 */
static struct sk_buff *__mptcp_rr_next_segment(const struct sock *meta_sk, int *reinject)
{
    const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
    struct sk_buff *skb = NULL;

    *reinject = 0;

    /* If we are in fallback-mode, just take from the meta-send-queue */
    if (mpcb->infinite_mapping_snd || mpcb->send_infinite_mapping)
        return tcp_send_head(meta_sk);

    skb = skb_peek(&mpcb->reinject_queue);
    if (skb)
        *reinject = 1;
    else
        skb = tcp_send_head(meta_sk);

    return skb;
}
static struct sk_buff *mptcp_rr_next_segment(struct sock *meta_sk,
                                             int *reinject,
                                             struct sock **subsk,
                                             unsigned int *limit)
{
    const struct mptcp_cb *mpcb = tcp_sk(meta_sk)->mpcb;
    struct sock *choose_sk = NULL;
    struct mptcp_tcp_sock *mptcp;
    struct sk_buff *skb = __mptcp_rr_next_segment(meta_sk, reinject);
    unsigned char split = num_segments;
    unsigned char iter = 0, full_subs = 0;

    /* As we set it, we have to reset it as well. */
    *limit = 0;

    if (!skb)
        return NULL;

    if (*reinject) {
        *subsk = rr_get_available_subflow(meta_sk, skb, false);
        if (!*subsk)
            return NULL;

        return skb;
    }

retry:

    /* First, we look for a subflow who is currently being used */
    mptcp_for_each_sub(mpcb, mptcp) {
        struct sock *sk_it = mptcp_to_sock(mptcp);
        struct tcp_sock *tp_it = tcp_sk(sk_it);
        struct rrsched_priv *rr_p = rrsched_get_priv(tp_it);

        if (!mptcp_rr_is_available(sk_it, skb, false, cwnd_limited))
            continue;

        iter++;

        /* Is this subflow currently being used? */
        if (rr_p->quota > 0 && rr_p->quota < num_segments) {
            split = num_segments - rr_p->quota;
            choose_sk = sk_it;
            goto found;
        }

        /* Or, it's totally unused */
        if (!rr_p->quota) {
            split = num_segments;
            choose_sk = sk_it;
        }

        /* Or, it must then be fully used */
        if (rr_p->quota >= num_segments)
            full_subs++;
    }

    /* All considered subflows have a full quota, and we considered at
     * least one.
     */
    if (iter && iter == full_subs) {
        /* So, we restart this round by setting quota to 0 and retry
         * to find a subflow.
         */
        mptcp_for_each_sub(mpcb, mptcp) {
            struct sock *sk_it = mptcp_to_sock(mptcp);
            struct tcp_sock *tp_it = tcp_sk(sk_it);
            struct rrsched_priv *rr_p = rrsched_get_priv(tp_it);

            if (!mptcp_rr_is_available(sk_it, skb, false, cwnd_limited))
                continue;

            rr_p->quota = 0;
        }

        goto retry;
    }

found:
    if (choose_sk) {
        unsigned int mss_now;
        struct tcp_sock *choose_tp = tcp_sk(choose_sk);
        struct rrsched_priv *rr_p = rrsched_get_priv(choose_tp);

        if (!mptcp_rr_is_available(choose_sk, skb, false, true))
            return NULL;

        *subsk = choose_sk;
        mss_now = tcp_current_mss(*subsk);
        *limit = split * mss_now;

        if (skb->len > mss_now)
            rr_p->quota += DIV_ROUND_UP(skb->len, mss_now);
        else
            rr_p->quota++;

        return skb;
    }

    return NULL;
}
static struct mptcp_sched_ops mptcp_sched_rr = {
    .get_subflow = rr_get_available_subflow,
    .next_segment = mptcp_rr_next_segment,
    .name = "roundrobin",
    .owner = THIS_MODULE,
};

static int __init rr_register(void)
{
    BUILD_BUG_ON(sizeof(struct rrsched_priv) > MPTCP_SCHED_SIZE);

    if (mptcp_register_scheduler(&mptcp_sched_rr))
        return -1;

    return 0;
}

static void rr_unregister(void)
{
    mptcp_unregister_scheduler(&mptcp_sched_rr);
}

module_init(rr_register);
module_exit(rr_unregister);
MODULE_AUTHOR("Christoph Paasch");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ROUNDROBIN MPTCP");
MODULE_VERSION("0.89");
Please help me with this part of the code; I didn't understand what it does. Can anyone help me understand it?
struct rrsched_priv {
    unsigned char quota;
};

static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
    return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
The Multipath TCP scheduler is responsible for choosing the subflow on which a packet will be forwarded and transmitted. In the round-robin case, this choice rotates over the available subflows, and the following code is basically a constructor that bridges the Linux kernel TCP stack (given by tcp_sock) and the MPTCP stack (given by net/mptcp.h).
In the following code there is a constructor for quota, which is the number of packets that will be consumed by each subflow.
struct rrsched_priv {
unsigned char quota;
};
In the next step, rrsched_get_priv() takes the Linux kernel tcp_sock attributes: through the pointer tp it reaches mptcp->mptcp_sched[0] in the MPTCP stack, and returns that storage as a pointer to struct rrsched_priv.
static struct rrsched_priv *rrsched_get_priv(const struct tcp_sock *tp)
{
    return (struct rrsched_priv *)&tp->mptcp->mptcp_sched[0];
}
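If it helps, here is a stripped-down, userspace illustration of that pattern (my own sketch, not kernel code): the subflow carries an opaque scratch array, and the scheduler reinterprets those bytes as its private state:

#include <stdio.h>

#define SCHED_SIZE 8                 /* stand-in for MPTCP_SCHED_SIZE */

struct subflow {                     /* stand-in for tp->mptcp */
    unsigned char sched_priv[SCHED_SIZE];
};

struct rr_priv {                     /* the scheduler's private view */
    unsigned char quota;
};

static struct rr_priv *get_priv(struct subflow *sf)
{
    /* Same cast as rrsched_get_priv(): reinterpret the scratch bytes. */
    return (struct rr_priv *)&sf->sched_priv[0];
}

int main(void)
{
    struct subflow sf = { { 0 } };

    get_priv(&sf)->quota = 3;        /* scheduler keeps its state in place */
    printf("quota = %u\n", get_priv(&sf)->quota);
    return 0;
}

This is also why rr_register() has the BUILD_BUG_ON(): the scheduler's private struct must fit inside that scratch array.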
References
[Round-Robin Scheduler] : http://progmp.net/progmp.html#/dissertation_round_robin
[tcp_sock] : https://docs.huihoo.com/doxygen/linux/kernel/3.7/structtcp__sock.html
[Quota] : https://tuprints.ulb.tu-darmstadt.de/7709/13/Dissertation_Froemmgen.pdf
[MPTCP Scheduler] : http://progmp.net/
I have searched around on the internet and there is little documentation about how to use a ring buffer to read from a file or write to a file. So I wrote my own implementation, shown here.
My ring buffer structure:
typedef struct Ringbuffer {
    char *buffer;
    int length;
    int start;
    int end;
    int fd;
    int looped;
} RingBuffer;

//to update the end of the buffer after writing
#define RingBuffer_commit_write(B, A) (B)->end += (A); if ((B)->end >= (B)->length) { (B)->looped = 1; (B)->end %= (B)->length; }

#define RingBuffer_ends_at(B) ((B)->buffer + (B)->end)

static inline int RingBuffer_available_space(RingBuffer *buffer) {
    return buffer->length - RingBuffer_available_data(buffer);
}
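(As an aside: a multi-statement macro like RingBuffer_commit_write breaks if it is ever used after an unbraced if. A common safer form, sketched here, wraps the body in do { ... } while (0):)

#define RingBuffer_commit_write(B, A)       \
    do {                                    \
        (B)->end += (A);                    \
        if ((B)->end >= (B)->length) {      \
            (B)->looped = 1;                \
            (B)->end %= (B)->length;        \
        }                                   \
    } while (0)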
Code snippet to read from the file (here, a socket):
void read_some(int sockfd, RingBuffer *buffer) {
    int byte_recv = 0;
    check(RingBuffer_available_space(buffer) > 0, "out of space, release some data");
    if (buffer->looped == 0) {
        byte_recv = recv(sockfd, RingBuffer_ends_at(buffer), buffer->length - buffer->end, 0);
        if (byte_recv < buffer->length - buffer->end) {
            RingBuffer_commit_write(buffer, byte_recv);
            return;
        } else {
            RingBuffer_commit_write(buffer, byte_recv);
            byte_recv = recv(sockfd, RingBuffer_ends_at(buffer), RingBuffer_available_space(buffer), 0);
            RingBuffer_commit_write(buffer, byte_recv);
        }
    } else {
        byte_recv = recv(sockfd, RingBuffer_ends_at(buffer), RingBuffer_available_space(buffer), 0);
        RingBuffer_commit_write(buffer, byte_recv);
    }
error:
    return;
}
This, however, is inefficient and overcomplicated in my opinion. Is there a better implementation of a ring buffer for reading from a file (since sockets and files are quite similar)? Any help, even just an idea, would be appreciated.
Edited to include short description of what is expected from the code.
#include <sys/file.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

#define MAX_PAGE (0xFF + 1)

/* page table entry; you may need to add your own fields to it */
typedef struct
{
    unsigned short frame;   /* location */
    unsigned int valid:1;
    unsigned int in_mem:1;
    unsigned int dirty:1;
    unsigned int last_frame;
} pt_entry;

/* list entry for physical frames */
struct list_item
{
    unsigned short frame;
    struct list_item *next;
    struct list_item *prev;
    int page_num;
};

typedef struct list_item *list;

void start_simulation(FILE *);
void resolve(int);
unsigned short find_frame(void);
unsigned short find_victim(void);
void display_stats(void);
void to_resident_set(list);
void free_mem(list);
void invalidate(unsigned short);

/*============================ header ends here ==============================*/
/*#include "lru.h"*/

pt_entry pte[MAX_PAGE];   /* page table */
int mem_size;             /* physical memory size in page frames */
list free_list_head;      /* free list */
list res_set_head;        /* resident set */
int total_fault = 0;      /* total number of page faults */
int total_ref = 0;        /* total number of memory references */
/* main program:
** read in parameters, open the input file, and start the simulation */
int main(int argc, char *argv[])
{
    FILE *stream;

    if (argc != 3)
    {
        printf("The format is: pager file_name memory_size.\n");
        exit(1);
    }
    printf("File used %s, resident set size %d\n", argv[1], atoi(argv[2]));
    if ((stream = fopen(argv[1], "r")) == NULL)
    {
        perror("File open failed");
        exit(1);
    }
    mem_size = atoi(argv[2]);
    start_simulation(stream);
    fclose(stream);
}
/* initialise the page table
** initialise the resident set and the free list
** in the simulation loop:
** 16-bit memory addresses representing the program trace are read from the
** input file one by one; each virtual address is resolved, i.e. the physical
** frame for the virtual page is identified
** the loop exits when it encounters the end of file
** free memory allocated for lists
** display statistics
*/
void start_simulation(FILE *stream)
{
    char *addr_buf;
    int address;
    int i, n;
    list new_entry, current;

    /* initialise the page table */
    for (i = 0; i < MAX_PAGE; i++)
    {
        pte[i].frame = -1;
        pte[i].valid = 0;
        pte[i].dirty = 0;
        pte[i].in_mem = 0;
    }

    /* initialise the resident set - empty */
    res_set_head = (list)malloc(sizeof(struct list_item));
    res_set_head->next = res_set_head;
    res_set_head->prev = res_set_head;

    /* initialise free list - all physical pages */
    free_list_head = (list)malloc(sizeof(struct list_item));
    free_list_head->next = free_list_head;
    free_list_head->prev = free_list_head;
    current = free_list_head;
    for (i = 0; i < mem_size; i++)
    {
        new_entry = (list)malloc(sizeof(struct list_item));
        current->next = new_entry;
        new_entry->prev = current;
        new_entry->next = free_list_head;
        new_entry->frame = i;
        current = new_entry;
        free_list_head->prev = current;
    }

    /* main simulation loop */
    while ((n = fscanf(stream, "%x", &address)) != -1)
    {
        resolve(address);
        total_ref++;
    }

    free_mem(free_list_head);
    free_mem(res_set_head);
    display_stats();
    return;
}
/* resolve address reference
** if page table entry valid - do nothing
** if page table entry invalid - find a physical frame for this page
** and update the pte for the page
*/
void resolve(int address)
{
    unsigned short frame_alloc;
    int virt_page;
    static int disp_counter = 0;

    virt_page = address >> 8;
    if (pte[virt_page].valid == 1)
    {
        /* Was trying to implement */
        //pte[virt_page].frame = pte[0];
    }
    else
    {
        frame_alloc = find_frame();
        pte[virt_page].valid = 1;
        pte[virt_page].frame = frame_alloc;
        total_fault++;
    }
}
/* find_frame:
** if free list is empty find a victim frame
** else detach the last frame of the free list and attach it
** to the resident set
** return frame number
*/
unsigned short find_frame()
{
    unsigned short frame;
    list current, new_tail;

    if (free_list_head == free_list_head->prev)   /* free list empty */
        frame = find_victim();
    else
    {
        new_tail = free_list_head->prev->prev;
        new_tail->next = free_list_head;
        current = free_list_head->prev;
        free_list_head->prev = new_tail;
        to_resident_set(current);
        frame = current->frame;
    }
    return frame;
}
/* to_resident_set:
** attach a list entry at the end of the resident set
*/
void to_resident_set(list current)
{
    list tail;

    tail = res_set_head->prev;
    tail->next = current;
    current->next = res_set_head;
    current->prev = tail;
    res_set_head->prev = current;
}
/* find_victim:
** As you can see I simply take the first page frame from the resident set list.
** This implements the FIFO replacement strategy. Your task is to replace it with
** a more efficient strategy.
*/
unsigned short find_victim()
{
    int i;
    unsigned short frame = 0;
    list current;

    for (i = 0; i < MAX_PAGE; i++)
    {
        if (pte[i].frame == frame && pte[i].valid == 1)
        {
            frame = res_set_head->next->frame;
            invalidate(frame);
            current = res_set_head->next;
            res_set_head->next = current->next;
            res_set_head->next->prev = res_set_head;
            to_resident_set(current);
            break;
        }
    }
    return frame;
}
/* invalidate:
** invalidate the page table entry for the victim page */
void invalidate(unsigned short frame)
{
    int i;

    for (i = 0; i < MAX_PAGE; i++)
    {
        if (pte[i].frame == frame && pte[i].valid == 1)
        {
            pte[i].valid = 0;
            pte[i].frame = -1;
            break;
        }
    }
}
/* display_stats:
** This is very basic; you may want to make it more sophisticated,
** for example save the data from multiple runs into a file for
** comparison etc.
*/
void display_stats()
{
    printf("\nProcess issued %d memory references\n", total_ref);
    printf("Process triggered %d page faults\n", total_fault);
    printf("Page fault rate is %d percent\n", ((total_fault * 100) / total_ref));
}
/* free memory allocated to the list */
void free_mem(list head)
{
    list current, tail;

    tail = head->prev;
    current = head;
    while (current->prev != tail)
    {
        current = current->next;
        free(current->prev);
    }
}
The most obvious problem lies in the input to your algorithm.
The restpage array is a global array and will thus be initialised to contain only the value 0. You then use these array elements as the page numbers you are requesting, which means that your algorithm processes only requests for page 0 if mem_size < 100.
And if mem_size >= 100, you are overrunning the array bounds and land squarely in the land of undefined behaviour.
There are two fixes you need to make:
Just as you are checking for a valid file in the command-line arguments, you must also check that mem_size is not too large.
Write an additional loop to give each element in restpage a random value, to ensure not all page requests are for the same page; a sketch follows below.
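/* Sketch of the second fix; needs <stdlib.h> and <time.h>. restpage,
 * mem_size and i come from the earlier version of your code; clamping
 * to 100 also covers the array bound from the first fix. */
srand((unsigned int)time(NULL));
for (i = 0; i < mem_size && i < 100; i++)
    restpage[i] = rand() % MAX_PAGE;   /* a random page number per request */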
You have dimensioned restpage to [100] but mem_size seems freely configurable, is this the intent?
mem_size = atoi(argv[2]);
fclose(stream);
..
for (i = 0; i < mem_size; i++)
{
    totalabsence += find_victim(&pt, restpage[i]);
}
EDIT:
I see one bug in your new code: in find_victim() you don't initialize the local variable frame.
EDITx2:
When you read from the file, you may want to put one hex address on each line and use fgets() instead, reading the file line by line (or load the whole file and go through it line by line).
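A sketch of that reading loop, as a drop-in replacement for the fscanf() loop in start_simulation():

char line[32];

while (fgets(line, sizeof line, stream) != NULL) {
    int address;

    if (sscanf(line, "%x", &address) == 1) {   /* one hex address per line */
        resolve(address);
        total_ref++;
    }
}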