I am creating a linked list in a kernel module. It has no problem adding the first record; in the module's init function I also initialize the map with the create-map function:
typedef struct IP4SYN {
    unsigned int KaynakIP;
    u_int16_t KaynakPort;
    u_int16_t HedefPort;
} IP4SYN;

typedef struct IP4Map {
    unsigned int HedefIP;
    IP4SYN *a;
    size_t s;
    size_t n;
} IP4Map;

typedef struct Sessions {
    IP4Map *a;
    size_t s;
    size_t n;
} Sessions;
static size_t blocksize = (1024*102);
Sessions * Session_Map;
static unsigned int get_tsval(void){
    return (unsigned int)(ktime_to_ns(ktime_get()) >> 10);
}

Sessions * CreateMap(size_t initial){
    Sessions * pMap = (Sessions *)vmalloc(sizeof(Sessions));
    if (initial > 0 && blocksize != initial) {
        blocksize = initial;
    }
    pMap->a = (IP4Map *)vmalloc(sizeof(IP4Map) * blocksize);
    memset(pMap->a, 0, sizeof(IP4Map) * blocksize);
    pMap->s = blocksize;
    pMap->n = 0;
    pMap->a->a = (IP4SYN *)vmalloc(sizeof(IP4SYN) * blocksize);
    memset(pMap->a->a, 0, sizeof(IP4SYN) * blocksize);
    pMap->a->s = blocksize;
    pMap->a->n = 0;
    return pMap;
}
size_t HedefIPKayitNo(unsigned int HedefIP){
    size_t beg = 0;
    size_t end = Session_Map->n;
    for (beg; beg < end; beg) {
        IP4Map map = Session_Map->a[beg];
        if (map.HedefIP == HedefIP) {
            return beg;
            break;
        }
    }
    return beg;
}
size_t YeniHedefIP(unsigned int HedefIP, size_t index){
    Session_Map->a[index].HedefIP = HedefIP;
    Session_Map->n++;
}
I want to check whether an IP already exists in the list and, if it is not listed, add it to the list. But strangely this locks up the kernel on the second try; the first add works perfectly. What could the issue be?
size_t IP_Index = HedefIPKayitNo(hedef_ip);
if (IP_Index == Session_Map->n) {
    YeniHedefIP(hedef_ip, IP_Index);
}
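For reference, here is a minimal sketch, not the original code, of what the lookup/insert pair could look like if the search loop actually advances and each new entry gets its own IP4SYN array (the posted CreateMap only allocates that inner array for entry 0). The names are kept from the question and the bounds check is purely illustrative:

/* Sketch only: advancing lookup plus per-entry allocation. */
size_t HedefIPKayitNo(unsigned int HedefIP) {
    size_t beg;
    for (beg = 0; beg < Session_Map->n; beg++) {
        if (Session_Map->a[beg].HedefIP == HedefIP)
            return beg;
    }
    return beg; /* equals Session_Map->n when the IP is not listed */
}

size_t YeniHedefIP(unsigned int HedefIP, size_t index) {
    if (index >= Session_Map->s) /* illustrative bounds check */
        return index;
    Session_Map->a[index].HedefIP = HedefIP;
    Session_Map->a[index].a = vmalloc(sizeof(IP4SYN) * blocksize);
    if (Session_Map->a[index].a) {
        memset(Session_Map->a[index].a, 0, sizeof(IP4SYN) * blocksize);
        Session_Map->a[index].s = blocksize;
        Session_Map->a[index].n = 0;
        Session_Map->n++;
    }
    return index;
}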
I'm trying to trace get_page_from_freelist().
I defined an integer pointer, initialized it with kmalloc() in mm/init-mm.c, and added some system calls to control it. But after this, I rebooted my computer and got an "Out of Memory" error.
I reduced the array size to 4 KB (512 entries), but it still shows the same error message. As far as I know, 4 KB is very small for the kernel. How can I handle this problem?
My kernel version is 5.19.9 and I have 32 GB of physical memory. I'm using 64-bit Ubuntu 22.04.
from mm/init-mm.c:
int trace_on;
int trace_idx;
int trace_mod;
int raw_trace[TRACE_SIZE][2];
void setup_initial_init_mm(void *start_code, void *end_code,
                           void *end_data, void *brk)
{
    int i;

    trace_on = 0;
    trace_idx = 0;
    trace_mod = 0;
    for (i = 0; i < TRACE_SIZE; i++) {
        raw_trace[i][trace_mod] = 0;
    }
    init_mm.start_code = (unsigned long)start_code;
    init_mm.end_code = (unsigned long)end_code;
    init_mm.end_data = (unsigned long)end_data;
    init_mm.brk = (unsigned long)brk;
}
from include/linux/mm_types.h:
#define TRACE_SIZE 0x100
extern int trace_on;
extern int trace_idx;
extern int trace_mod;
extern int raw_trace[TRACE_SIZE][2];
from mm/page_alloc.c:
#include <linux/mm_types.h>
extern int trace_on;
extern int trace_idx;
extern int trace_mod;
extern int raw_trace[TRACE_SIZE][2];
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
                       const struct alloc_context *ac)
{
    struct zoneref *z;
    struct zone *zone;
    struct pglist_data *last_pgdat_dirty_limit = NULL;
    bool no_fallback;
    int i;

    if (unlikely(!trace_on && trace_idx > 0)) {
        if (unlikely(trace_idx == TRACE_SIZE))
            trace_idx--;
        for (i = 0; i <= trace_idx; i++) {
            raw_trace[i][0] = 0;
            raw_trace[i][1] = 0;
        }
        trace_idx = 0;
        trace_mod = 0;
    }
retry:
    ...
try_this_zone:
    page = rmqueue(ac->preferred_zoneref->zone, zone, order,
                   gfp_mask, alloc_flags, ac->migratetype);
    if (page) {
        prep_new_page(page, order, gfp_mask, alloc_flags);

        /*
         * If this is a high-order atomic allocation then check
         * if the pageblock should be reserved for the future
         */
        if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
            reserve_highatomic_pageblock(page, zone, order);

        if (unlikely(trace_on)) {
            if (unlikely(trace_idx >= TRACE_SIZE)) {
                if (trace_mod) trace_mod = 0;
                else trace_mod = 1;
                trace_idx = 0;
            }
            raw_trace[trace_idx++][trace_mod] = page_to_phys(page);
        }
...
I don't think the array size or the use of kmalloc() is the issue, because I've already tried reducing its size and allocating it statically.
The code above is the static-allocation version of my modification.
Originally, I allocated the buffer with kmalloc() in setup_initial_init_mm().
I have written a circular buffer in C and it works up to a point. I set the buffer size to 10. When I fill the buffer up to element 6 it works fine, but the moment I write the 7th element I get the result "The size of the buffer is equal to 767", and for element 8 it does not work at all. I use "head" to write and "tail" to extract values. Could you please help me with this?
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h> /* needed for malloc()/free() */
typedef struct RingBuffer {
    uint16_t* buffer;
    size_t head;
    size_t tail;
    size_t max;
    bool full;
} *cbuf_handle_t;

cbuf_handle_t init_RingBuffer(uint8_t* buffer, size_t size){
    cbuf_handle_t cbuf = malloc(sizeof(cbuf_handle_t));
    cbuf->buffer = buffer;
    cbuf->max = size;
    return cbuf;
}
void RingBuffer_free(cbuf_handle_t cbuf){
    free(cbuf);
}

void RingBuffer_reset(cbuf_handle_t cbuf){
    cbuf->head = 0;
    cbuf->tail = 0;
    cbuf->full = false;
}

bool RingBuffer_full(cbuf_handle_t cbuf){
    return cbuf->full;
}

bool RingBuffer_empty(cbuf_handle_t cbuf){
    return (!cbuf->full && (cbuf->tail == cbuf->head));
}

size_t RingBuffer_Capacity(cbuf_handle_t cbuf){
    return cbuf->max;
}
size_t RingBuffer_size(cbuf_handle_t cbuf){
    size_t size = cbuf->max;
    if (!cbuf->full){
        if (cbuf->head >= cbuf->tail){
            size = (cbuf->head - cbuf->tail);
        } else {
            size = (cbuf->head - cbuf->tail + cbuf->max);
        }
    }
    return size;
}
void RingBuffer_AdvancePointer(cbuf_handle_t cbuf){
    if (cbuf->full){
        cbuf->tail = (cbuf->tail + 1) % cbuf->max;
    }
    cbuf->head = (cbuf->head + 1) % cbuf->max;
    cbuf->full = (cbuf->head == cbuf->tail);
}

void RingBuffer_retreatPointer(cbuf_handle_t cbuf){
    cbuf->full = false;
    cbuf->tail = (cbuf->tail + 1) % cbuf->max;
}

void RingBuffer_addValue(cbuf_handle_t cbuf, uint8_t data){
    cbuf->buffer[cbuf->head] = data;
    RingBuffer_AdvancePointer(cbuf);
}

int RingBuffer_Remove(cbuf_handle_t cbuf, uint8_t *data){
    int r = -1;
    if (!RingBuffer_empty(cbuf)){
        *data = cbuf->buffer[cbuf->tail];
        RingBuffer_retreatPointer(cbuf);
        r = 0;
    }
    return r;
}
int main (){
    uint8_t arr[10];
    cbuf_handle_t cpt = init_RingBuffer(arr, 10);
    //initialize the buffer, tail and head and max
    int i = 0;
    RingBuffer_reset(cpt);
    for (i = 0; i < 6; i++){
        RingBuffer_addValue(cpt, i);
    }
    size_t size = RingBuffer_size(cpt);
    printf("The size of the buffer %d", size);
}
Thank you in advance!
Regards
Rostyslav
As said in the comments, typedef-ing the structure as a pointer is generally not recommended. However, you can fix that bug by changing the way you allocate it with malloc:
cbuf_handle_t cbuf = malloc (sizeof(*cbuf));
This is because cbuf is a pointer to the structure: dereferencing it gives you the structure itself, so passing *cbuf to sizeof yields the real size of the structure rather than the size of a pointer.
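Along the same lines, here is a hedged sketch, not the original code, of what the allocator could look like with the fields initialized as well; the posted init_RingBuffer leaves head, tail and full indeterminate until RingBuffer_reset is called, and note that the struct declares buffer as uint16_t* while main passes a uint8_t array:

/* Sketch only: allocate the structure itself and start from a clean state. */
cbuf_handle_t init_RingBuffer(uint8_t* buffer, size_t size){
    cbuf_handle_t cbuf = malloc(sizeof(*cbuf)); /* size of the struct, not of the pointer type */
    if (cbuf == NULL)
        return NULL;
    cbuf->buffer = buffer; /* assumes the buffer member type matches the storage passed in */
    cbuf->max = size;
    cbuf->head = 0;
    cbuf->tail = 0;
    cbuf->full = false;
    return cbuf;
}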
I have defined my own type. It contains a pointer to an array, as well as a count of how many items are in that array:
struct neighborList
{
    unsigned int nNeighbors;
    unsigned int* pNeighbors;
};
These get instantiated, populated, and eventually I want to go back through them. Then something very strange happens. I think screenshots are better than words here.
I've shown the next statement to execute. I have an array of the aforementioned data type; the one under consideration here has 1 neighbor, and the address of that 1 neighbor is 0x107a28. Cool. But what actually gets assigned to pLook?
The address is always off by 0x40. Has anyone seen anything like this? Help here is appreciated.
EDIT: Here's the whole thing since several people want to see it.
#include "stdafx.h"
#include <stdlib.h>
#include <time.h>
//#define NVERTEX 875714
#define NVERTEX 9
struct linkedNode
{
    unsigned int node;
    linkedNode* pNextLinkedNode;
    linkedNode* pPrevLinkedNode;
};

struct neighborList
{
    unsigned int nNeighbors;
    unsigned int* pNeighbors;
};

struct linkedNodeList
{
    linkedNode* pHead;
    linkedNode* pTail;
};
void populateNeighbors(neighborList* pNeighborList, FILE* fp);
void DFSLoop(neighborList* pNeighborList, linkedNode* pOutput, unsigned int nNodes);
void append(linkedNodeList* pLinkedList, unsigned int node);
void DFSLoop(neighborList* pNeighborList, linkedNodeList* pOutput, unsigned int nNodes)
{
    bool* visitedArray;
    bool* cashedArray;
    unsigned int* leaderArray;
    unsigned int* finishingTimes;
    unsigned int t = 0;

    visitedArray = (bool*)malloc(nNodes*sizeof(bool));
    cashedArray = (bool*)malloc(nNodes*sizeof(bool));
    leaderArray = (unsigned int*)malloc(nNodes*sizeof(unsigned int));
    finishingTimes = (unsigned int*)malloc(nNodes*sizeof(unsigned int));

    //initialize all arrays to all false/0
    for (unsigned int i = 0; i < nNodes; i++)
    {
        visitedArray[i] = false;
        cashedArray[i] = false;
        leaderArray[i] = 0;
        finishingTimes[i] = 0;
    }

    //firstly, pick a starting node and put it on the linkedList
    //initialize head and tail
    (pOutput->pHead)->node = 1;
    (pOutput->pHead)->pNextLinkedNode = NULL;
    (pOutput->pHead)->pPrevLinkedNode = NULL;
    (pOutput->pTail)->node = 1;
    (pOutput->pTail)->pNextLinkedNode = NULL;
    (pOutput->pTail)->pPrevLinkedNode = NULL;
    unsigned int curNode = (pOutput->pTail)->node;

    for (;;)
    {
        //Start DFS
        //#1 If current node under consideration has an unexplored neighbor, make it the new tail and repeat
        //   If not, current node is cashed. Set it's finishing time, and leader. Work back through the list
        //   Until you find a node with an unexplored neighbor
        unsigned int nNeighbors = pNeighborList[curNode].nNeighbors;
        for (unsigned int i = 0; i < nNeighbors; i++)
        {
            unsigned int* pLook = (pNeighborList[curNode]).pNeighbors;
            unsigned int neighbor = pLook[0];
            /*
            unsigned int nodeUnderConsideration = (pNeighborList[curNode].pNeighbors)[i];
            if ( !cashedArray[nodeUnderConsideration])
            {
                append(pOutput, (pNeighborList[curNode].pNeighbors)[i]);
                curNode = (pOutput->pTail)->node;
                continue;
            }
            */
        }
        //#2 If you make it back to the head and have no unexplored neighbors, pick new vertex (if unvisited) and repeat
    }

    free(visitedArray);
    free(cashedArray);
    free(leaderArray);
    free(finishingTimes);
}
int _tmain(int argc, _TCHAR* argv[])
{
    //open file
    FILE* fp;
    FILE* fpRev;
    //fp = fopen("SCC.txt", "rb");
    //fpRev = fopen("SSCrev.txt", "rb");
    fp = fopen("SSCsmall1.txt", "rb");
    fpRev = fopen("SSCsmall1rev.txt", "rb");

    /* read file. When reading, keep track of how much memory to malloc */
    /* for each vertex */
    neighborList* pAllEdges;
    neighborList* pAllEdgesRev;
    pAllEdges = (neighborList*)malloc(NVERTEX*sizeof(neighborList));
    pAllEdgesRev = (neighborList*)malloc(NVERTEX*sizeof(neighborList));
    populateNeighbors(pAllEdges, fp);
    populateNeighbors(pAllEdgesRev, fpRev);

    //instantiate pointers for linkedlists needed for DFS
    linkedNodeList NodesFirstPass, NodesSecondPass;
    NodesFirstPass.pHead = (linkedNode*)malloc(sizeof(linkedNode));
    NodesFirstPass.pTail = NodesFirstPass.pHead;
    NodesSecondPass.pHead = (linkedNode*)malloc(sizeof(linkedNode));
    NodesSecondPass.pTail = NodesSecondPass.pHead;

    DFSLoop(pAllEdges, &NodesFirstPass, NVERTEX);

    free(pAllEdges);
    free(pAllEdgesRev);
    return 0;
}
void populateNeighbors(neighborList* pNeighborList, FILE* fp)
{
    unsigned int v1 = 1;
    unsigned int v2 = 1;
    unsigned int v1_next = 1;
    unsigned int v2_next = 1;
    unsigned int neighbors[1000];

    fscanf(fp, "%u", &v1_next);
    fscanf(fp, "%u", &v2_next);

    for (unsigned int i = 0; i < (NVERTEX - 1); i++)
    {
        //initialize nNeigbors to 0
        unsigned int nNeighbors = 0;
        for (;;)
        {
            //if v1_next is a different vertex then v1, then copy v1_next to v1,
            //malloc what we need to, copy over the array and continue
            if (v1_next != v1)
            {
                pNeighborList[i].nNeighbors = nNeighbors;
                if (nNeighbors != 0)
                {
                    pNeighborList[i].pNeighbors = (unsigned int*)malloc(nNeighbors * sizeof(unsigned int));
                    for (unsigned int j = 0; j < nNeighbors; j++)
                    {
                        pNeighborList[i].pNeighbors[j] = neighbors[j];
                    }
                }
                v1++;
                break;
            }
            //else, increment the neighbor count for this particular vertex and continue
            //within this loop, getting new neighbors (edges)
            else
            {
                neighbors[nNeighbors] = v2_next;
                nNeighbors++;
                if (nNeighbors == 1000)
                {
                    break;
                }
                fscanf(fp, "%u", &v1_next);
                fscanf(fp, "%u", &v2_next);
            }
        }
    }
}
void append(linkedNodeList* pLinkedList, unsigned int node)
{
    //make new node with the intention that it's going to be the new tail
    linkedNode* pNewNode = (linkedNode*)malloc(sizeof(linkedNode));
    pNewNode->node = node;
    pNewNode->pNextLinkedNode = NULL;
    pNewNode->pPrevLinkedNode = pLinkedList->pTail;
    //set next node of current tail to new node
    (pLinkedList->pTail)->pNextLinkedNode = pNewNode;
    //new tail becomes new node
    pLinkedList->pTail = pNewNode;
    //lastly, set old tail's next node to point to new tail
    (pLinkedList->pTail->pPrevLinkedNode)->pNextLinkedNode = pLinkedList->pTail;
}
Judging by the screenshots, and assuming you are on a 64-bit system (a pointer being 8 bytes wide), the pointer pNeighborList points to the start of the list, while pLook corresponds to the pNeighbors member of the neighborList element at index 5:
// assuming sizeof(neighborList) == 4 (int) + 8 (pointer) = 12 bytes
neighborList* pNeighborList = new neighborList[10];
// pNeighborList points to the start of the list, 0x00107a28
// pNeighborList[5] is at address 0x00107a64 (start + 5 * sizeof(neighborList))
// .pNeighbors is offset 4 more bytes (sizeof(unsigned int)) = 0x00107a68
int curNode = 5;
unsigned int* pLook = (pNeighborList[curNode]).pNeighbors;
// pLook points to pNeighbors of the element at index 5, 0x00107a68
When you hover over the pointer pNeighborList in Visual Studio, it shows you that pointer (which points to the start of the list), not the value of (pNeighborList[curNode]).pNeighbors.
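If you would rather check this in code than rely on the debugger tooltip, a hypothetical set of debug prints (assuming stdio is available in this project) could compare the addresses and values directly:

// Illustrative only: show where the 0x40 difference comes from.
printf("pNeighborList (array base)         = %p\n", (void*)pNeighborList);
printf("&pNeighborList[curNode]            = %p\n", (void*)&pNeighborList[curNode]);
printf("&pNeighborList[curNode].pNeighbors = %p\n", (void*)&pNeighborList[curNode].pNeighbors);
printf("pNeighborList[curNode].pNeighbors  = %p\n", (void*)pNeighborList[curNode].pNeighbors);
printf("pLook                              = %p\n", (void*)pLook);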
I am working on a kernel module and I need to compare two buffers to find out if they are equivalent. I am using the memcmp function defined in the Linux kernel to do so. My first buffer is like this:
cache_buffer = (unsigned char *)vmalloc(4097);
cache_buffer[4096] = '\0';
The second buffer is from a page using the page_address() function.
page = bio_page(bio);
kmap(page);
write_buffer = (char *)page_address(page);
kunmap(page);
I have printed the contents of both buffers beforehand, and not only do they print correctly, they also have the same content. So next, I do this:
result = memcmp(write_buffer, cache_buffer, 2048); // only comparing up to 2048 positions
This causes the kernel to freeze up and I cannot figure out why. I checked the implementation of memcmp and saw nothing that would cause the freeze. Can anyone suggest a cause?
Here is the memcmp implementation:
int memcmp(const void *cs, const void *ct, size_t count)
{
    const unsigned char *su1, *su2;
    int res = 0;

    for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--)
        if ((res = *su1 - *su2) != 0)
            break;
    return res;
}
EDIT: The function causing the freeze is memcmp. When I commented it out, everything worked. Also, when I did the memcmp as follows,
memcmp(write_buffer, write_buffer, 2048); //comparing two write_buffers
everything worked as well. Only when I throw cache_buffer into the mix do I get the error. Also, the above is a simplification of my actual code. Here is the entire function:
static int compare_data(sector_t location, struct bio * bio, struct cache_c * dmc)
{
    struct dm_io_region where;
    unsigned long bits;
    int segno;
    struct bio_vec * bvec;
    struct page * page;
    unsigned char * cache_data;
    char * temp_data;
    char * write_data;
    int result, length, i;

    cache_data = (unsigned char *)vmalloc((dmc->block_size * 512) + 1);

    where.bdev = dmc->cache_dev->bdev;
    where.count = dmc->block_size;
    where.sector = location << dmc->block_shift;
    printk(KERN_DEBUG "place: %llu\n", where.sector);
    dm_io_sync_vm(1, &where, READ, cache_data, &bits, dmc);

    length = 0;
    bio_for_each_segment(bvec, bio, segno)
    {
        if(segno == 0)
        {
            page = bio_page(bio);
            kmap(page);
            write_data = (char *)page_address(page);
            //kunmap(page);
            length += bvec->bv_len;
        }
        else
        {
            page = bio_page(bio);
            kmap(page);
            temp_data = strcat(write_data, (char *)page_address(page));
            //kunmap(page);
            write_data = temp_data;
            length += bvec->bv_len;
        }
    }
    printk(KERN_INFO "length: %u\n", length);

    cache_data[dmc->block_size * 512] = '\0';

    for(i = 0; i < 2048; i++)
    {
        printk("%c", write_data[i]);
    }
    printk("\n");
    for(i = 0; i < 2048; i++)
    {
        printk("%c", cache_data[i]);
    }
    printk("\n");

    result = memcmp(write_data, cache_data, length);
    return result;
}
EDIT #2: Sorry guys, the problem was not memcmp itself; it was the result of memcmp. Whenever it returned a positive or negative number, the function that called my function would play with some pointers, one of which was uninitialized. I don't know why I didn't realize it before. Thanks for trying to help, though!
I'm no kernel expert, but I would assume you need to keep this memory mapped while doing the comparison? In other words, don't call kunmap until after the memcmp is complete. I would presume that calling it before will result in write_buffer pointing to a page which is no longer mapped.
Taking your code from the other question, here is a rough attempt at an incremental comparison. It still needs some cleanup, I'm sure:
static int compare_data(sector_t location, struct bio * bio, struct cache_c * dmc)
{
    struct dm_io_region where;
    unsigned long bits;
    int segno;
    struct bio_vec * bvec;
    struct page * page;
    unsigned char * cache_data;
    char * temp_data;
    char * write_data;
    int length, i;
    int result = 0;
    size_t position = 0;
    size_t max_size = (dmc->block_size * 512) + 1;

    cache_data = (unsigned char *)vmalloc(max_size);

    where.bdev = dmc->cache_dev->bdev;
    where.count = dmc->block_size;
    where.sector = location << dmc->block_shift;
    printk(KERN_DEBUG "place: %llu\n", where.sector);
    dm_io_sync_vm(1, &where, READ, cache_data, &bits, dmc);

    bio_for_each_segment(bvec, bio, segno)
    {
        // Map the page into memory
        page = bio_page(bio);
        write_data = (char *)kmap(page);

        length = bvec->bv_len;

        // Make sure we don't go past the end
        if(position >= max_size)
            break;
        if(position + length > max_size)
            length = max_size - position;

        // Compare the data
        result = memcmp(write_data, cache_data + position, length);
        position += length;
        kunmap(page);

        // If the memory is not equal, bail out now and return the result
        if(result != 0)
            break;
    }
    cache_data[dmc->block_size * 512] = '\0';
    return result;
}
I have the following structure that I use to create a hash table for fingerprints:
typedef struct fpinfo
{
    unsigned long chunk_offset;
    unsigned long chunk_length;
    unsigned char fing_print[33];
} fpinfo;
/*
* The following defines one entry in the hash table.
*/
typedef struct Hash_Entry
{
    struct Hash_Entry *next; /* Link entries within same bucket. */
    unsigned namehash;       /* hash value of key */
    struct fpinfo fp;
} Hash_Entry;

typedef struct Hash_Table
{
    struct Hash_Entry **bucketPtr; /* Buckets in the table */
    int numBuckets;
    int buck_entry_count[64];      //number of entries in each bucket
    int size;                      /* Actual size of array. */
    int numEntries;                /* Number of entries in the table. */
    int mask;                      /* Used to select bits for hashing. */
} Hash_Table;
I insert fingerprints into it using
int Hash_CreateEntry(Hash_Table *t, struct Hash_Entry he)
{
    Hash_Entry *e;
    const char *p;
    int keylen;
    struct Hash_Entry **hp;
    unsigned long h = 0, g, i = 0;

    while (i < 5)
    {
        h = (h) + he.fp.fing_print[i]++;
        g = h & 0xF0000000;
        h ^= g >> 24;
        h &= ~g;
        i++;
    }
    p = (const char *)he.fp.fing_print;

    for (e = t->bucketPtr[h & t->mask]; e != NULL; e = e->next)
    {
        if (e->namehash == h && strcmp((const char *)(e->fp).fing_print, p) == 0)
        {
            printf("\n%d \t%s", (e->fp).chunk_length, (e->fp).fing_print);
            return (1);
        }
    }

    if (t->numEntries >= rebuildLimit * t->size)
        WriteHTtoFile(t);

    e = (Hash_Entry *)malloc(sizeof(*e) /*+ keylen*/);
    hp = &t->bucketPtr[h & t->mask];
    e->next = *hp;
    *hp = e;
    e->namehash = h;
    strcpy((char *)(e->fp).fing_print, p);
    t->numEntries++;
    t->buck_entry_count[h & t->mask]++;
    return (0);
}
The code I use to write the hash table to a file is:
static void WriteHTtoFile(Hash_Table *t)
{
    Hash_Entry *e, *next = NULL, **hp, **xp;
    int i = 0, mask;
    Hash_Entry **oldhp;
    int oldsize;
    FILE *htfile = fopen("htfile.txt", "a");

    system("cls");
    for (hp = t->bucketPtr; t->bucketPtr != NULL; hp = t->bucketPtr++)
    {
        for (e = *hp; e->next != NULL; e = e->next)
            fprintf(htfile, "\n%d \t%s", (e->fp).chunk_length, (e->fp).fing_print);
    }
    fclose(htfile);
}
My problems are:
1. It says "Access violation reading location 0xfdfdfe09." after writing a considerable number of entries (it wrote 6401 fingerprints). The debugger indicates the faulty line to be the fprintf() in the file-writing function.
2. The fingerprints it writes and what I have before writing do not match at all. The hex representation of the fingerprints in the debugger (I am using VC2010) and the ones read by the program are different.
3. The chunk_length values of all the entries are 3452816845.
I guess the loop in WriteHTtoFile should look more like this:
for (i = 0; i < t->numBuckets; ++i)
{
    for (e = t->bucketPtr[i]; e && e->next; e = e->next)
        fprintf(htfile, /*...*/);
}
You have more problems than that; this code is hopelessly botched.
WriteHTtoFile modifies the original hash table (it advances t->bucketPtr), so you end up with a memory leak at the very least.
You use the %d format to print chunk_length (an unsigned long), and it's not at all clear what fing_print is or should be (a binary string? an ASCII string?), so printing it with %s is questionable too.
Get a good book on C, and get some practice with a debugger.
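For what it's worth, here is a rough sketch of a non-destructive write-out along those lines, assuming fing_print really is a NUL-terminated ASCII string (if it is raw binary it should be hex-dumped instead); this is illustrative only, not a drop-in fix:

static void WriteHTtoFile(Hash_Table *t)
{
    Hash_Entry *e;
    int i;
    FILE *htfile = fopen("htfile.txt", "a");

    if (htfile == NULL)
        return;

    /* Walk every bucket by index; do not advance t->bucketPtr itself. */
    for (i = 0; i < t->numBuckets; i++)
    {
        for (e = t->bucketPtr[i]; e != NULL; e = e->next)
            fprintf(htfile, "\n%lu \t%s", (e->fp).chunk_length, (e->fp).fing_print);
    }
    fclose(htfile);
}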