CRC-CCITT (0x1d0f) calculation in C

I have an array of values in hexadecimal.
I have pre-calculated the CRC-CCITT (0x1d0f) of it, which is 0xD7F2.
I wrote an algorithm based on a working CRC algorithm written in JavaScript, which I tested with the same input data (calculation here).
I rewrote it in C, but unfortunately I'm getting a different output than expected, namely 0xB5DB.
So, my question is: could there be a problem inside the algorithm? Could the wrong data types be causing the problem?
Here is an example with a simple array of size 2. The result from the calculator is 0x9770; the result of my algorithm is 0x5D80.
Calculation algorithm:
#include <stdio.h>

unsigned int doCrc(unsigned int *data, int size);

unsigned int crcTest[2] = {0xB6FE, 0x8C4A};

int main(void)
{
    unsigned int crc = doCrc(crcTest, 2);
    printf("Correct CRC16-CCITT is: 0x9770\n");
    printf("Calculated result func : 0x%X\n", crc);
    return 0;
}
unsigned int doCrc(unsigned int *data, int size)
{
    int i, j;
    unsigned int crc = 0x1d0f;
    for (i = 0; i < size; i++) {
        unsigned int xr = data[i] << 8;
        crc = crc ^ xr;
        for (j = 0; j < 8; j++) {
            if (crc & 0x8000) {
                crc = (crc << 1);
                crc = crc ^ 0x1021;
            }
            else {
                crc = crc << 1;
            }
        }
    }
    crc = crc & 0xFFFF;
    return crc;
}
Whole source code main.c : Download here
JavaScript code which actually works:
CRC1D0F: function() {
    var str = this.CleanedString;
    var crc = 0x1d0f;
    for (var c = 0; c < str.length; c++) {
        crc ^= str.charCodeAt(c) << 8;
        for (var i = 0; i < 8; i++) {
            if (crc & 0x8000)
                crc = (crc << 1) ^ 0x1021;
            else
                crc = crc << 1;
        }
    }
    return crc & 0xFFFF;
}

Your code is almost correct. The function signature should be:
unsigned int doCrc(unsigned char *data, int size)
instead of:
unsigned int doCrc(unsigned int *data, int size)
The algorithm consumes its input one byte at a time (each byte is XORed into the top of the 16-bit register), so the data has to be passed as bytes, not as 16-bit words.
This works:
#include <stdio.h>
#include <string.h>

unsigned int doCrc(unsigned char *data, int size)
{
    int i, j;
    unsigned int crc = 0x1d0f;
    for (i = 0; i < size; i++) {
        unsigned int xr = data[i] << 8;
        crc = crc ^ xr;
        for (j = 0; j < 8; j++) {
            if (crc & 0x8000) {
                crc = (crc << 1);
                crc = crc ^ 0x1021;
            }
            else {
                crc = crc << 1;
            }
        }
    }
    crc = crc & 0xFFFF;
    return crc;
}

unsigned char data[] = "1234567890";

int main(int argc, char *argv[])
{
    printf("%0x\n", doCrc(data, strlen((char *)data)));
    return 0;
}
Expected output:
57d8
which is the same as we get here.
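If the original 16-bit test values are meant to be checksummed byte-wise, a minimal sketch (assuming each word should be fed high byte first, and reusing the corrected doCrc above) would split them into bytes first:
#include <stdio.h>

unsigned int doCrc(unsigned char *data, int size);   /* the corrected function above */

int main(void)
{
    unsigned int crcTest[2] = {0xB6FE, 0x8C4A};
    unsigned char bytes[4];
    int i;
    for (i = 0; i < 2; i++) {
        bytes[2 * i]     = (crcTest[i] >> 8) & 0xFF;  /* high byte first (assumed order) */
        bytes[2 * i + 1] = crcTest[i] & 0xFF;         /* then low byte */
    }
    printf("CRC16-CCITT (init 0x1d0f): 0x%04X\n", doCrc(bytes, 4));
    return 0;
}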

Related

CRC32 - wrong checksum using TABLE algorithm and 04C11DB7 polynomial

I am following "A Painless Guide to CRC Error Detection Algorithms" (https://zlib.net/crc_v3.txt). I've managed to write a TABLE algorithm, using an extra loop for the augmented part (I hope so). I am trying to implement the most widely used CRC32 version (with the 0x04C11DB7 polynomial), but I cannot get the right CRC value.
I've generated the correct table of CRC32 values for the mentioned polynomial.
My code for generating the CRC32 (chapters 9 and 10):
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32_BYTE_POSSIBLE_VALUES 255
#define CRC32_LAST_BIT_MASK 0x80000000
#define CRC32_POLYNOMIAL 0x04C11DB7

uint32_t __crc32_table[CRC32_BYTE_POSSIBLE_VALUES] = { 0 };

void __crc32_fill_crc_table() {
    uint32_t reg;
    uint8_t byte = 0;
    for (;;) {
        reg = (byte << 24);
        for (uint8_t byte_size = 0; byte_size < 8; byte_size++) {
            if (reg & CRC32_LAST_BIT_MASK) {
                reg <<= 1;
                reg ^= CRC32_POLYNOMIAL;
            } else {
                reg <<= 1;
            }
        }
        __crc32_table[byte] = reg;
        if (byte == 255)
            break;
        else
            byte++;
    }
}

void __crc32_print_table(uint32_t *arr) {
    printf(" 0x%08X ", arr[0]);
    for (uint32_t i = 1; i < 256; i++) {
        if (!(i % 8))
            printf("\n");
        printf(" 0x%08X ", arr[i]);
    }
    printf("\n");
}

uint8_t inverse_byte(uint8_t byte) {
    uint8_t reflected_byte = 0;
    for (uint8_t i = 0; i < 8; i++) {
        if (byte & (1 << i))
            reflected_byte |= (1 << (7 - i));
    }
    return reflected_byte;
}

uint32_t inverse(uint32_t src) {
    uint32_t toret;
    for (uint8_t i = 0; i < 32; i++) {
        if (src & (1 << i))
            toret |= (1 << (31 - i));
    }
    return toret;
}

uint32_t __crc32_table_approach(unsigned char *data, size_t size) {
    uint32_t reg = -1;
    uint8_t top_byte;
    for (size_t i = 0; i < size; i++) {
        top_byte = (uint8_t)(reg >> 24);
        reg = (reg << 8) | inverse_byte(data[i]);
        reg ^= __crc32_table[top_byte];
    }
    for (size_t i = 0; i < 4; i++) {
        top_byte = (uint8_t)(reg >> 24);
        reg = (reg << 8);
        reg ^= __crc32_table[top_byte];
    }
    return inverse(reg) ^ -1;
}

uint32_t calc_crc32(unsigned char *data, size_t size) {
    if (!__crc32_table[1])
        __crc32_fill_crc_table();
    __crc32_print_table(__crc32_table);
    return __crc32_table_approach(data, size);
}

int main(int argc, char **argv)
{
    unsigned char *test = "123456789";
    size_t test_len = strlen(test);
    uint32_t crc = calc_crc32(test, test_len);
    printf("CRC32: 0x%08X", crc);
    return 0;
}
The inverse function reverses the bits of a uint32_t value, and inverse_byte reverses the bits of a uint8_t value.
But for the '123456789' string I get the wrong checksum.
Could someone help me? Or give some advice?
Input string: '123456789'
Output CRC: CRC32: 0x22016B0A
Desired CRC: CRC32: 0xCBF43926
You made your array one word too short, and so overwrote the allocated memory. It needs to be:
#define CRC32_BYTE_POSSIBLE_VALUES 256
Though that part probably still worked, because C.
You need to initialize the variable you are reversing into:
uint32_t toret = 0;
These lines:
top_byte = (uint8_t)(reg >> 24);
reg = (reg << 8) | inverse_byte(data[i]);
need to be:
top_byte = (uint8_t)(reg >> 24) ^ inverse_byte(data[i]);
reg <<= 8;
and you need to delete these lines:
for (size_t i = 0; i < 4; i++) {
    top_byte = (uint8_t)(reg >> 24);
    reg = (reg << 8);
    reg ^= __crc32_table[top_byte];
}
Then you get the right answer.
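Putting those fixes together, the lookup routine would look roughly like this (a sketch that only assembles the corrections above; it still uses the question's __crc32_table, inverse and inverse_byte):
uint32_t __crc32_table_approach(unsigned char *data, size_t size) {
    uint32_t reg = -1;                       /* initial value 0xFFFFFFFF */
    uint8_t top_byte;
    for (size_t i = 0; i < size; i++) {
        /* fold the reflected input byte into the table index, not into the register */
        top_byte = (uint8_t)(reg >> 24) ^ inverse_byte(data[i]);
        reg <<= 8;
        reg ^= __crc32_table[top_byte];
    }
    /* no extra four iterations for an augmented message */
    return inverse(reg) ^ -1;
}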
If you would like to implement the table approach on the augmented message as described in Chapter 9 (requiring another four iterations of the CRC at the end as in your code), then you need to first read these important notes in the document:
Note: The initial register value for this algorithm must be the
initial value of the register for the previous algorithm fed through
the table four times. Note: The table is such that if the previous
algorithm used 0, the new algorithm will too.
To get the same effect as the initial value of 0xffffffff (notably not zero) with the non-augmented message version, which is how that standard CRC is defined, then you'd need to find an initial value such that applying 32 zero bits to it using the CRC gives you 0xffffffff. That value is 0x46af6449, obtained by reversing the CRC bit-wise algorithm:
uint32_t x = -1;
for (unsigned i = 0; i < 32; i++)
    x = x & 1 ? ((x ^ 0x4c11db7) >> 1) | 0x80000000 : x >> 1;
Then your code will work if you fix the array size and the toret initialization errors, and simply replace:
uint32_t reg = -1;
with:
uint32_t reg = 0x46af6449;
Either augmented or not, reversing every single input byte as you are doing is a waste of time. You can and should instead just reverse the calculation and the polynomial. See rcgldr's answer.
Example code using right shifting CRC (0xedb88320 is a reflected version of 0x04C11DB7):
#include <iostream>
#include <iomanip>

typedef unsigned char uint8_t;
typedef unsigned int uint32_t;

uint32_t crctbl[256];

void gentbl(void)
{
    uint32_t crc;
    uint32_t c;
    uint32_t i;
    for (c = 0; c < 0x100; c++) {
        crc = c;
        for (i = 0; i < 8; i++) {
            crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320 : (crc >> 1);
        }
        crctbl[c] = crc;
    }
}

uint32_t crc32(uint8_t * bfr, size_t size)
{
    uint32_t crc = 0xfffffffful;
    while (size--)
        crc = (crc >> 8) ^ crctbl[(crc & 0xff) ^ *bfr++];
    return (crc ^ 0xfffffffful);
}

int main(int argc, char **argv)
{
    uint32_t crc;
    uint8_t msg[10] = "123456789";
    gentbl();
    crc = crc32(msg, 9);
    std::cout << "crc " << std::hex << std::setw(8) << std::setfill('0') << crc << std::endl;
    return (0);
}

Trying to remove a value from a set which is constructed using bitvector

I'm trying to remove a value from a set but can't get it to work.
This is the struct:
struct set {
    int capacity;
    int size;
    char *array;
};
This is how I insert the values into the set:
void set_insert(const int value, set *s)
{
    if (!set_member_of(value, s))
    {
        int bit_in_array = value; // To make the code easier to read

        // Increase the capacity if necessary
        if (bit_in_array >= s->capacity)
        {
            int no_of_bytes = bit_in_array / 8 + 1;
            s->array = realloc(s->array, no_of_bytes);
            for (int i = s->capacity / 8; i < no_of_bytes; i++)
            {
                s->array[i] = 0;
            }
            s->capacity = no_of_bytes * 8;
        }

        // Set the bit
        int byte_no = bit_in_array / 8;
        int bit = 7 - bit_in_array % 8;
        s->array[byte_no] = s->array[byte_no] | 1 << bit;
        s->size++;
    }
}
This is how I've tried to remove the values. I don't know why, but it completely ruins the set and assigns different values to the entire array:
void set_remove(const int value, set *const s)
{
    int byte_no = value / 8;
    if (set_member_of(value, s))
    {
        s->array[byte_no] = 0;
        s->size--;
    }
}
You didn't post set_member_of, so I had to synthesize it.
The main issue with set_remove is that it zeroes out all bits in the given byte. You want to AND against the complement of the bit mask. So, change this:
s->array[byte_no] = 0;
into:
s->array[byte_no] &= ~mask;
When I do bit masks, I like to use macros instead of repeating the shifts/divides in multiple places.
In set_insert, I think it's easier to make capacity the number of bytes rather than the number of bits.
Although char works for the array type, using unsigned char is probably better.
Note that as you define it, set is not a type name under C (you need struct set or a typedef).
Anyway, here is the refactored code. I've compiled it but not tested it:
#include <stdlib.h>

typedef unsigned char atom;

typedef struct set {
    int capacity;
    int size;
    atom *array;
} set;

#define INDEX(_bitno)   ((_bitno) / 8)
#define MASK(_bitno)    (1u << (7 - ((_bitno) % 8)))   /* actual bit mask, not just the shift amount */

int
set_member_of(int bitno, set *s)
{
    int byte_no = INDEX(bitno);
    atom mask = MASK(bitno);
    atom match = 0;

    if (byte_no < s->capacity)
        match = s->array[byte_no] & mask;

    return match;
}

void
set_insert(int bitno, set *s)
{
    if (! set_member_of(bitno, s)) {
        int newcap = INDEX(bitno + 8);

        // Increase the capacity if necessary
        if (newcap > s->capacity) {
            s->array = realloc(s->array, newcap);

            // zero out any new cells
            for (int i = s->capacity; i < newcap; ++i)
                s->array[i] = 0;

            s->capacity = newcap;
        }

        atom mask = MASK(bitno);
        int byte_no = INDEX(bitno);

        s->array[byte_no] |= mask;
        s->size++;
    }
}

void
set_remove(int bitno, set *s)
{
    if (set_member_of(bitno, s)) {
        int byte_no = INDEX(bitno);
        atom mask = MASK(bitno);

        s->array[byte_no] &= ~mask;
        s->size--;
    }
}
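A quick test harness for the refactored code (hypothetical; not part of the original answer, and it assumes the functions above are in the same file) might be:
#include <stdio.h>

int main(void)
{
    set s = {0, 0, NULL};                 /* empty set: no capacity, no storage yet */
    set_insert(3, &s);
    set_insert(10, &s);
    set_remove(3, &s);
    printf("10 in set: %d\n", set_member_of(10, &s) != 0);   /* expect 1 */
    printf(" 3 in set: %d\n", set_member_of(3, &s) != 0);    /* expect 0 */
    free(s.array);
    return 0;
}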

Can't get the right CRC CCITT in C

I'm trying to get the 16 bit CRC CCITT FALSE.
I'm using this page to check it.
http://www.sunshine2k.de/coding/javascript/crc/crc_js.html
And this is my code
unsigned int16 crc16_CCITT(unsigned int8 *data, unsigned int16 len) //CRC16 CCITT False
{
    unsigned int16 crc = 0xFFFF;
    for (unsigned int16 j = len; j > 0; j--)
    {
        crc ^= *data++;
        for (unsigned int8 i = 0; i < 8; i++)
        {
            if (crc & 1)
            {
                //crc = (crc >> 1) ^ 0x8401; // 0x8401 is the reflection of 0x1021
                crc = (crc >> 1) ^ 0x1021;
            }
            else
            {
                crc >>= 1;
            }
        }
    }
    return (crc);
}
As you can see, I already tried reflecting the polynomial, and that didn't work either.
I don't understand what I'm doing wrong; I already used this routine with the 16-bit ARC CRC (0x8005) and it works OK.
Try shifting the bits the other way:
uint16_t crc16_CCITT(unsigned char *ptr, int count)
{
    uint16_t crc = 0xffff;
    int i = 0;
    while (--count >= 0)
    {
        crc = crc ^ (uint16_t)*ptr++ << 8;
        for (i = 0; i < 8; ++i)
        {
            if (crc & 0x8000)
            {
                crc = (crc << 1) ^ 0x1021;
            }
            else
            {
                crc = crc << 1;
            }
        }
    }
    return crc;
}
unsigned int16 and unsigned int8 are ambiguous. It's better to change them to uint16_t/uint8_t, or to unsigned short/unsigned char. In many header files, int16 is defined as signed short and int8 as signed char.
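As an aside on the commented-out line in the question: the bit-reversed form of 0x1021 is 0x8408, not 0x8401, and a right-shifting routine with that polynomial computes the reflected CRC-16 variants (e.g. MCRF4XX, or X-25 with a final XOR), not CCITT-FALSE. A sketch for comparison:
#include <stdint.h>

/* Reflected (LSB-first) CRC-16, polynomial 0x8408 = bit-reversed 0x1021.
   With init 0xFFFF and no final XOR this is the MCRF4XX variant, not CCITT-FALSE. */
uint16_t crc16_reflected(const unsigned char *data, int len)
{
    uint16_t crc = 0xFFFF;
    while (len-- > 0) {
        crc ^= *data++;
        for (int i = 0; i < 8; i++)
            crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : (crc >> 1);
    }
    return crc;
}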

Bit field structure arrays in C

How can I create a bit-field array with variable size?
The following code is what I tried, which didn't work.
#include <stdio.h>

int main()
{
    int n = 4;
    struct bite {
        unsigned a1:2;
        unsigned a2:2;
        :
        :
        unsigned a(n-1):2;
        unsigned a(n):2;
    } bits;

    for (i = 1; i <= n; i++)
        bits.a[i] = i;
    for (i = 1; i <= n; i++)
        printf("%d ", bits.a[i]);
    return 0;
}
The members of a struct cannot be defined at runtime.
You could simulate a bit array using a char array and some macros:
#define BitArray(array, bits) \
    unsigned char array[(bits) / 8 + 1]
#define SetBit(array, n) \
    do { array[(n) / 8] |= (1 << ((n) % 8)); } while (0)
#define GetBit(array, n) \
    ((array[(n) / 8] >> ((n) % 8)) & 1)

int main(void)
{
    BitArray(bits, 42);         /* Define 42 bits (this allocates memory for
                                   (42/8 + 1) * 8 bits; zero it before use). */
    SetBit(bits, 2);            /* Set bit 2. */
    int bit2 = GetBit(bits, 2); /* Get bit 2. */
    ...
Similarly for 2-bit words, as per your code (note that the mask for a 2-bit value is 0x3, i.e. binary 11):
#include <stdlib.h>
#include <stdio.h>
#include <string.h>

#define Define2BitWordArray(array, two_bit_words) \
    unsigned char array[(two_bit_words) / 4 + 1]
#define Set2BitWord(array, n, value) \
    do { array[(n) / 4] |= (unsigned char)(((value) & 0x3) << (2 * ((n) % 4))); } while (0)
#define Get2BitWord(array, n) \
    ((array[(n) / 4] >> (2 * ((n) % 4))) & 0x3)

int main(void)
{
    size_t n = 10;
    Define2BitWordArray(bits, n);   /* Define 10 two-bit words
                                       (in fact this allocates memory
                                       for (10/4 + 1) * 4 two-bit words). */
    memset(bits, 0, sizeof bits);   /* Set all bits to 0. */

    for (size_t i = 0; i < n; i++)  /* C array indexes are 0-based. */
    {
        Set2BitWord(bits, i, i);
    }
    for (size_t i = 0; i < n; i++)
    {
        printf("%d ", Get2BitWord(bits, i));
    }
}
Remember that your array always starts at position 0.
You can also do this:
#include <stdio.h>

typedef struct
{
    unsigned int x : 1;
} one;

int main() {
    int n = 10;
    one xx[n];
    int i;
    for (i = 0; i < n; i++)
        xx[i].x = i;
    for (i = 0; i < n; i++)
        printf("%d ", xx[i].x);
    return 0;
}
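Since the question asks for 2-bit fields, the same idea works with a 2-bit member; a sketch along the lines of the answer above:
#include <stdio.h>

typedef struct
{
    unsigned int x : 2;   /* two-bit field, holds values 0..3 */
} two;

int main(void)
{
    int n = 10;
    two xx[n];
    for (int i = 0; i < n; i++)
        xx[i].x = i;              /* only the low two bits are kept */
    for (int i = 0; i < n; i++)
        printf("%d ", xx[i].x);   /* prints 0 1 2 3 0 1 2 3 0 1 */
    printf("\n");
    return 0;
}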
If you want to operate on a particular bit (or bits) of data of arbitrary size (actually limited to the maximum value of size_t divided by 8). Bits are numbered little-endian in this example:
#include <stdint.h>
#include <string.h>

void setbit(void *obj, size_t bit)
{
    uint8_t *p = obj;
    p[bit >> 3] |= (1 << (bit & 7));
}

void resetbit(void *obj, size_t bit)
{
    uint8_t *p = obj;
    p[bit >> 3] &= ~(1 << (bit & 7));
}

void assignbit(void *obj, size_t bit, unsigned value)
{
    uint8_t *p = obj;
    p[bit >> 3] &= ~(1 << (bit & 7));
    p[bit >> 3] |= (!!value << (bit & 7));   /* |= , not != */
}

void setbits(void *obj, size_t bit, size_t num)
{
    while (num--)
        setbit(obj, bit + num);
}

void resetbits(void *obj, size_t bit, size_t num)
{
    while (num--)
        resetbit(obj, bit + num);
}

void assignbits_slow(void *obj, size_t pos, size_t size, unsigned value)
{
    for (size_t i = 1, j = 0; j < size; j++, i <<= 1)
        assignbit(obj, pos + j, !!(value & i));
}

int getbit(void *obj, size_t bit)
{
    uint8_t *p = obj;
    return !!(p[bit >> 3] & (1 << (bit & 7)));
}

void *getbits(void *obj, void *buff, size_t bit, size_t nbits)
{
    memset(buff, 0, (nbits >> 3) + !!(nbits & 7));
    do
    {
        nbits--;
        assignbit(buff, nbits, getbit(obj, bit + nbits));   /* write into buff starting at bit 0 */
    } while (nbits);
    return buff;
}
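A small usage sketch for these routines (hypothetical; it assumes the functions above with the assignbit fix applied, plus <stdio.h>, on a little-endian machine):
int main(void)
{
    uint32_t word = 0;
    setbit(&word, 0);                           /* word == 0x00000001 */
    assignbits_slow(&word, 8, 4, 0xA);          /* write binary 1010 at bit 8: word == 0x00000A01 */
    printf("word  = 0x%08X\n", (unsigned)word);
    printf("bit 9 = %d\n", getbit(&word, 9));   /* 1 */
    resetbit(&word, 0);
    printf("word  = 0x%08X\n", (unsigned)word); /* 0x00000A00 */
    return 0;
}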

Reading/Writing bits in memory

Let's say I'm given a void* memory address and I need to print the bits located at this memory address. How can I do this?
On my processor, memory addresses are 32 bits, as are memory values; int is also 32 bits.
So I thought of doing this:
unsigned int value = *memory_address;
and then using simple arithmetic (some mod and div operations) to get the bits of the value stored at memory_address.
For example, value mod 2 will give the last bit of this value, and so on. But from what I can tell (I was expecting different bits), it doesn't work. Any ideas why?
Also, is anyone aware of ready-made C source code that does this kind of thing, i.e. reads/writes bits from memory?
Shift the value right by one position for each bit and AND it with 1:
unsigned int value = *((unsigned int *)memory_address);
for (int i = 0; i < 32; i++)
{
    printf("%d ", value >> i & 1);
}
You can also do it with math operators (this needs <math.h>). You have to get the bit value (2 to the power of the bit index) and subtract that value at each iteration, so the modulo doesn't return values we've already seen:
for (int i = 0; i < 32; i++)
{
    int bit_value = (int)pow(2, i + 1);   /* note: overflows int on the final iteration */
    int num_bit_value = value % bit_value;
    printf("%d ", num_bit_value ? 1 : 0);
    value -= num_bit_value;
}
int main() {
    int a = 0xFFFF;
    void *v = &a;            // v points to a
    int *aPtr = (int *) v;   // aPtr also points to a
    int b = *aPtr;           // b gets the value aPtr points to, aka a or 0xFFFF
    int aBit = (b >> 3) & 1; // aBit now contains bit 3 of the original a value

    // toggle the bit
    if (aBit) {
        b &= ~(1 << 3);      // set bit 3 to 0
    } else {
        b |= (1 << 3);       // set bit 3 to 1
    }
    *aPtr = b;               // update original a
}
I found it easier to think of the memory as a continuous string of characters rather than a void pointer. This way you can address as many bits as you want.
Here is how I have done it.
unsigned char
get_bit(char *array, int bit)
{
    int byte, k;
    byte = bit / 8;
    k = 7 - bit % 8;
    return array[byte] & (1 << k);
}

void
set_bit(char *array, int bit, unsigned char value)
{
    int byte, k;
    byte = bit / 8;
    k = 7 - bit % 8;
    if (value)
        array[byte] |= (1 << k);
    else
        array[byte] &= ~(1 << k);
}
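For example, a hypothetical usage of the two functions above:
#include <stdio.h>

int main(void)
{
    char buffer[4] = {0};                       /* 32 bits, all zero */
    set_bit(buffer, 10, 1);                     /* set bit 10 (MSB-first within each byte) */
    printf("%d\n", get_bit(buffer, 10) != 0);   /* prints 1 */
    set_bit(buffer, 10, 0);
    printf("%d\n", get_bit(buffer, 10) != 0);   /* prints 0 */
    return 0;
}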
How about (note the parentheses; & has lower precedence than !=):
bool isBit4Set = ((*someAddress & 0x8) != 0);
(*someAddress) |= 0x8; // Set bit 4
Generic solution for printing bytes and bits:
void dump_data(const void *object, size_t size)
{
    size_t i;

    printf("[ \n");
    for (i = 0; i < size; i++)
    {
        if (i % 4 == 0)
        {
            printf("#%p", (const void *)&((const unsigned char *) object)[i]);
            printf("[ ");
        }
        printf("%02x ", ((const unsigned char *) object)[i] & 0xff);
        if ((i + 1) % 4 == 0)
            printf("]\n");
    }
    printf("]\n");

    printf("BINARY FORMAT\n");
    for (i = 0; i < size; i++)
    {
        printf("#%p", (const void *)&((const unsigned char *) object)[i]);
        printf("[ ");
        unsigned char value = ((const unsigned char *) object)[i];
        for (int j = 0; j < 8; j++)
            printf("%d ", (value & (0x80 >> j)) ? 1 : 0); // right-shifting the value instead would print the bits in reverse
        printf("]\n");
    }
}
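A hypothetical call, assuming the function above and <stdio.h>:
int main(void)
{
    unsigned int x = 0x12345678;
    dump_data(&x, sizeof x);   /* prints the bytes of x in hex and then bit by bit */
    return 0;
}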
bool getBit(void *data, int bit) { return ((*((int *)data)) & (1 << bit)); }
void setBit(void *data, int bit, bool set) { if (set) { (*((int *)data)) |= 1 << bit; } else { (*((int *)data)) &= ~(1 << bit); } }
for simple usage.
