I'm trying to compute the 16-bit CRC-CCITT (FALSE).
I'm using this page to check it:
http://www.sunshine2k.de/coding/javascript/crc/crc_js.html
And this is my code:
unsigned int16 crc16_CCITT(unsigned int8 *data, unsigned int16 len) // CRC16 CCITT False
{
    unsigned int16 crc = 0xFFFF;
    for(unsigned int16 j = len; j > 0; j--)
    {
        crc ^= *data++;
        for(unsigned int8 i = 0; i < 8; i++)
        {
            if(crc & 1)
            {
                //crc = (crc >> 1) ^ 0x8401; // 0x8401 is the reflection of 0x1021
                crc = (crc >> 1) ^ 0x1021;
            }
            else
            {
                crc >>= 1;
            }
        }
    }
    return (crc);
}
As you can see, I already tried reflecting the polynomial, and that didn't work either.
I don't understand what I'm doing wrong; I've already used this routine with the 16-bit ARC CRC (polynomial 0x8005) and it works fine.
Try shifting the bits the other way:
uint16_t crc16_CCITT(unsigned char *ptr, int count)
{
    uint16_t crc = 0xffff;
    int i = 0;
    while (--count >= 0)
    {
        crc = crc ^ ((uint16_t)*ptr++ << 8);
        for (i = 0; i < 8; ++i)
        {
            if (crc & 0x8000)
            {
                crc = (crc << 1) ^ 0x1021;
            }
            else
            {
                crc = crc << 1;
            }
        }
    }
    return crc;
}
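As a quick check, the standard check value of CRC-16/CCITT-FALSE for the ASCII string "123456789" is 0x29B1; here is a minimal test driver (it assumes the crc16_CCITT above is in scope):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
    unsigned char msg[] = "123456789";
    /* expect 0x29B1, the published CCITT-FALSE check value */
    printf("0x%04X\n", crc16_CCITT(msg, (int)strlen((char *)msg)));
    return 0;
}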
unsigned int16 and unsigned int8 are ambiguous. It's better to change them to uint16_t/uint8_t or unsigned short/unsigned char. In many header files, int16 is defined as signed short and int8 as signed char.
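For example, a sketch of the same prototype written with the exact-width types from C99's <stdint.h>:

#include <stdint.h>

/* no ambiguity: uint16_t and uint8_t are exactly 16 and 8 bits wide */
uint16_t crc16_CCITT(const uint8_t *data, uint16_t len);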
I am following "A Painless Guide to CRC Error Detection Algorithms" (https://zlib.net/crc_v3.txt). I've managed to write a TABLE algorithm, using an extra loop for the augmented part (I hope so). I am trying to implement the most widely used CRC-32 version (with the 0x04C11DB7 polynomial), but I cannot get the right CRC value.
I've generated the correct table of CRC-32 values for the mentioned polynomial.
My code for generating the CRC-32 (chapters 9 and 10):
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CRC32_BYTE_POSSIBLE_VALUES 255
#define CRC32_LAST_BIT_MASK 0x80000000
#define CRC32_POLYNOMIAL 0x04C11DB7

uint32_t __crc32_table[CRC32_BYTE_POSSIBLE_VALUES] = { 0 };

void __crc32_fill_crc_table() {
    uint32_t reg;
    uint8_t byte = 0;
    for (;;) {
        reg = (byte << 24);
        for (uint8_t byte_size = 0; byte_size < 8; byte_size++) {
            if (reg & CRC32_LAST_BIT_MASK) {
                reg <<= 1;
                reg ^= CRC32_POLYNOMIAL;
            } else {
                reg <<= 1;
            }
        }
        __crc32_table[byte] = reg;
        if (byte == 255)
            break;
        else
            byte++;
    }
}

void __crc32_print_table(uint32_t *arr) {
    printf(" 0x%08X ", arr[0]);
    for (uint32_t i = 1; i < 256; i++) {
        if (!(i % 8))
            printf("\n");
        printf(" 0x%08X ", arr[i]);
    }
    printf("\n");
}

uint8_t inverse_byte(uint8_t byte) {
    uint8_t reflected_byte = 0;
    for (uint8_t i = 0; i < 8; i++) {
        if (byte & (1 << i))
            reflected_byte |= (1 << (7 - i));
    }
    return reflected_byte;
}

uint32_t inverse(uint32_t src) {
    uint32_t toret;
    for (uint8_t i = 0; i < 32; i++) {
        if (src & (1 << i))
            toret |= (1 << (31 - i));
    }
    return toret;
}

uint32_t __crc32_table_approach(unsigned char *data, size_t size) {
    uint32_t reg = -1;
    uint8_t top_byte;
    for (size_t i = 0; i < size; i++) {
        top_byte = (uint8_t)(reg >> 24);
        reg = (reg << 8) | inverse_byte(data[i]);
        reg ^= __crc32_table[top_byte];
    }
    for (size_t i = 0; i < 4; i++) {
        top_byte = (uint8_t)(reg >> 24);
        reg = (reg << 8);
        reg ^= __crc32_table[top_byte];
    }
    return inverse(reg) ^ -1;
}

uint32_t calc_crc32(unsigned char *data, size_t size) {
    if (!__crc32_table[1])
        __crc32_fill_crc_table();
    __crc32_print_table(__crc32_table);
    return __crc32_table_approach(data, size);
}

int main(int argc, char** argv)
{
    unsigned char* test = "123456789";
    size_t test_len = strlen(test);
    uint32_t crc = calc_crc32(test, test_len);
    printf("CRC32: 0x%08X", crc);
    return 0;
}
The inverse function reverses the bits of a uint32_t value, and inverse_byte reverses the bits of a uint8_t value.
But for the '123456789' string I get the wrong checksum.
Could someone help me, or give some advice?
Input string: '123456789'
Output CRC: 0x22016B0A
Desired CRC: 0xCBF43926
You made your array one word too short, and so overwrote the allocated memory. It needs to be:
#define CRC32_BYTE_POSSIBLE_VALUES 256
Though that part probably still worked, because C.
You need to initialize the variable you are reversing into:
uint32_t toret = 0;
These lines:
top_byte = (uint8_t)(reg >> 24);
reg = (reg << 8) | inverse_byte(data[i]);
need to be:
top_byte = (uint8_t)(reg >> 24) ^ inverse_byte(data[i]);
reg <<= 8;
and you need to delete these lines:
for (size_t i = 0; i < 4; i++) {
    top_byte = (uint8_t)(reg >> 24);
    reg = (reg << 8);
    reg ^= __crc32_table[top_byte];
}
Then you get the right answer.
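Putting those fixes together, the corrected routine would look like this (a sketch, assuming the array-size and toret fixes above are also applied):

uint32_t __crc32_table_approach(unsigned char *data, size_t size) {
    uint32_t reg = -1;
    uint8_t top_byte;
    for (size_t i = 0; i < size; i++) {
        /* fold the next (reflected) message byte into the top byte before shifting */
        top_byte = (uint8_t)(reg >> 24) ^ inverse_byte(data[i]);
        reg <<= 8;
        reg ^= __crc32_table[top_byte];
    }
    return inverse(reg) ^ -1;
}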
If you would like to implement the table approach on the augmented message as described in Chapter 9 (requiring another four iterations of the CRC at the end as in your code), then you need to first read these important notes in the document:
Note: The initial register value for this algorithm must be the
initial value of the register for the previous algorithm fed through
the table four times. Note: The table is such that if the previous
algorithm used 0, the new algorithm will too.
To get the same effect as the initial value of 0xffffffff (notably not zero) with the non-augmented message version, which is how that standard CRC is defined, then you'd need to find an initial value such that applying 32 zero bits to it using the CRC gives you 0xffffffff. That value is 0x46af6449, obtained by reversing the CRC bit-wise algorithm:
uint32_t x = -1;
for (unsigned i = 0; i < 32; i++)
    x = x & 1 ? ((x ^ 0x4c11db7) >> 1) | 0x80000000 : x >> 1;
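You can sanity-check that value by feeding 32 zero bits forward through the bit-wise CRC, which must land back on 0xffffffff; a minimal sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t reg = 0x46af6449;
    /* forward, left-shifting CRC steps over 32 zero input bits */
    for (unsigned i = 0; i < 32; i++)
        reg = reg & 0x80000000 ? (reg << 1) ^ 0x04c11db7 : reg << 1;
    printf("0x%08x\n", reg);   /* prints 0xffffffff */
    return 0;
}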
Then your code will work if you fix the array size and the toret initialization errors, and simply replace:
uint32_t reg = -1;
with:
uint32_t reg = 0x46af6449;
Either augmented or not, reversing every single input byte as you are doing is a waste of time. You can and should instead just reverse the calculation and the polynomial. See rcgldr's answer.
Example code using a right-shifting CRC (0xedb88320 is the reflected version of 0x04C11DB7). Note that the exact-width types come from <cstdint>; defining them with typedefs would clash with the standard headers:

#include <iostream>
#include <iomanip>
#include <cstdint>
#include <cstddef>

uint32_t crctbl[256];

void gentbl(void)
{
    uint32_t crc;
    uint32_t c;
    uint32_t i;
    for (c = 0; c < 0x100; c++) {
        crc = c;
        for (i = 0; i < 8; i++) {
            crc = (crc & 1) ? (crc >> 1) ^ 0xedb88320 : (crc >> 1);
        }
        crctbl[c] = crc;
    }
}

uint32_t crc32(uint8_t *bfr, size_t size)
{
    uint32_t crc = 0xfffffffful;
    while (size--)
        crc = (crc >> 8) ^ crctbl[(crc & 0xff) ^ *bfr++];
    return crc ^ 0xfffffffful;
}

int main(int argc, char **argv)
{
    uint32_t crc;
    uint8_t msg[10] = "123456789";
    gentbl();
    crc = crc32(msg, 9);
    std::cout << "crc " << std::hex << std::setw(8) << std::setfill('0') << crc << std::endl;
    return 0;
}
I have an array of values in hexadecimal.
I have a pre-calculated CRC-CCITT (0x1d0f) for it, which is 0xD7F2.
I wrote an algorithm based on a working CRC algorithm written in JavaScript, which I tested with the same input data (calculation here).
I rewrote it in C, but unfortunately I'm getting a different output than expected, namely 0xB5DB.
So, my question is: could there be a problem inside the algorithm? Could wrong data types be causing the problem?
Here is an example with a simple array of size 2. The result from the calculator is 0x9770; the result of my algorithm is 0x5D80.
Calculation algorithm:
#include <stdio.h>

unsigned int doCrc(unsigned int *data, int size);

unsigned int crcTest[2] = {0xB6FE, 0x8C4A};

int main(void)
{
    unsigned int crc = doCrc(crcTest, 2);
    printf("Correct CRC16-CCITT is: 0x9770\n");
    printf("Calculated result func : 0x%X\n", crc);
    return 0;
}

unsigned int doCrc(unsigned int *data, int size)
{
    int i, j;
    unsigned int crc = 0x1d0f;
    for (i = 0; i < size; i++) {
        unsigned int xr = data[i] << 8;
        crc = crc ^ xr;
        for (j = 0; j < 8; j++)
        {
            if (crc & 0x8000) {
                crc = (crc << 1);
                crc = crc ^ 0x1021;
            }
            else {
                crc = crc << 1;
            }
        }
    }
    crc = crc & 0xFFFF;
    return crc;
}
The JavaScript code, which actually works:
CRC1D0F: function() {
    var str = this.CleanedString;
    var crc = 0x1d0f;
    for (var c = 0; c < str.length; c++) {
        crc ^= str.charCodeAt(c) << 8;
        for (var i = 0; i < 8; i++) {
            if (crc & 0x8000)
                crc = (crc << 1) ^ 0x1021;
            else
                crc = crc << 1;
        }
    }
    return crc & 0xFFFF;
}
Your code is almost correct:
It should be:
unsigned int doCrc(unsigned char *data, int size)
instead of:
unsigned int doCrc(unsigned int *data, int size)
This works:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

unsigned int doCrc(unsigned char *data, int size)
{
    int i, j;
    unsigned int crc = 0x1d0f;
    for (i = 0; i < size; i++) {
        unsigned int xr = data[i] << 8;
        crc = crc ^ xr;
        for (j = 0; j < 8; j++)
        {
            if (crc & 0x8000) {
                crc = (crc << 1);
                crc = crc ^ 0x1021;
            }
            else {
                crc = crc << 1;
            }
        }
    }
    crc = crc & 0xFFFF;
    return crc;
}

unsigned char data[] = "1234567890";

int main(int argc, char *argv[])
{
    printf("%x\n", doCrc(data, (int)strlen((char *)data)));
    return 0;
}
Expected output:
57d8
which is the same as we get here.
I need to create a memory struct for a project in which each word comprises 15 bits. When I check the size of the array I get that it is 2000 bytes, which I assume is due to compiler byte alignment.
Is there a way to define the struct so that it is 1875 bytes in size?
This is the code I used:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#pragma pack(push,1)
struct myword
{
    unsigned int val : 15;
};

struct mymemmory {
    struct myword arr[1000];
};
#pragma pack(pop)

int main()
{
    int size = sizeof(struct mymemmory);
    printf("Size of arr: %d\n", size);
    return 0;
}
When I use #pragma pack(push,0) I get that the size is 4000 bytes.
No, there isn't. If you need bit-level granularity, you have to implement it yourself: make an array of 1875 bytes and manually calculate indices and bit masks to extract the desired 15 bits. If you want to stay sane, factor this functionality out into accessor functions (a minimal sketch follows after the next paragraph), ideally using C++ or the like where you can make custom classes that abstract away the work entirely (so simple indexing does all the "real index" and bit shift/mask work behind the scenes; std::vector<bool> already does something quite like this for single bits).
Of course, true sanity is realizing it's silly to quibble over 125 bytes. There are very few scenarios where saving one bit in sixteen for each value (especially for so few values) is worth it, and the ones I can think of (actually needing a compact representation on disk) are still handled better by converting from compact disk representation to expanded memory representation on read, and converted back on write, to avoid the hassle and computational overhead of dealing with all the shifting and masking on every read/write.
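For illustration, here is a minimal sketch of such accessors, deliberately bit-at-a-time so the bounds are obviously right; it assumes CHAR_BIT == 8 and packs the 15-bit words LSB-first (the next answer gives a faster, CHAR_BIT-aware version):

#include <stdint.h>
#include <stddef.h>

/* 1000 15-bit words fit in (1000 * 15 + 7) / 8 = 1875 bytes */
static unsigned get15(const uint8_t *mem, size_t i)
{
    unsigned v = 0;
    for (unsigned b = 0; b < 15; b++) {
        size_t bit = i * 15 + b;
        v |= (unsigned)((mem[bit / 8] >> (bit % 8)) & 1) << b;
    }
    return v;
}

static void set15(uint8_t *mem, size_t i, unsigned v)
{
    for (unsigned b = 0; b < 15; b++) {
        size_t bit = i * 15 + b;
        if ((v >> b) & 1)
            mem[bit / 8] |= (uint8_t)(1u << (bit % 8));
        else
            mem[bit / 8] &= (uint8_t)~(1u << (bit % 8));
    }
}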
Unless you have a machine with 15-bit chars, you'll need to do a lot of bit manipulation to spread your 15-bit values over up to three unsigned chars using shifts and bit masks.
The following code works for machines with CHAR_BIT between 8 and 15 inclusive.
set15_le(mem, index, val) has mem pointing to an array of unsigned char providing the storage for an emulated array of 15-bit words, index is the index of a 15-bit word, and val is a 15-bit value to be stored. get15_le(mem, index) returns the 15-bit word at the specified index. The 15-bit words are stored in "little-endian" byte order.
set15_be(mem, index, val) and get15_be(mem, index) are similar to the above, except that the 15-bit words are stored in "big-endian" byte order.
The main function tests both flavors, by storing a set of 15-bit, pseudo-random numbers in the array, reading them back, and checking they match the expected values.
#include <limits.h>

#if CHAR_BIT > 15
#error "Unsupported CHAR_BIT value"
#endif

unsigned short get15_le(const unsigned char *mem, unsigned long index)
{
    unsigned long mem_index;
    unsigned int mem_bitpos;
    unsigned int val_bitpos;
    unsigned short val_mask;
    unsigned short val;

    mem_index = (index * 15) / CHAR_BIT;
    mem_bitpos = (index * 15) % CHAR_BIT;
    val = 0;
    val_bitpos = 0;
    val_mask = (1U << 15) - 1;
    while (val_mask)
    {
        unsigned int nbits;
        unsigned char mem_mask;
        unsigned char mem_byte;

        nbits = CHAR_BIT - mem_bitpos;
        if (nbits > 15 - val_bitpos)
        {
            nbits = 15 - val_bitpos;
        }
        mem_mask = val_mask << mem_bitpos;
        mem_byte = mem[mem_index];
        mem_byte &= mem_mask;
        val |= (mem_byte >> mem_bitpos) << val_bitpos;
        mem_bitpos += nbits;
        if (mem_bitpos == CHAR_BIT)
        {
            mem_bitpos = 0;
            mem_index++;
        }
        val_bitpos += nbits;
        val_mask >>= nbits;
    }
    return val;
}

void set15_le(unsigned char *mem, unsigned long index, unsigned short val)
{
    unsigned long mem_index;
    unsigned int mem_bitpos;
    unsigned int val_bitpos;
    unsigned short val_mask;

    mem_index = (index * 15) / CHAR_BIT;
    mem_bitpos = (index * 15) % CHAR_BIT;
    val_bitpos = 0;
    val_mask = (1U << 15) - 1;
    val &= val_mask;
    while (val_mask)
    {
        unsigned int nbits;
        unsigned char mem_mask;
        unsigned char mem_byte;

        nbits = CHAR_BIT - mem_bitpos;
        if (nbits > 15 - val_bitpos)
        {
            nbits = 15 - val_bitpos;
        }
        mem_mask = val_mask << mem_bitpos;
        mem_byte = mem[mem_index];
        mem_byte &= ~mem_mask;
        mem_byte |= ((val >> val_bitpos) << mem_bitpos) & mem_mask;
        mem[mem_index] = mem_byte;
        mem_bitpos += nbits;
        if (mem_bitpos == CHAR_BIT)
        {
            mem_bitpos = 0;
            mem_index++;
        }
        val_bitpos += nbits;
        val_mask >>= nbits;
    }
}

unsigned short get15_be(const unsigned char *mem, unsigned long index)
{
    unsigned long mem_index;
    unsigned int mem_bitpos;
    unsigned int val_bitpos;
    unsigned short val_mask;
    unsigned short val;

    mem_index = (index * 15) / CHAR_BIT;
    mem_bitpos = CHAR_BIT - (index * 15) % CHAR_BIT;
    val = 0;
    val_bitpos = 15;
    val_mask = (1U << 15) - 1;
    while (val_mask)
    {
        unsigned int nbits;
        unsigned char mem_mask;
        unsigned char mem_byte;

        nbits = mem_bitpos;
        if (nbits > val_bitpos)
        {
            nbits = val_bitpos;
        }
        val_bitpos -= nbits;
        mem_bitpos -= nbits;
        mem_mask = (val_mask >> val_bitpos) << mem_bitpos;
        mem_byte = mem[mem_index];
        mem_byte &= mem_mask;
        val |= (mem_byte >> mem_bitpos) << val_bitpos;
        if (mem_bitpos == 0)
        {
            mem_bitpos = CHAR_BIT;
            mem_index++;
        }
        val_mask >>= nbits;
    }
    return val;
}

void set15_be(unsigned char *mem, unsigned long index, unsigned short val)
{
    unsigned long mem_index;
    unsigned int mem_bitpos;
    unsigned int val_bitpos;
    unsigned short val_mask;

    mem_index = (index * 15) / CHAR_BIT;
    mem_bitpos = CHAR_BIT - (index * 15) % CHAR_BIT;
    val_bitpos = 15;
    val_mask = (1U << 15) - 1;
    val &= val_mask;
    while (val_mask)
    {
        unsigned int nbits;
        unsigned char mem_mask;
        unsigned char mem_byte;

        nbits = mem_bitpos;
        if (nbits > val_bitpos)
        {
            nbits = val_bitpos;
        }
        val_bitpos -= nbits;
        mem_bitpos -= nbits;
        mem_mask = (val_mask >> val_bitpos) << mem_bitpos;
        mem_byte = mem[mem_index];
        mem_byte &= ~mem_mask;
        mem_byte |= ((val >> val_bitpos) << mem_bitpos) & mem_mask;
        mem[mem_index] = mem_byte;
        if (mem_bitpos == 0)
        {
            mem_bitpos = CHAR_BIT;
            mem_index++;
        }
        val_mask >>= nbits;
    }
}

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

struct mymemory
{
    unsigned char content[(1000 * 15 + CHAR_BIT - 1) / CHAR_BIT];
};

int main(void)
{
    struct mymemory mem;
    unsigned long i;
    unsigned short v;

    printf("Memory size for 1000 15-bit words = %lu bytes (%lu bits)\n",
           (unsigned long)sizeof(mem.content),
           (unsigned long)sizeof(mem.content) * CHAR_BIT);

    printf("Testing little-endian version\n");
    memset(mem.content, 42, sizeof(mem.content));
    srand(5);
    for (i = 0; i < 1000; i++)
    {
        v = rand() & ((1U << 15) - 1);
        set15_le(mem.content, i, v);
    }
    srand(5);
    for (i = 0; i < 1000; i++)
    {
        unsigned int w;

        v = rand() & ((1U << 15) - 1);
        if ((w = get15_le(mem.content, i)) != v)
        {
            printf("Error at word %lu! got %u, expected %u\n", i, w, v);
            break;
        }
    }
    if (i == 1000)
    {
        printf("Passed!\n");
    }

    printf("Testing big-endian version\n");
    memset(mem.content, 42, sizeof(mem.content));
    srand(23);
    for (i = 0; i < 1000; i++)
    {
        v = rand() & ((1U << 15) - 1);
        set15_be(mem.content, i, v);
    }
    srand(23);
    for (i = 0; i < 1000; i++)
    {
        unsigned int w;

        v = rand() & ((1U << 15) - 1);
        if ((w = get15_be(mem.content, i)) != v)
        {
            printf("Error at word %lu! got %u, expected %u\n", i, w, v);
            break;
        }
    }
    if (i == 1000)
    {
        printf("Passed!\n");
    }
    return 0;
}
I have a noob question about casting and shifting. I'm trying to store two elements of a uint8_t array as a single, signed element in an array of type int16_t. However, my result is not correct, and I don't know why.
I have this code:
uint8_t buffer[BUFFER_SIZE];
int16_t mp3_stereo_buffer[BUFFER_SIZE];
for (i = 0; i < BUFFER_SIZE; i += 2) {
    mp3_stereo_buffer[i] = ((uint16_t)buffer[i] << 8) | ((uint16_t)buffer[i + 1]);
}
First, you can pack N uint8_t into N/2 uint16_t elements.
uint8_t buffer[BUFFER_SIZE];
uint16_t mp3_stereo_buffer[BUFFER_SIZE / 2];
Then you need to know if your data is little-endian or big-endian.
For little-endian:
for (i = 0; i < BUFFER_SIZE / 2; i++) {
    mp3_stereo_buffer[i] = (uint16_t)(buffer[i*2] | (buffer[i*2+1] << 8));
}
For big-endian:
for (i = 0; i < BUFFER_SIZE / 2; i++) {
    mp3_stereo_buffer[i] = (uint16_t)((buffer[i*2] << 8) | buffer[i*2+1]);
}
p.s. If your data is in fact signed, then you can change the type and the casting to int16_t but beware that the way signed numbers are represented is not guaranteed to be portable.
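For instance, building on the little-endian loop above, a sketch of the signed variant (the conversion back to int16_t assumes two's-complement representation, which is universal in practice):

int16_t mp3_stereo_buffer[BUFFER_SIZE / 2];

for (i = 0; i < BUFFER_SIZE / 2; i++) {
    uint16_t u = (uint16_t)(buffer[i*2] | (buffer[i*2+1] << 8));
    mp3_stereo_buffer[i] = (int16_t)u;   /* reinterpret the 16-bit pattern as a signed sample */
}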
#include <stdio.h>
#include <stdint.h>

#define LITTLE_ENDIAN

int main()
{
    uint8_t buffer[4] = {0x15, 0xff, 0x63, 0xee};
    int16_t mp3_stereo_buffer[2];
    for (int i = 0; i < 2; i += 1) {
        /* note: this cast technically breaks strict aliasing; memcpy is the portable alternative */
        mp3_stereo_buffer[i] = ((int16_t*)buffer)[i];
#ifdef LITTLE_ENDIAN
        /* byte-swap so the bytes keep their big-endian order */
        mp3_stereo_buffer[i] = (mp3_stereo_buffer[i] >> 8 & 0x00ff) | (mp3_stereo_buffer[i] << 8 & 0xff00);
#endif
        printf("%x\n", mp3_stereo_buffer[i] & 0xffff);
    }
    return 0;
}
Let's say I'm given a void* memory address and I need to print the bits located at this memory address. How can I do this?
On my processor, memory addresses are 32 bits, as are memory values; ints are also 32 bits.
So I thought of doing this:
unsigned int value = *memory_address;
and then using simple arithmetic (some mod and div operations) to get the bits of the value saved at memory_address.
For example, value mod 2 gives the last bit of this value, and so on. But it doesn't work; I was expecting different bits. Any ideas why?
Also, is anyone aware of ready-made C source code that does this, i.e. reads/writes bits from memory?
Shift the value right by the bit index and AND the result with 1:
unsigned int value = *((unsigned int*)memory_address);

for (int i = 0; i < 32; i++)
{
    printf("%d ", value >> i & 1);   /* prints the least significant bit first */
}
You can also do it with math operators. You have to get the bit value (2 to the power of the bit index plus one) and subtract it at each iteration, so the modulo doesn't return bits we have already seen:
for (int i = 0; i < 32; i++)
{
    /* 1ULL << (i + 1) is 2 to the power of (i + 1); a 64-bit constant
       avoids the overflow that (int)pow(2, 32) would hit at i == 31 */
    unsigned long long bit_value = 1ULL << (i + 1);
    unsigned int num_bit_value = value % bit_value;
    printf("%d ", num_bit_value ? 1 : 0);
    value -= num_bit_value;
}
int main() {
    int a = 0xFFFF;
    void *v = &a;             // v points to a
    int *aPtr = (int *) v;    // aPtr also points to a
    int b = *aPtr;            // b gets the value aPtr points to, aka a or 0xFFFF
    int aBit = (b >> 3) & 1;  // aBit now contains bit 3 of the original a value

    // toggle the bit
    if (aBit) {
        b &= ~(1 << 3);       // set bit 3 to 0
    } else {
        b |= (1 << 3);        // set bit 3 to 1
    }
    *aPtr = b;                // update original a
}
I found it easier to think of the memory as a continuous string of characters rather than a void pointer. This way you can address as many bits as you want.
Here is how I have done it.
unsigned char
get_bit(char *array, int bit)
{
    int byte, k;
    byte = bit / 8;
    k = 7 - bit % 8;
    return array[byte] & (1 << k);
}

void
set_bit(char *array, int bit, unsigned char value)
{
    int byte, k;
    byte = bit / 8;
    k = 7 - bit % 8;
    if (value)
        array[byte] |= (1 << k);
    else
        array[byte] &= ~(1 << k);
}
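Tying this back to the question, a hypothetical driver that prints the 32 bits at memory_address using these accessors (it assumes <stdio.h> and that memory_address points to readable memory):

char *bytes = (char *)memory_address;
for (int b = 0; b < 32; b++)
    printf("%d", get_bit(bytes, b) ? 1 : 0);   /* MSB of each byte first */
printf("\n");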
How about:
bool isBit4Set = ((*(unsigned int *)someAddress & 0x8) != 0);   /* cast the void* first; the parentheses matter, since != binds tighter than & */
*(unsigned int *)someAddress |= 0x8;   /* set bit 4 (mask 0x8) */
A generic solution for printing bytes and bits:
#include <stdio.h>

void dump_data(const void *object, size_t size)
{
    const unsigned char *bytes = (const unsigned char *) object;
    size_t i;

    printf("[ \n");
    for (i = 0; i < size; i++)
    {
        if (i % 4 == 0)
        {
            printf("#%p", (const void *) &bytes[i]);   /* %p, not %X, is the correct way to print an address */
            printf("[ ");
        }
        printf("%02x ", bytes[i] & 0xff);
        if ((i + 1) % 4 == 0)
            printf("]\n");
    }
    printf("]\n");

    printf("BINARY FORMAT\n");
    for (i = 0; i < size; i++)
    {
        printf("#%p", (const void *) &bytes[i]);
        printf("[ ");
        unsigned char value = bytes[i];
        for (int j = 0; j < 8; j++)
            printf("%d ", (value & (0x80 >> j)) ? 1 : 0);   /* mask walks from the MSB down; right-shifting the value instead would print the bits in reverse */
        printf("]\n");
    }
}
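A hypothetical driver for it (assumes the dump_data above is in scope):

int main(void)
{
    unsigned int x = 0x12345678;
    dump_data(&x, sizeof x);
    return 0;
}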
And for simple usage (bool needs <stdbool.h> in C):
bool getBit(void *data, int bit) { return (*(unsigned int *)data & 1u << bit) != 0; }
void setBit(void *data, int bit, bool set) { if (set) { *(unsigned int *)data |= 1u << bit; } else { *(unsigned int *)data &= ~(1u << bit); } }