ZIP 2.0 decryption produces unexpected results (C)

I am trying to implement the 'Traditional PKWARE Decryption' described in Section 6.1 of the .ZIP File Format Specification (https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT), but the results are not as expected.
For testing, I am using a small text file zipped with TUGZip using no compression.
The CRC-32 of the original file is 0x7259B728.
The 12 byte 'encryption header' in the .ZIP file contains E6-8B-FE-EB-40-28-E1-55-4E-44-28-E0
The 'password' is 61-7A-62-79-63 (azbyc).
Encryption header after decryption contains C3-7D-EE-4C-11-B3-39-A3-97-55-21-B3
Section 6.1.6 of the specification says:
"After the header is decrypted, the last 1 or 2 bytes in Buffer should be the high-order word/byte of the CRC for the file being decrypted ...."
so I would expect 0x72, but I get 0xB3.
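In code, the check I expect to pass is roughly this (a minimal sketch; info is a ZIP_INFO already set up with the password, as in the listing below):
unsigned char buf[12];        /* the 12 encryption header bytes from the archive */
uint32_t crc = 0x7259B728;    /* CRC-32 of the original file */
zip_decrypt_bytes(&info, buf, 12);
/* per section 6.1.6, the last decrypted byte should equal the
   high-order byte of the CRC, i.e. 0x72 here */
if (buf[11] != (unsigned char)(crc >> 24))
    printf("password check failed\n");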
As far as I can tell, I followed the spec exactly, but I am still quite new to C so I recreated the program in C#, and got the same results. I'm obviously missing or misunderstanding something. Any help would be greatly appreciated.
Thank you in advance.
zip.c
//struct ZIP_INFO contains member 'uint32_t *keys'
//
//At initialisation, the values are:
//keys[0] = 0x12345678
//keys[1] = 0x23456789
//keys[2] = 0x34567890
unsigned char zip_decrypt_byte(uint32_t *keys)
{
    uint16_t u = (uint16_t)((keys[2] & 0xFFFF) | 0x02);
    return (unsigned char)(((u * (u ^ 0x01)) >> 8) & 0xFF);
}
void zip_decrypt_bytes(ZIP_INFO *info, unsigned char *buf, size_t len)
{
    unsigned char *u = buf;
    while (len--)
    {
        *u ^= zip_decrypt_byte(info->keys);
        zip_update_keys(info->keys, u++);
    }
}
void zip_set_password(ZIP_INFO *info, unsigned char *password, size_t len)
{
    unsigned char *u = password;
    while (len--)
        zip_update_keys(info->keys, u++);
}
void zip_update_keys(uint32_t *keys, unsigned char *c)
{
    unsigned char u;
    keys[0] = crc_32(keys[0], c, 1);
    keys[1] += (keys[0] & 0xFF);
    keys[1] = ((keys[1] * 134775813) + 1);
    u = (unsigned char)((keys[1] >> 24) & 0xFF);
    keys[2] = crc_32(keys[2], &u, 1);
}
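For completeness, the harness I am testing with looks roughly like this (a minimal sketch; includes elided, ZIP_INFO setup reduced to the key values shown above):
int main(void)
{
    uint32_t keys[3] = { 0x12345678, 0x23456789, 0x34567890 };
    unsigned char header[12] = { 0xE6, 0x8B, 0xFE, 0xEB, 0x40, 0x28,
                                 0xE1, 0x55, 0x4E, 0x44, 0x28, 0xE0 };
    unsigned char password[5] = { 0x61, 0x7A, 0x62, 0x79, 0x63 }; /* "azbyc" */
    ZIP_INFO info;
    size_t i;

    crc_initialise();
    info.keys = keys;
    zip_set_password(&info, password, 5);
    zip_decrypt_bytes(&info, header, 12);
    for (i = 0; i < 12; i++)
        printf("%02X ", header[i]);  /* last byte: expected 0x72, got 0xB3 */
    printf("\n");
    return 0;
}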
crc.c
static uint32_t crc_table[256];
uint32_t crc_32(uint32_t crc, unsigned char *buf, size_t len)
{
    unsigned char *p;
    crc ^= 0xFFFFFFFF;
    p = buf;
    while (len--)
        crc = (crc_table[((crc ^ *p++) & 0xFF)] ^ (crc >> 8));
    return (crc ^ 0xFFFFFFFF);
}
void crc_initialise()
{
    uint32_t u0, u1, u2;
    for (u0 = 0; u0 < 256; u0++)
    {
        u1 = u0;
        for (u2 = 0; u2 < 8; u2++)
        {
            if (u1 & 0x01)
                u1 = ((u1 >> 1) ^ 0xEDB88320);
            else
                u1 >>= 1;
        }
        crc_table[u0] = u1;
    }
}
Contents of 'test.zip'
50-4B-03-04-14-00-01-00-00-00-F9-1E-55-47-28-B7-59-72-2B-00-00-00-1F-00-00-00-08-00-00-00-74-65-78-74-2E-74-78-74-E6-8B-FE-EB-40-28-E1-55-4E-44-28-E0-15-8E-13-49-CD-B0-B7-E4-43-40-9D-BA-5D-41-A8-BD-EA-47-07-AD-0E-4A-2F-BA-BD-1F-55-94-D3-6E-77-50-4B-01-02-14-00-14-00-01-00-00-00-F9-1E-55-47-28-B7-59-72-2B-00-00-00-1F-00-00-00-08-00-00-00-00-00-00-00-01-00-20-00-00-00-00-00-00-00-74-65-78-74-2E-74-78-74-50-4B-05-06-00-00-00-00-01-00-01-00-36-00-00-00-51-00-00-00-00-00
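For reference, decoding the local file header at the start of that dump (all multi-byte fields little-endian):
50 4B 03 04              local file header signature
14 00                    version needed to extract (2.0)
01 00                    general purpose bit flags (bit 0 set: encrypted)
00 00                    compression method (0 = stored)
F9 1E 55 47              last mod file time/date
28 B7 59 72              CRC-32 = 0x7259B728 (matches the original file)
2B 00 00 00              compressed size 43 (12-byte encryption header + 31 data bytes)
1F 00 00 00              uncompressed size 31
08 00 00 00              file name length 8, extra field length 0
74 65 78 74 2E 74 78 74  "text.txt"
then the 12-byte encryption header E6-8B-FE-EB-40-28-E1-55-4E-44-28-E0 followed by the encrypted data.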

Related

Swap alternate bytes in an integer

Problem: swap alternate bytes as below:
Input: uint8_t buf[4] = {0xab,0xcd,0xef,0xba};
Output: 0xcdabbaef
I have the code below that does this, but I am wondering if there is a better way to shorten it.
#include <stdint.h>
#define SWAP_16(buf) (((buf & 0xFF00) >> 8) | ((buf & 0x00FF) << 8))
int main()
{
    unsigned int value;
    int i, j = 0;
    uint8_t buf[4] = {0,4,0,0};
    unsigned int mask = 0xFFFF;
    unsigned int tmp_value = 0;  /* must be initialised before |= */
    unsigned int size = 4;
    for (i = size - 1; i >= 0; i--) {
        tmp_value |= (buf[j] << 8*i);
        j++;
    }
    value = SWAP_16((tmp_value & (mask << 16)) >> 16) << 16 |
            SWAP_16(tmp_value & mask);
    return 0;
}
Assuming unsigned int is 32-bits, you can simply use:
value = ((value & 0xff00ff00) >> 8) | ((value & 0x00ff00ff) << 8);
to swap the bytes in each pair of bytes in value. It's similar to your SWAP_16() macro except that it does both halves of the value at once.
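For example, a quick sanity check with the bytes from the question packed into a 32-bit value:
#include <stdio.h>

int main(void)
{
    unsigned int value = 0xabcdefba;
    value = ((value & 0xff00ff00) >> 8) | ((value & 0x00ff00ff) << 8);
    printf("0x%08x\n", value);  /* prints 0xcdabbaef */
    return 0;
}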
unsigned int forward = 0x12345678;
unsigned int reverse;
unsigned char *f = (unsigned char *)&forward;
unsigned char *r = (unsigned char *)&reverse;
r[0] = f[3];
r[1] = f[2];
r[2] = f[1];
r[3] = f[0];
now reverse will be 0x78563412 (note this reverses all four bytes, rather than swapping alternate bytes as asked)
Here is one way:
#include <stdio.h>
#include <stdint.h>
int main(void)
{
    uint8_t buf[4] = {0xab,0xcd,0xef,0xba};
    unsigned int out = buf[1] * 0x1000000u + buf[0] * 0x10000u + buf[3] * 0x100u + buf[2];
    printf("%x\n", out);
}
It's not immediately clear from your question whether it's an option, but you could simply swap the bytes in the array if you know the size won't change:
#include <stdio.h>
#include <stdint.h>
#define SWAPPED(b) { b[1], b[0], b[3], b[2] }
#define PRINT(b) printf("0x0%x\n", *((uint32_t*)b));
int main()
{
    uint8_t buf[4] = {8,4,6,1};
    uint8_t swapped[4] = SWAPPED(buf);
    PRINT(buf);
    PRINT(swapped);
    return 0;
}
The output for this on my machine is:
0x01060408
0x06010804
This is because of endianness and because the macro prints the array casted to an integer type, but the bytes are swapped as you ask in your question.
Hope that helps.
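As an aside, the cast in PRINT technically violates strict aliasing; a memcpy-based variant (my naming) is well-defined if that matters:
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void print_as_u32(const uint8_t b[4])
{
    uint32_t v;
    memcpy(&v, b, sizeof v);  /* well-defined, unlike *(uint32_t *)b */
    printf("0x%08x\n", v);    /* same endianness caveat applies */
}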
Use a union
#include <stdint.h>
/* note: no trailing ';' after while (0), so the macro composes safely with if/else */
#define SWAP_VAR(T, v1, v2) do { \
    T v = (v1); \
    (v1) = (v2); \
    (v2) = v; \
} while (0)
union U32
{
    uint32_t u;
    unsigned char a[4];
};
uint32_t swap32(uint32_t u)
{
    union U32 u32 = {u};
    SWAP_VAR(unsigned char, u32.a[0], u32.a[1]);
    SWAP_VAR(unsigned char, u32.a[2], u32.a[3]);
    return u32.u;
}
Use it like this:
#include <stdio.h>
#include <stdint.h>
uint32_t swap32(uint32_t u);
int main(void)
{
    uint32_t u = 0x12345678;
    u = swap32(u);
    printf("0x%08x\n", u);  /* prints 0x34127856 */
}
Or assemble the swapped result directly from the bytes:
unsigned int n = ((unsigned int)buf[0] << 16) |
                 ((unsigned int)buf[1] << 24) |
                 ((unsigned int)buf[2] << 0) |
                 ((unsigned int)buf[3] << 8);

Reading big-endian files on a little-endian system

I have a data file that I need to read in C. It is comprised of alternating 16-bit integers stored in binary form, and I need only the first column (i.e., every other entry, starting at 0).
I have a simple python script that reads the files accurately:
import numpy as np
fname = '[filename]'
columntypes = np.dtype([('curr_pA', '>i2'),('volts', '>i2')])
test = np.memmap(fname, dtype=columntypes,mode='r')['curr_pA']
I want to port this to C. Because my machine is natively little-endian I need to manually perform the byte swap. Here's what I have done:
void swapByteOrder_int16(double *current, int16_t *rawsignal, int64_t length)
{
    int64_t i;
    for (i=0; i<length; i++)
    {
        current[i] = ((rawsignal[2*i] << 8) | ((rawsignal[2*i] >> 8) & 0xFF));
    }
}
int64_t read_current_int16(FILE *input, double *current, int16_t *rawsignal, int64_t position, int64_t length)
{
    int64_t test;
    int64_t read = 0;
    if (fseeko64(input, (off64_t) position*2*sizeof(int16_t), SEEK_SET))
    {
        return 0;
    }
    test = fread(rawsignal, sizeof(int16_t), 2*length, input);
    read = test/2;
    if (test != 2*length)
    {
        perror("End of file reached");
    }
    swapByteOrder_int16(current, rawsignal, length);
    return read;
}
In the read_current_int16 function I use fread to read a large chunk of data (both columns) into rawsignal array. I then call swapByteOrder_int16 to pick off every other value, and swap its bytes around. I then cast the result to double and store it in current.
It doesn't work; I get garbage as the output in the C code. I think I've been staring at it for too long and can no longer see my own errors. Can anyone spot anything glaringly wrong?
Perform the endian swap as unsigned math and then assign to double (multiplying by 1u promotes the value to unsigned before the shifts, so no sign bits get dragged in):
void swapByteOrder_int16(double *current, const int16_t *rawsignal, size_t length) {
    for (size_t i = 0; i < length; i++) {
        int16_t x = rawsignal[2*i];
        x = (x*1u << 8) | (x*1u >> 8);
        current[i] = x;
    }
}
I prefer this mask and shift combination (masking both halves keeps sign extension from the signed right shift out of the result):
current[i] = ((rawsignal[2*i] & 0x00ff) << 8) | ((rawsignal[2*i] >> 8) & 0x00ff);
As suggested by several people, doing the shifts as unsigned does the trick. I am answering this with my implementation just for the sake of completeness, since I tweaked it a little from the accepted answer (with the union declared before the function that uses it):
union int16bits
{
    uint16_t bits;
    int16_t currentval;
};
void swapByteOrder_int16(double *current, uint16_t *rawsignal, int64_t length)
{
    union int16bits bitval;
    int64_t i;
    for (i=0; i<length; i++)
    {
        bitval.bits = rawsignal[2*i];
        bitval.bits = (bitval.bits << 8) | (bitval.bits >> 8);
        current[i] = (double) bitval.currentval;
    }
}
Swapping bytes with unsigned types will make things much easier:
void swapByteOrder_int16(double *current, void const *rawsignal_, size_t length)
{
    uint16_t const *rawsignal = rawsignal_;
    size_t i;
    for (i=0; i<length; i++)
    {
        uint16_t tmp = rawsignal[2*i];
        tmp = ((tmp >> 8) & 0xffu) | ((tmp << 8) & 0xff00u);
        current[i] = (int16_t)(tmp);
    }
}
NOTE: when rawsignal is not aligned, you have to memcpy() it.
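Putting the pieces together, a minimal end-to-end sketch (the file name and buffer size here are made up for illustration):
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    enum { LENGTH = 1024 };
    int16_t raw[2 * LENGTH];   /* both columns, raw big-endian */
    double  current[LENGTH];   /* first column, converted */
    size_t  got, i;

    FILE *input = fopen("data.bin", "rb");  /* hypothetical file name */
    if (!input) { perror("fopen"); return 1; }
    got = fread(raw, sizeof(int16_t), 2 * LENGTH, input) / 2;
    fclose(input);

    for (i = 0; i < got; i++) {
        uint16_t tmp = (uint16_t)raw[2*i];
        tmp = (uint16_t)((tmp >> 8) | (tmp << 8));  /* big-endian -> host */
        current[i] = (int16_t)tmp;                  /* reinterpret as signed */
    }
    for (i = 0; i < got && i < 5; i++)
        printf("%g\n", current[i]);
    return 0;
}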

32-bit CRC calculation not matching an online generator

Can somebody help me with this calculation of a 32-bit CRC?
This is the piece of code I used for the 32-bit CRC calculation:
static unsigned int crc32_table[256];
void make_crc_table()
{
    int j;
    unsigned int crc, byte, mask;
    /* Set up the table, if necessary. */
    if (crc32_table[1] == 0)
    {
        for (byte = 0; byte <= 255; byte++)
        {
            crc = byte;
            for (j = 7; j >= 0; j--) // Do eight times
            {
                mask = -(crc & 1);
                crc = (crc >> 1) ^ (0xEDB88320 & mask);
            }
            crc32_table[byte] = crc;
        }
    }
    for (j=0;j<10;j++)
        printf("crc32_table[%d] = %x\n",j,crc32_table[j]);
}
unsigned int crc32cx(unsigned int crc, unsigned char *message, int len)
{
    unsigned int word;
    do
    {
        if((word = *(unsigned int *)message) & 0xFF)
        {
            crc = crc ^ word;
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            message = message + 4;
            len--;
        }
    } while(len == 0);
    return ~crc;
}
main()
{
    unsigned int crc = 0xFFFFFFFF;
    unsigned char buff[100] = "ABCDEFGH";
    int len; // length in bytes
    len = (((strlen(buff)%8)==0) ? (strlen(buff)/8) : ((strlen(buff)/8)+1));
    printf("length in bytes %d\n",len);
    make_crc_table();
    printf("crc = %x\n",crc32cx(crc,buff,len));
}
Can somebody tell me why this is not matching the online 32-bit CRC calculator at the link given below?
http://www.tahapaksu.com/crc/
For the input buff = "12345678", my CRC matches the online one.
For other values, like buff = "ABCD1234", the output does not match.
Thanks.
The problem here is the way the code is written; let me explain:
unsigned int crc32cx(unsigned int crc, unsigned char *message, int len)
{
    unsigned int word;
    do
    {
        if((word = *(unsigned int *)message) & 0xFF)
        {
            crc = crc ^ word;
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
            message = message + 4;
            len--;
        }
    } while(len == 0);
    return ~crc;
}
What this function does is read 4 chars at a time and then compute the CRC (an XOR operation); Wikipedia explains the math behind it.
But you do this operation len times:
unsigned char buff[100] = "ABCDEFGH";
int len; // length in bytes
printf("crc = %x\n",crc32cx(crc,buff,4));
So in your case, you will read 4x4 bytes, and your buffer will contain:
buff = ['A' 'B' 'C' 'D' 'E' 'F' 'G' 'H' '\0' 0 0 ... ]
You have 8 bytes of information followed by a '\0' terminator (and, because buff is initialised from a string literal, the rest of the array is zero-filled), yet you are reading 16 bytes.
I'm sure you can spot the problem by now, but just in case, I think that crc32cx(crc,buff,2) should solve your issue.
Your CRC code is very nonstandard. When using a table method you are supposed to feed in the data byte by byte, not in 4-byte chunks, which is definitely causing input and logic issues. The biggest one is the line if((word = *(unsigned int *)message) & 0xFF), which is completely unnecessary and will ignore valid incoming data in some scenarios.
A nice, simple, and clean crc32 C implementation can be seen here. After looking at it and yours and making a few tweaks it worked.
In your function you would change the loop and your variable to:
unsigned char word;
do
{
    word = *message;
    crc = crc ^ word;
    crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
    message++;
    len--;
} while(len > 0);
And now in your main you can find the length of your input data just using len = strlen(buff).
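Putting those fixes together, a minimal complete version might look like this (a sketch; the printed value can be compared against the linked calculator):
#include <stdio.h>
#include <string.h>

static unsigned int crc32_table[256];

void make_crc_table(void)
{
    unsigned int crc, byte, mask;
    int j;
    for (byte = 0; byte <= 255; byte++)
    {
        crc = byte;
        for (j = 7; j >= 0; j--)
        {
            mask = -(crc & 1);
            crc = (crc >> 1) ^ (0xEDB88320 & mask);
        }
        crc32_table[byte] = crc;
    }
}

unsigned int crc32cx(unsigned int crc, const unsigned char *message, int len)
{
    while (len-- > 0)
    {
        crc = crc ^ *message++;                    /* one byte at a time */
        crc = (crc >> 8) ^ crc32_table[crc & 0xFF];
    }
    return ~crc;
}

int main(void)
{
    const char *buff = "ABCD1234";
    make_crc_table();
    printf("crc = %x\n", crc32cx(0xFFFFFFFF, (const unsigned char *)buff, (int)strlen(buff)));
    return 0;
}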

Write and read an integer in a char *

I'm coding a little server in C (a chat server) and I want to write and read an integer (and other types of variables, like short int, unsigned int, and so on) in my char *data.
I have a structure DataOutput:
typedef struct t_dataoutput
{
    char *data;
    unsigned int pos;
} DataOutput;
And I have a function to write an int:
int writeInt(DataOutput *out, int i)
{
    // here I resize my char *data
    out->data[out->pos] = (i >> 24);
    out->data[out->pos + 1] = (i >> 16) & 0xff;
    out->data[out->pos + 2] = (i >> 8) & 0xff;
    out->data[out->pos + 3] = i & 0xff;
    out->pos += 4;
}
In my main function I want to try my code:
int main()
{
    DataOutput out;
    writeInt(&out, 9000);
    printf("%d\n", (out.data[0] << 24) | (out.data[1] << 16) | (out.data[2] << 8) | (out.data[3]));
}
But the result is not good... why? I don't understand :(
Sorry for my English, I'm French!
Thanks in advance for your help!
DataOutput.data should be changed to unsigned char, because plain char is sometimes signed, depending on the compiler, and shifting signed chars or casting them to int (even implicitly) will propagate the sign bit:
typedef struct t_dataoutput
{
    unsigned char *data;
    unsigned int pos;
} DataOutput;
The memory for out->data also has to be allocated before it is filled.
You can use realloc in the writeInt function like this:
int writeInt(DataOutput *out, int i)
{
    // here i resize my char *data
    out->data = realloc(out->data, out->pos + 4);
    // TODO: test if out->data == NULL --> not enough memory!
    out->data[out->pos] = (i >> 24) & 0xff;
    out->data[out->pos + 1] = (i >> 16) & 0xff;
    out->data[out->pos + 2] = (i >> 8) & 0xff;
    out->data[out->pos + 3] = i & 0xff;
    out->pos += 4;
}
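For the reading side, a symmetric sketch (the readInt name is mine; it reads back the big-endian bytes that writeInt stored, assuming the unsigned char fix above):
int readInt(const DataOutput *out, unsigned int pos)
{
    /* assemble as unsigned so no byte is shifted into the sign bit */
    unsigned int u = ((unsigned int)out->data[pos]     << 24)
                   | ((unsigned int)out->data[pos + 1] << 16)
                   | ((unsigned int)out->data[pos + 2] << 8)
                   |  (unsigned int)out->data[pos + 3];
    return (int)u;
}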
Of course I initialize my structure with this function:
void initDataOutput(DataOutput *out)
{
    out->data = NULL;
    out->pos = 0;
}
But I can't do that:
DataOutput out;
char tmp[4];
out.data = tmp;
out.pos = 0;
Because in my code I could write things like this, for example:
DataOutput out;
writeInt(&out, 1);
writeString(&out, "Hello");
sendData(&out);
where the int is a kind of packet id and "Hello" is the connection message; but if it's another id, it's not the same information in out->data.
Oh, sorry, I don't understand what you mean when you say to avoid shifting signed integers and chars.
Must I use an unsigned char *data in my DataOutput structure?

Converting number to byte array

Hi, I have a base-10 number, for example 3198, whose hex representation is 0x0C7E.
How do I convert that number to hex and put the hex value in a byte array in the format [00][0C][7E], assuming the biggest hex value I can have is 0xFFFFFF?
Maybe this will work?
uint32_t x = 0x0C7E;
uint8_t bytes[3];
bytes[0] = (x >> 0) & 0xFF;
bytes[1] = (x >> 8) & 0xFF;
bytes[2] = (x >> 16) & 0xFF;
/* Go back. */
x = (bytes[2] << 16) | (bytes[1] << 8) | (bytes[0] << 0);
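If the [00][0C][7E] ordering from the question (most significant byte first) is what's wanted, index from the top instead:
uint32_t x = 3198;            /* 0x000C7E */
uint8_t bytes[3];
bytes[0] = (x >> 16) & 0xFF;  /* 0x00 */
bytes[1] = (x >> 8)  & 0xFF;  /* 0x0C */
bytes[2] = (x >> 0)  & 0xFF;  /* 0x7E */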
A number is already a contiguous memory block; no need to convert it to yet ANOTHER array! Just fetch the separate bytes using pointer arithmetic:
EDIT: edited to be endianness-independent
#define FAST_ABS(x) ((x ^ (x>>31)) - (x>>31))
int is_big_endian(void)
{
    union {
        uint32_t i;
        char c[4];
    } bint = {0x01020304};
    return bint.c[0] == 1;
}
uint32_t num = 0xAABBCCDD;
int32_t N = is_big_endian() * 3;  /* signed, so FAST_ABS(0 - N) behaves */
printf("first byte 0x%02X\n"
       "second byte 0x%02X\n"
       "third byte 0x%02X\n"
       "fourth byte 0x%02X\n",
       ((unsigned char *) &num)[FAST_ABS(3 - N)],
       ((unsigned char *) &num)[FAST_ABS(2 - N)],
       ((unsigned char *) &num)[FAST_ABS(1 - N)],
       ((unsigned char *) &num)[FAST_ABS(0 - N)]
);
#include <stdio.h>
union uint32_value {
    unsigned int value;
    struct little_endian {
        unsigned char fou;
        unsigned char thi;
        unsigned char sec;
        unsigned char fir;
    } le;
    struct big_endian {
        unsigned char fir;
        unsigned char sec;
        unsigned char thi;
        unsigned char fou;
    } be;
};
int main(void)
{
    union uint32_value foo;
    foo.value = 3198;
    printf("%02x %02x %02x %02x\n", foo.le.fir, foo.le.sec, foo.le.thi, foo.le.fou);
    return 0;
}
