Unable to compute a CMAC using Cygwin and OpenSSL

I want to compute a CMAC using OpenSSL. I found this question which helped me.
But I am encountering a problem with the following code:
#include <stdio.h>
#include <openssl/cmac.h>

void dispHex(const unsigned char *buffer, unsigned int size) {
    unsigned int i = 0;
    for (i = 0; i < size - 1; i++) {
        printf("%02X ", buffer[i]);
    }
    printf("%02X\n", buffer[i]);
}

int main() {
    size_t out_len;
    unsigned char res[16];
    unsigned char mac_key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
    unsigned char msg[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                              0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };

    CMAC_CTX *cmac = CMAC_CTX_new();
    CMAC_Init(cmac, mac_key, 16, EVP_aes_128_cbc(), NULL);
    CMAC_Update(cmac, msg, sizeof(msg));
    CMAC_Final(cmac, res, &out_len);

    dispHex(res, sizeof(res));
    return 0;
}
I compile it with gcc -o test_cmac test_cmac_openssl.c -L C:/OpenSSL-Win32 -llibeay32 -I C:/OpenSSL-Win32/include and it produces test_cmac.exe without problems.
But when I run it (./test_cmac.exe), nothing happens: it just prints an empty line and stops:
xxx#DESKTOP /cygdrive/e/
$ ./test_cmac.exe
xx#DESKTOP /cygdrive/e/
Even if I add a printf("..."); before the CMAC computation, it behaves the same way.
What is strange is that the following program:
#include <stdio.h>
#include <openssl/hmac.h>

void dispHex(const unsigned char *buffer, unsigned int size) {
    unsigned int i = 0;
    for (i = 0; i < size - 1; i++) {
        printf("%02X ", buffer[i]);
    }
    printf("%02X\n", buffer[i]);
}

int main() {
    unsigned int out_len;
    unsigned char res[32];
    unsigned char mac_key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
    unsigned char msg[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                              0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };

    HMAC_CTX hmac;
    HMAC_CTX_init(&hmac);
    HMAC_Init_ex(&hmac, mac_key, 16, EVP_sha256(), NULL);
    HMAC_Update(&hmac, msg, sizeof(msg));
    HMAC_Final(&hmac, res, &out_len);

    dispHex(res, sizeof(res));
    return 0;
}
runs normally: after gcc -o test_hmac test_hmac_openssl.c -L C:/OpenSSL-Win32 -llibeay32 -I C:/OpenSSL-Win32/include and ./test_hmac.exe I get:
xxx#DESKTOP /cygdrive/e/
$ ./test_hmac.exe
...9A 21 F8 2D 60 84 6C 09 08 98 A5 1F 23 8C C8 8F C4 A9 0C C4 49 45 DA 10 B9 39 C0 93 C3 10 60 BE
xxx#DESKTOP /cygdrive/e/
So I'm a little confused... Why does it work with the HMAC primitive but not with the CMAC one? Has anybody encountered this kind of problem before?
I am using the 32-bit version of OpenSSL: openssl version returns OpenSSL 1.0.2e 3 Dec 2015.
I also checked that C:\OpenSSL-Win32\ is declared in the PATH environment variable.

Looking at CMAC_Init, the "not initialised" path is only taken when key, cipher and impl are all NULL and keylen is 0:

int CMAC_Init(CMAC_CTX *ctx, const void *key, size_t keylen,
              const EVP_CIPHER *cipher, ENGINE *impl)
{
    ...
    if (!key && !cipher && !impl && keylen == 0) {
        /* Not initialised */
        if (ctx->nlast_block == -1)
            return 0;
        if (!EVP_EncryptInit_ex(ctx->cctx, NULL, NULL, NULL, zero_iv))
            return 0;
        memset(ctx->tbl, 0, EVP_CIPHER_CTX_block_size(ctx->cctx));
        ctx->nlast_block = 0;
        return 1;
    }
    ...
}

So passing NULL for the ENGINE argument is fine; the failure has to come from somewhere else.

Why does it work with the HMAC primitive but not with the CMAC one? Has anybody encountered this kind of problem before?
I'm guessing most of the problems are due to mixing and matching compilers. Shining Light's Win32 OpenSSL is built with Microsoft's compiler, while you are using GCC. There's probably some mixing and matching of runtimes, too.
I'm kind of surprised the HMAC code worked as expected in that configuration. I guess you got lucky.
The following works for me, but there are some differences:
#include <openssl/evp.h>
#include <openssl/cmac.h>

#include <stdio.h>
#include <string.h>
#include <assert.h>

void dispHex(const unsigned char *buffer, unsigned int size) {
    unsigned int i = 0;
    for (i = 0; i < size; i++) {
        printf("%02X ", buffer[i]);
    }
    printf("\n");
}

int main() {
    int rc = 0;
    size_t out_len = 0;
    unsigned char res[EVP_MAX_MD_SIZE];
    unsigned char mac_key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                  0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
    unsigned char msg[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                              0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };

    CMAC_CTX *cmac = CMAC_CTX_new();
    assert(cmac != NULL);

    rc = CMAC_Init(cmac, mac_key, sizeof(mac_key), EVP_aes_128_cbc(), NULL);
    assert(rc == 1);

    rc = CMAC_Update(cmac, msg, sizeof(msg));
    assert(rc == 1);

    rc = CMAC_Final(cmac, res, &out_len);
    assert(rc == 1);

    dispHex(res, out_len);

    CMAC_CTX_free(cmac);
    return 0;
}
The differences between your configuration and the one I used:
- the same compiler is used for both OpenSSL and the test program
- res is EVP_MAX_MD_SIZE in size
- CMAC_Init uses sizeof(mac_key)
- out_len is initialized
- dispHex uses out_len, not sizeof(res)
Using sizeof(res) would print the 16 bytes of the MAC and then a run of garbage bytes, because res is declared as unsigned char res[EVP_MAX_MD_SIZE], which is larger than the MAC itself. I don't think you got that far, so just keep it in mind.
The program produces the following result. I don't know if this is the correct/expected result, but it produces a result and prints it:
$ ./test.exe
43 91 63 0E 47 4E 75 A6 2D 95 7A 04 1A E8 CC CC
OpenSSL does not support Cygwin-x64, so you will need to use the i686 versions of things. Also see Issue #4326: Failed to configure for Cygwin-x64 on the OpenSSL bug tracker.
Here's how to build OpenSSL under Cygwin-x64. OpenSSL's build script has a few quirks, so you can't use config; you have to use Configure and call out the triplet explicitly. The bug tracker issue is still valid because config is supposed to get this right.
$ curl https://www.openssl.org/source/openssl-1.0.2f.tar.gz -o openssl-1.0.2f.tar.gz
...
$ tar -xzf openssl-1.0.2f.tar.gz
...
$ cd openssl-1.0.2f
Then:
$ export KERNEL_BITS=64
$ ./Configure Cygwin-x86_64 shared no-ssl2 no-ssl3 --openssldir="$HOME/ssl"
...
$ make depend
...
$ make
...
$ make install_sw
install_sw installs the headers in $OPENSSLDIR/include, and the libraries in $OPENSSLDIR/lib. It does not install the man pages.
You then compile and link with:
$ gcc -I "$HOME/ssl/include" test.c -o test.exe "$HOME/ssl/lib/libcrypto.a"
Linking against libcrypto.a means you avoid library path problems. Things will "just work" for you.
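If you do link dynamically instead, a quick sanity check on Cygwin is to list which DLLs the executable will actually load at run time; cygcheck does this:
$ cygcheck ./test_cmac.exe
A mismatched or unexpected libeay32.dll showing up in that list would explain a silent failure like the one in the question.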

Related

Getting different encryption strings between openssl CLI and C code

Hello,

I've been playing with openssl and am trying to write a simple program in C for encrypting a string. I'm trying to replicate the following command to encrypt the string "test" and then see the encrypted version using a given key and IV and AES-CBC-128:
echo test | openssl enc -e -aes-128-cbc -nosalt -K 5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a -iv 00000000000000000000000000000000 | xxd
and this returns the encrypted string in hex of
a63b e13d 47a5 b94c c1cb 466e 28af 19d8
I'm trying to replicate this in C with the following:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <openssl/aes.h>

int main() {
    AES_KEY aes;
    unsigned char* input_string = "test";
    unsigned char* encrypt_string;
    unsigned int len;
    unsigned char key[] = {0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A,
                           0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A};
    unsigned char iv[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                          0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    uint32_t result;

    result = AES_set_encrypt_key(key, 128, &aes);
    if (result < 0) {
        fprintf(stderr, "Unable to set encryption key in AES\n");
        printf("%i", result);
        exit(-1);
    }

    // set the encryption length
    len = 0;
    if ((strlen("test") + 1) % AES_BLOCK_SIZE == 0) {
        len = strlen("test") + 1;
    } else {
        len = ((strlen("test") + 1) / AES_BLOCK_SIZE + 1) * AES_BLOCK_SIZE;
    }

    encrypt_string = (unsigned char*)calloc(len, sizeof(unsigned char));
    AES_cbc_encrypt(input_string, encrypt_string, 128, &aes, iv, AES_ENCRYPT);

    printf("encrypted string = ");
    for (int i = 0; i < len; ++i) {
        printf("%02X ", encrypt_string[i]);
    }
    printf("\n");
    return 0;
}
and this returns the encrypted string as
encrypted string = C2 5D 07 2D D5 EC DB 94 3B FE 31 9F 51 DE EE 93
which doesn't match what I get from the CLI. What am I missing here that is causing these not to match?
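Two differences between the two pipelines are worth noting. First, echo test emits "test\n": five bytes including a trailing newline, and openssl enc applies PKCS#7 padding by default. Second, the C code above zero-pads "test\0" and passes 128 where AES_cbc_encrypt expects a byte count, not a bit count. A minimal sketch that mirrors the CLI's exact input and padding, under those assumptions:

#include <stdio.h>
#include <string.h>
#include <openssl/aes.h>

int main(void) {
    unsigned char key[16] = {0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A,
                             0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A, 0x5A};
    unsigned char iv[16] = {0};                      /* all-zero IV, as in the CLI call */
    unsigned char block[AES_BLOCK_SIZE] = "test\n";  /* what "echo test" actually pipes in */
    unsigned char out[AES_BLOCK_SIZE];
    AES_KEY aes;
    size_t msg_len = 5;                              /* "test" plus the newline */
    unsigned char pad = AES_BLOCK_SIZE - msg_len;

    /* PKCS#7: fill the rest of the block with the pad length (0x0B here) */
    memset(block + msg_len, pad, pad);

    AES_set_encrypt_key(key, 128, &aes);
    /* the length argument is in bytes, not bits */
    AES_cbc_encrypt(block, out, AES_BLOCK_SIZE, &aes, iv, AES_ENCRYPT);

    for (int i = 0; i < AES_BLOCK_SIZE; i++)
        printf("%02x ", out[i]);
    printf("\n");
    return 0;
}

With the input and padding matched this way, the output should line up with the xxd dump from the CLI.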

tiny-aes-c AES CTR 128 cuts off decrypted string in some cases

I've been trying to use the AES CTR 128 from tiny-aes-c (https://github.com/kokke/tiny-AES-c) to encrypt a randomly generated token, and it works, but not all the time. In some cases the retrieved string after encrypting and decrypting is cut off at some point. Here's the code:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "token_auth.h"
#include "aes.h"

uint8_t * create_token() {
    static char charset[] = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
    uint8_t *token = malloc(sizeof(uint8_t) * (TOKEN_LENGTH + 1));
    int i = 0;
    srand(time(NULL));
    for (i = 0; i < TOKEN_LENGTH; i++) {
        int pos = rand() % (int)(strlen(charset) - 1);
        token[i] = (int) charset[pos] - 0;
    }
    token[TOKEN_LENGTH] = 0;
    return token;
}

int main() {
    uint8_t key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F };
    uint8_t iv[16]  = { 0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01, 0x00 };
    uint8_t *in = create_token();
    printf("\nInput: %s\nSize: %zu", (char *) in, strlen((char *) in));

    struct AES_ctx ctx;
    AES_init_ctx_iv(&ctx, key, iv);
    AES_CTR_xcrypt_buffer(&ctx, in, strlen((char *) in));
    AES_init_ctx_iv(&ctx, key, iv);
    AES_CTR_xcrypt_buffer(&ctx, in, strlen((char *) in));
    printf("\nDEC: %s\n", (char *) in);
    return 0;
}
TOKEN_LENGTH is 128. As an example of the behavior, the string NM5DlWyYInbeNtEWhBxGCdEjHSv2I6FzTMffJNgudrL2UsYe6zVJMA3wvAyhHeQD18UMXckcF8gBAfPGQNqGqwdW9MgS39w7huVfIgtoqJ212SKSIdBaJP9VErOJAmQT comes out NM5DlWyYInbeNtEWhBxGCdEjHSv2 after being encrypted and decrypted. I'm not really good at C, so it might just well be a problem with something else I've done, but at this point I'm lost. Any ideas? Thanks in advance.
The first call to AES_CTR_xcrypt_buffer encrypts the buffer in place in CTR mode.
The buffer still has the same size (128 in your case), but it can now contain NUL bytes.
The strlen call in the second AES_CTR_xcrypt_buffer call (the decryption) can therefore yield a length < 128 if the encrypted buffer contains a NUL byte.
That is also why it works in the cases where the encryption happens not to produce a NUL byte in the buffer.
So if you call it with TOKEN_LENGTH as the length parameter, decryption will give the original string again:
AES_CTR_xcrypt_buffer(&ctx, in, TOKEN_LENGTH);
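Putting it together, a sketch of the corrected round trip (assuming key, iv, in, and TOKEN_LENGTH are set up as in the question):

#include <stdio.h>
#include "aes.h"

/* encrypt and decrypt the token in place, always using the fixed length */
void round_trip(struct AES_ctx *ctx, uint8_t *key, uint8_t *iv, uint8_t *in)
{
    AES_init_ctx_iv(ctx, key, iv);
    AES_CTR_xcrypt_buffer(ctx, in, TOKEN_LENGTH);   /* encrypt: fixed length, never strlen */

    AES_init_ctx_iv(ctx, key, iv);                  /* reset the keystream before decrypting */
    AES_CTR_xcrypt_buffer(ctx, in, TOKEN_LENGTH);   /* decrypt with the same fixed length */

    printf("DEC: %s\n", (char *) in);
}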

C - CRC32 checksum does not match Wireshark on Ethernet Frame Check Sequence

I'm using an online CRC-32 calculator to check that my output is correct however it seems that Wireshark has a different expected FCS for the ethernet packet.
message2 is the Ethernet frame minus the FCS, as seen in Wireshark:
#include <stdio.h>
#include <stdint.h>

unsigned int crc32b(unsigned char *message) {
    int i, j;
    unsigned int byte, crc, mask;

    i = 0;
    crc = 0xFFFFFFFF;
    while (message[i] != 0) {
        //printf("%i %x \n\n", i, message[i]);
        byte = message[i];
        crc = crc ^ byte;
        for (j = 7; j >= 0; j--) {
            mask = -(crc & 1);
            crc = (crc >> 1) ^ (0xEDB88320 & mask);
        }
        i = i + 1;
    }
    return ~crc;
}

int main(void)
{
    unsigned char * message = "hello test";
    unsigned char * message2 = "aabbccddeeff5cb9017c5a53080000000000000000000000000000";

    unsigned int res = crc32b(message2);
    printf("%x\n", res);
    return 0;
}
I've tried using different polynomials as defined in [1 - subsection CRC-32 IEEE 802.3], but the result does not match Wireshark.
Output using the 0xEDB88320 polynomial: 0xd81e4af3
Wireshark FCS expected: 0xa8cd3084
I'd really like to fill in the FCS for my ethhdr packet myself; I guess that when a packet is crafted in software, the FCS isn't filled in by the NIC...
Sources:
[1] - http://crppit.epfl.ch/documentation/Hash_Function/WiKi/Cyclic_redundancy_check.htm
Your implementation is definitely correct (for NUL-terminated C strings). It's maybe a wrong configuration of the network interface. By default, Wireshark doesn't get the FCS from the network driver. If you use Linux and the driver supports it, you must enable FCS delivery with ethtool.
Unfortunately, on my system this only works for receiving frames:
$ ethtool -K eth0 rx-fcs on
See this for details.
I use a slightly different algorithm in my embedded projects (for AVR microcontrollers) and it works perfectly for me:
#define CRC_POLY 0xEDB88320

uint32_t crc32_calc(uint8_t *data, int len)
{
    int i, j;
    uint32_t crc;

    if (!data)
        return 0;
    if (len < 1)
        return 0;

    crc = 0xFFFFFFFF;
    for (j = 0; j < len; j++) {
        crc ^= data[j];
        for (i = 0; i < 8; i++) {
            crc = (crc & 1) ? ((crc >> 1) ^ CRC_POLY) : (crc >> 1);
        }
    }
    return (crc ^ 0xFFFFFFFF);
}
A real world example:
The Ethernet frame in Wireshark (with ethtool rx-fcs on):
The test with the implementation I use:
uint8_t frame[] = { 0x20, 0xcf, 0x30, 0x1a, 0xce, 0xa1, 0x62, 0x38,
                    0xe0, 0xc2, 0xbd, 0x30, 0x08, 0x06, 0x00, 0x01,
                    0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0x62, 0x38,
                    0xe0, 0xc2, 0xbd, 0x30, 0x0a, 0x2a, 0x2a, 0x01,
                    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x2a,
                    0x2a, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                    0x00, 0x00, 0x00, 0x00 };

printf("0x%x\n", crc32_calc(frame, sizeof(frame)));
The output:
$ ./fcs-test
0x6026b722
$
You can see that Wireshark reports 0x22bf2660 as the correct FCS. The output differs only because of byte order; the CRC calculation algorithm itself is correct.
EDIT:
I have modified your code:
uint32_t crc32b(uint8_t *message, int len) {
    int i, j;
    uint32_t crc, mask;
    uint8_t byte;

    crc = 0xFFFFFFFF;
    for (j = 0; j < len; j++) {
        byte = message[j];
        crc = crc ^ byte;
        for (i = 7; i >= 0; i--) {
            mask = -(crc & 1);
            crc = (crc >> 1) ^ (0xEDB88320 & mask);
        }
    }
    return ~crc;
}
I added a length argument, because your implementation only works correctly when message is a NUL-terminated C string. If your input is a byte array, you get an incorrect CRC value.
See the difference (array vs. C string):
uint8_t msg_arr[] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x5c, 0xb9, 0x01, 0x7c, 0x5a, 0x53, 0x08,
                      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
char *msg_str = "aabbccddeeff5cb9017c5a53080000000000000000000000000000";

printf("0x%x\n", crc32b(msg_arr, sizeof(msg_arr)));
printf("0x%x\n", crc32b(msg_str, strlen(msg_str)));
Output:
$
0x3422dd71
0xd81e4af3
$
There are a number of problems with your code. There are also plenty of existing implementations you could compare against (e.g., this one, linked from the real Wikipedia page on CRC).
unsigned char * message2 = "aabbccddeeff5cb9017c5a53080000000000000000000000000000";
Are you hoping that this will be the octet sequence 0xAA 0xBB 0xCC ... as you see it in Wireshark? Because that isn't at all what you have.
This string actually contains 0x61 0x61 0x62 0x62 ... (assuming your platform uses ASCII encoding), because it is a character string, not an octet string.
Specifically, here: byte = message[i]; you assume the first 8 bits of your message are an octet, and again I assume, since you didn't say, that you expected this to be 0xAA. It will actually be 0x61.
If you want this to work correctly, translate each pair of characters into an integer value using strtoul(pair, NULL, 16) or similar.
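For instance, a minimal sketch of such a conversion (hex_to_bytes is a hypothetical helper, not part of any library):

#include <stdlib.h>

/* Decode a string like "aabbcc..." into raw octets; returns the number of bytes written. */
size_t hex_to_bytes(const char *hex, unsigned char *out, size_t out_len)
{
    size_t n = 0;
    while (hex[0] != '\0' && hex[1] != '\0' && n < out_len) {
        char pair[3] = { hex[0], hex[1], '\0' };
        out[n++] = (unsigned char)strtoul(pair, NULL, 16);
        hex += 2;
    }
    return n;
}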
You have a loop for (j = 7; j >= 0; j--) but never use j inside it. You do use the integer constant 1 in an odd-looking way: is there supposed to be a (1 << j) or something?
I suggest fixing the obvious errors and then writing some self-contained tests before you try comparing whole frames with Wireshark. The code you posted has some basic errors that should be tested, identified and fixed before you get to that point.
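One standard self-check (a sketch, assuming the length-taking crc32b variant from the edit above): the CRC-32 used by Ethernet has the published check value 0xCBF43926 for the nine ASCII characters "123456789".

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* assumes: uint32_t crc32b(uint8_t *message, int len) */
void crc32_self_test(void)
{
    const char *check = "123456789";
    /* 0xCBF43926 is the standard CRC-32 (IEEE 802.3) check value */
    assert(crc32b((uint8_t *)check, (int)strlen(check)) == 0xCBF43926);
}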
I'm not sure about your question, but if you want to calculate the checksum of a network packet, you have to lay the data out in its proper structure.
Please also make sure your problem is not related to endianness.
Network byte order is big-endian, and this is the point where things get a little bit harder.
Little-endian is mostly used in PCs, but this may vary by hardware and manufacturer.
For example, a 2-byte (16-bit) integer with value 255:
Little-endian: FF 00
Big-endian: 00 FF
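A quick way to see this on your own machine (a small sketch, assuming a POSIX system for htons):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>  /* htons */

int main(void)
{
    uint16_t v = 255;
    unsigned char *p = (unsigned char *)&v;

    printf("host order bytes:    %02X %02X\n", p[0], p[1]);  /* FF 00 on a little-endian PC */
    v = htons(v);
    printf("network order bytes: %02X %02X\n", p[0], p[1]);  /* 00 FF */
    return 0;
}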
I'm not sure what kind of checksum you are trying to match, but a checksum usually covers not only the data field; it mostly includes all flags and options, and it is computed as a last step. That's why the implementation requires the corresponding data structure.
As for checksums in general: there are a lot of cases where Wireshark shows an invalid checksum. It can be caused by the kernel, a virtual adapter, accelerated networking, a dedicated checksum engine in your NIC, etc.
Example for TCP header:
/* include/linux/tcp.h */
struct tcphdr {
    __u16 source;
    __u16 dest;
    __u32 seq;
    __u32 ack_seq;
#if defined(__LITTLE_ENDIAN_BITFIELD)
    __u16 res1:4,
          doff:4,
          fin:1,
          syn:1,
          rst:1,
          psh:1,
          ack:1,
          urg:1,
          ece:1,
          cwr:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
    __u16 doff:4,
          res1:4,
          cwr:1,
          ece:1,
          urg:1,
          ack:1,
          psh:1,
          rst:1,
          syn:1,
          fin:1;
#else
#error "Adjust your <asm/byteorder.h> defines"
#endif
    __u16 window;
    __u16 check;
    __u16 urg_ptr;
};

How to correctly convert a TAG value to the right format in order to verify an HMAC?

I'm working on HMAC generation and verification to check data integrity. I can generate the MAC value correctly, but when sending it through a socket to another program for verification, I ran into a formatting mismatch. I appreciate your support. Thanks.
unsigned char* MAC(unsigned char* key, unsigned char* message)
{
    unsigned char* result;
    unsigned int result_len = 32;
    int i;

    result = (unsigned char*) malloc(sizeof(char) * result_len);
    result = HMAC(EVP_sha256(), key, strlen(key), message, strlen(message), NULL, NULL);
    return result;
}

int verifyMAC(unsigned char* key, unsigned char* message, unsigned char* receivedTag)
{
    printf("\n\n ==================== MAC Verification ==================\n\n");
    unsigned char* newHash; // newly generated hash value
    unsigned int newHash_len = 32;
    int i, flag = 0;

    newHash = (unsigned char*) malloc(sizeof(char) * newHash_len);
    newHash = HMAC(EVP_sha256(), key, strlen(key), message, strlen(message), NULL, NULL);

    for (i = 0; i != newHash_len; i++)
    {
        if (receivedTag[i] != newHash[i])
        {
            printf("DATA MISMATCH: Found %02X instead of %02X at index %d!\n", newHash[i], receivedTag[i], i);
            break;
        }
    }
    if (i == newHash_len)
    {
        printf("MAC verified!\n");
        flag = 1;
    }
    return flag;
}

int main(int argc, char *argv[])
{
    unsigned char* key = "1234567890";
    unsigned char* message = (unsigned char*) "hello world";
    ....
}
Console result:
Hashed data: E4 5F 60 72 61 7C CE 5E 06 A9 5B E4 81 C4 33 51 02 3D 99 23 35 99 EA C9 FD AF FC 95 81 42 62 9A
==================== MAC Verification ==================
DATA MISMATCH: Found E4 instead of 65 at index 0!
ERROR: data is modified
I thought this problem was somewhat interesting, so I went to the trouble of recreating the scenario. Maybe this is not even right, but here is a simple case of what I think the problem is:
#include <stdio.h>

int main(int argc, char *argv[])
{
    // the original hash
    unsigned char newHash[] = {0xE4, 0x5F, 0x60, 0x72, 0x61, 0x7C, 0xCE, 0x5E, 0x06, 0xA9, 0x5B, 0xE4, 0x81, 0xC4, 0x33, 0x51,
                               0x02, 0x3D, 0x99, 0x23, 0x35, 0x99, 0xEA, 0xC9, 0xFD, 0xAF, 0xFC, 0x95, 0x81, 0x42, 0x62, 0x9A};
    // what I think is received from the socket
    unsigned char* receivedTag = "e45f6072617cce5e06a95be481c43351023d99233599eac9fdaffc958142629a";

    for (int i = 0; i != 32; i++)
    {
        if (receivedTag[i] != newHash[i])
        {
            printf("DATA MISMATCH: Found %02X instead of %02X at index %d!\n", newHash[i], receivedTag[i], i);
            break;
        }
    }
    return 0;
}
and the output was
DATA MISMATCH: Found E4 instead of 65 at index 0!
So I thought the solution would be to just convert the hex array to a string, like the one received from the socket.
Maybe this is not the most elegant way to do things, but it is a solution nonetheless.
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

char* hexStringToCharString(unsigned char hash[], int length);

int main(int argc, char *argv[])
{
    // the original hash
    unsigned char newHash[] = {0xE4, 0x5F, 0x60, 0x72, 0x61, 0x7C, 0xCE, 0x5E, 0x06, 0xA9, 0x5B, 0xE4, 0x81, 0xC4, 0x33, 0x51,
                               0x02, 0x3D, 0x99, 0x23, 0x35, 0x99, 0xEA, 0xC9, 0xFD, 0xAF, 0xFC, 0x95, 0x81, 0x42, 0x62, 0x9A};
    // what I think is received from the socket
    unsigned char* receivedTag = "e45f6072617cce5e06a95be481c43351023d99233599eac9fdaffc958142629a";

    char *newString = hexStringToCharString(newHash, 32);

    for (int i = 0; i != strlen(newString); i++)
    {
        if (receivedTag[i] != newString[i])
        {
            printf("DATA MISMATCH: Found %02X instead of %02X at index %d!\n", newHash[i], receivedTag[i], i);
            break;
        }
    }
    free(newString);
    printf("Yay\n");
    return 0;
}

char* hexStringToCharString(unsigned char hash[], int length) {
    char temp[3];
    // need length*2 characters, which is 64, plus one for the NUL!
    char *theString = (char *)malloc(sizeof(char) * ((length * 2) + 1));

    strcpy(theString, "");
    for (int i = 0; i < length; i++) {
        sprintf(temp, "%02x", hash[i]);
        strcat(theString, temp);
    }
    return theString;
}
The output in this case:
Yay
So maybe this is entirely wrong, but if you find that this solution needs editing, comment below.
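Going the other direction also works: decode the received hex string back into raw bytes and compare those. A sketch (verifyMacHex is a hypothetical helper; CRYPTO_memcmp is OpenSSL's constant-time comparison):

#include <stdio.h>
#include <openssl/crypto.h>   /* CRYPTO_memcmp */
#include <openssl/evp.h>      /* EVP_MAX_MD_SIZE */

/* Decode a tag sent as lowercase hex and compare it with the computed MAC.
   Returns 1 on match, 0 otherwise. Assumes receivedHex holds 2*mac_len hex digits. */
int verifyMacHex(const unsigned char *computed, const char *receivedHex, size_t mac_len)
{
    unsigned char received[EVP_MAX_MD_SIZE];
    size_t i;

    if (mac_len > sizeof(received))
        return 0;
    for (i = 0; i < mac_len; i++) {
        unsigned int byte;
        if (sscanf(receivedHex + 2 * i, "%2x", &byte) != 1)
            return 0;
        received[i] = (unsigned char)byte;
    }
    /* constant-time compare avoids leaking the position of the first mismatch */
    return CRYPTO_memcmp(computed, received, mac_len) == 0;
}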

Wrong usage of AES library?

I want to use a memory-saving AES-128 implementation. I found the implementation by Karl Malbrain.
I am using it with the code below:
void encryptUboot(void) {
    //uint8_t key[AES_KEY_LENGTH] = {0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff, 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99};
    uint8_t key[AES_KEY_LENGTH] = {0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x21, 0x21};
    uint8_t keyschedule[AES_KEY_LENGTH * AES_ROUNDS] = {0x00};
    uint8_t message[5] = "test";
    uint8_t cipher[16] = {0x00};
    uint8_t i;

    if (debug) printf("\n[D] Running AES-128 encryption\n");

    aes_expand_key(key, keyschedule);
    aes_encrypt(message, keyschedule, cipher);

    printf("message: %s | cipher: ", message);
    for (i = 0; i < AES_KEY_LENGTH; i++) {
        printf("%02x ", cipher[i]);
    }
}
This outputs:
[D] Running AES-128 encryption
message: test | cipher: 2d 58 45 71 24 43 f5 cd 69 6d 07 b3 a3 29 de 8f
However, using the code from here (zip file) with the code below ...
// AES usage example
// compile as: gcc main.c aes.h aes.c
#include <stdlib.h>
#include "aes.h"
#include <stdio.h>
#include <string.h>

int main(int argc, char *argv[])
{
    unsigned char key[KEY_128] = {0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x74, 0x65, 0x73, 0x74, 0x21, 0x21};
    unsigned char ptext[16] = "test";
    unsigned char ctext[16];
    unsigned char decptext[16];
    unsigned int i = 0;
    aes_ctx_t *ctx;

    init_aes();
    ctx = aes_alloc_ctx(key, sizeof(key));
    if (!ctx) {
        perror("aes_alloc_ctx");
        return EXIT_FAILURE;
    }

    aes_encrypt(ctx, ptext, ctext);
    for (i = 0; i < KEY_128; i++) printf("%02x ", ctext[i]);
    puts("");

    aes_decrypt(ctx, ctext, decptext);
    puts(decptext);

    aes_free_ctx(ctx);
    return EXIT_SUCCESS;
}
... it outputs a different ciphertext:
1f 53 3f 60 15 d5 ab 16 69 b6 c6 3b 9e 77 2f 0c
test
Do you see my mistake? Obviously, I am using these libraries in the wrong way.
Thanks,
-P
Although I couldn't find the exact function you use in Malbrain's code, I believe your problem lies in the different array lengths for the message. The algorithm encrypts blocks of 128 bits (16 bytes), but you only allocated 5 bytes.
uint8_t message[5] = "test";
vs
unsigned char ptext[16] = "test";
Try initialising it with exactly the same data:
uint8_t message[16];
memset(message, 0, sizeof(message));
memcpy(message, "test", 5);
With the message zero-padded to a full 16-byte block like this, both implementations see identical input and should produce the same ciphertext.
