Given the following function
UART_write(UART_Handle handle, const void *buffer, size_t size);
I want to send an int8_t value over UART (log it).
What I tried:
int8_t value;
UART_write(uart, value, strlen(value));
const char *echoPrompt = (char *)value;
UART_write(uart, echoPrompt, sizeof(echoPrompt));
const char echoPrompt2[] = {value};
UART_write(uart, echoPrompt2, sizeof(echoPrompt2));
const char* buff = value;
UART_write(uart, value, strlen(value));
The best I got was logging the hex value.
Example of how the UART_write function works: in order to log "12", what I need to do is
const uint8_t value[] = {0x31, 0x32};
UART_write(uart, value, sizeof(value));
So my question is: how do I log my int8_t variable? (I need to log negative numbers as well.)
You will need to convert your integer to a string.
snprintf is a standard way to do this, if your libc provides it.
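For instance, a minimal sketch (assuming the same uart handle as in the question):
int8_t value = -42;
char buf[5]; // worst case is "-128" plus the terminator
int len = snprintf(buf, sizeof buf, "%d", value); // int8_t promotes to int, so "%d" is fine
if (len > 0)
    UART_write(uart, buf, (size_t)len);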
Convert uint8_t to an ascii string C
Determine the maximum string size needed for any value of that type (see "Is there a better way to size a buffer for printing integers?").
// LOG10_2_N / LOG10_2_D: a rational upper bound on log10(2) ~= 0.30103.
// 28/93 is one such choice (an assumption here; any ratio >= log10(2) merely over-allocates safely).
#define LOG10_2_N 28
#define LOG10_2_D 93
#define UINT_BUFFER10_SIZE(type) (1 + (CHAR_BIT*sizeof(type)*LOG10_2_N)/LOG10_2_D + 1)
Form the buffer
char buf[UINT_BUFFER10_SIZE(value)];
"Print" the uint8_t to the buffer.
int len = sprintf(buf, "%d", value);
// or pedantically
int len = snprintf(buf, sizeof buf, "%" PRId8, value); // see <inttypes.h>
assert(len >= 0 && (unsigned)len < sizeof buf);
Send it
UART_write(uart, buf, len);
how to log my int8_t variable
#define INT_BUFFER10_SIZE(type) (2 + ((CHAR_BIT*sizeof(type)-1)*LOG10_2_N)/LOG10_2_D + 1)
char buf[INT_BUFFER10_SIZE(ivalue)];
int len = sprintf(buf, "%d", ivalue);
UART_write(uart, buf, len);
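Putting the pieces together, a sketch of a complete logger (INT_BUFFER10_SIZE as defined above; the handle type comes from the question):
void UART_log_int8(UART_Handle uart, int8_t ivalue) {
    char buf[INT_BUFFER10_SIZE(ivalue)];
    int len = sprintf(buf, "%d", ivalue); // negative values come out as "-42" etc.
    if (len > 0)
        UART_write(uart, buf, (size_t)len);
}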
IMO, the code should add a helper function to send a string:
void UART_write_str(UART_Handle handle, const char *str) {
    UART_write(handle, str, strlen(str)); // was UART_write(uart, ...): use the handle parameter
}
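For example (a sketch; buf from the sprintf above is NUL-terminated):
UART_write_str(uart, "value = ");
UART_write_str(uart, buf);
UART_write_str(uart, "\r\n");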
I have the following issue: from a socket, I receive a hexadecimal string, then I convert it to a byte array and save it into a uint8_t array. Here is the conversion function (it works):
static size_t convert_hex(uint8_t *dest, size_t count, const char *src)
{
size_t i = 0;
unsigned int value; // "%x" expects an unsigned int *
for (i = 0; i < count && sscanf(src + i * 2, "%2x", &value) == 1; i++)
{
dest[i] = value;
}
return i;
}
Then I convert this byte array back to a hex string, just to check that the conversion was correct. It is correct, because the hexadecimal string received from the socket (before the conversion to a byte array) is the same string I get after converting the uint8_t array back to a hex string. The flow is: HEX_STRING -> UINT8_T ARRAY -> HEX_STRING
static size_t convert_hex_inv(char *dest, size_t count, const uint8_t *src)
{
size_t i = 0;
for (i = 0; i < count && sprintf(dest + i * 2, "%02X", src[i]) == 2; i++)
;
return i;
}
Anyway, when I print the byte array, the result seems strange, because it appears too short. Where is the problem? Is it in the print?
n = recvfrom(sockfd, (char *)buffer, 1024,
MSG_WAITALL, ( struct sockaddr *) &cliaddr,
&len);
buffer[n] = '\0';
printf("[SKT]\tHex String: %s. Message Type: %c\n", buffer, buffer[3]);
uint8_t uint8_payload[2048];
uint16_t uint_size = convert_hex(uint8_payload, n, buffer);
printf("[SKT]\tByte Array: %" PRIu8 "\n", uint8_payload);
char hex_array[n];
convert_hex_inv(hex_array, uint_size, uint8_payload);
printf("[SKT]\tHex String: %s\n", hex_array);
}
The result is:
[2021-11-11 12:02:23.410] [SKT] Hex String : 0201000000A8C0000000540000000000000000000000000446E88CEB36E7806FFEFFE000192E5B0F001C029FFFE30101F7D0000C003C000D000401D1C0FF6C3C80
[2021-11-11 12:02:23.410] [SKT] Byte Array: 4152851276 -> **THIS VALUE IS QUITE STRANGE**
[2021-11-11 12:02:23.410] [SKT] Hex String: 0201000000A8C0000000540000000000000000000000000446E88CEB36E7806FFEFFE000192E5B0F001C029FFFE30101F7D0000C003C000D000401D1C0FF6C3C80
With the statement
printf("[SKT]\tByte Array: %" PRIu8 "\n", uint8_payload);
You don't print the contents of the array uint8_payload. Instead you print the pointer to its first element (remember that uint8_payload will decay to &uint8_payload[0]).
To print all the elements you need to do it one by one in a loop.
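For example, a sketch using the variables from the question:
for (size_t i = 0; i < uint_size; i++)
    printf("%02X ", uint8_payload[i]); // one byte at a time, as two hex digits
printf("\n");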
I am creating an HMAC digest in a Java application and want to verify it in a C program. I have a hardcoded secret key in hex format.
I'm getting a segmentation fault while trying to calculate HmacSHA256 in C. I couldn't figure out what I am messing up.
The Java program:
byte[] decodedKey = hexStringToByteArray("d44d4435c5eea8791456f2e20d7e176a");
SecretKey key = new SecretKeySpec(decodedKey, 0, decodedKey.length, "AES");
try {
Mac mac = Mac.getInstance("HmacSHA256"); //Creating a Mac object
mac.init(key); //Initializing the Mac object
byte[] bytes = challenge.getBytes();
byte[] macResult = mac.doFinal(bytes);
return macResult;
} catch (NoSuchAlgorithmException e) {
System.out.println("Not valid algorithm"+ e);
} catch (InvalidKeyException e) {
System.out.println("Invalid key"+ e);
}
The C program:
const char* key = hexstr_to_char("d44d4435c5eea8791456f2e20d7e176a");
unsigned char *result;
unsigned int* resultlen;
hmac_sha256(key, strlen(key),
challenge, strlen("d44d4435c5eea8791456f2e20d7e176a"),
result, resultlen);
unsigned char* hmac_sha256(const void *key, int keylen,
const unsigned char *data, int datalen,
unsigned char *result, unsigned int* resultlen)
{
return HMAC(EVP_sha256(), key, keylen, data, datalen, result, resultlen);
}
unsigned char* hexstr_to_char(const char* hexstr)
{
size_t len = strlen(hexstr);
if (len % 2 != 0)
return NULL;
size_t final_len = len / 2;
unsigned char* chrs = (unsigned char*)malloc((final_len+1) * sizeof(*chrs));
for (size_t i=0, j=0; j<final_len; i+=2, j++)
chrs[j] = (hexstr[i] % 32 + 9) % 25 * 16 + (hexstr[i+1] % 32 + 9) % 25;
chrs[final_len] = '\0';
return chrs;
}
If you read the documentation for HMAC() more carefully, you'll see that there are a few ways to call the function to determine the size of the output buffer, or to let it return a static buffer.
You can pass NULL for the result and get the length from a first call, then allocate the result buffer and call again.
Or you can pass NULL for the result buffer, and it will return a static buffer (which you don't own); however, that is documented as not thread-safe.
The easiest way may be to rely on EVP_MAX_MD_SIZE to statically declare your buffer. You should also declare the length variable as unsigned int, not as a pointer, and pass its address with the & operator - this is very basic C.
Try this:
unsigned int resultlen = 0;
unsigned char resultbuf[EVP_MAX_MD_SIZE];
hmac_sha256(key, strlen(key), challenge, strlen("d44d4435c5eea8791456f2e20d7e176a"),
resultbuf, &resultlen);
Documentation:
https://www.openssl.org/docs/man1.1.1/man3/HMAC.html
There are many C language tutorials out there.
Folks,
I'm trying to troubleshoot an issue with the base64 function below. About 2-3% of the requests that pass through this process return an incorrect (too short) base64 output.
static const char *header_request_gce(request_rec *r, char *a)
{
char *tim = apr_palloc(r->pool, APR_RFC822_DATE_LEN);
apr_rfc822_date(tim, r->request_time);
char *uri = apr_psprintf(r->pool, "%s", r->uri);
char encode[32768];
//encode = malloc(strlen(tim)+strlen(uri)); /* make space for the new string (should check the return value ...) */
strcpy(encode, "GET\n\n\n");
strcat(encode, tim);
strcat(encode, "\n");
strcat(encode, uri);
unsigned int encode_length = strlen(encode);
unsigned char* result;
unsigned char* key = (unsigned char*) "2kcXHh+K+XLtI61/KIV3d1tVzOooTdeOqFii9osz";
static char res_hexstring[8192];
result = HMAC(EVP_sha1(), key, 40, encode, encode_length, NULL, NULL);
char *base64(const unsigned char *input, int length);
char *base64output = base64(result, strlen(result));
return base64output;
}
char *base64(const unsigned char *input, int length)
{
BIO *bmem, *b64;
BUF_MEM *bptr;
b64 = BIO_new(BIO_f_base64());
bmem = BIO_new(BIO_s_mem());
b64 = BIO_push(b64, bmem);
BIO_write(b64, input, length);
BIO_flush(b64);
BIO_get_mem_ptr(b64, &bptr);
char *buff = (char *)malloc(bptr->length);
memcpy(buff, bptr->data, bptr->length-1);
buff[bptr->length-1] = 0;
BIO_free_all(b64);
return buff;
}
The key above has been modified, of course, but kept in the correct character format.
This line is incorrect:
char *base64output = base64(result, strlen(result));
The data (the output from SHA-1) that you are encoding can contain NUL bytes, which means strlen returns a number that is too small (with probability 1 - (255/256)^20, approximately 7.5%). Rather than calling strlen you should just pass in the size as a constant. I believe that if you are just encoding a SHA-1 hash, the length will always be 20:
char *base64output = base64(result, 20);
There is probably a better way to get that length from an HMAC function or something (so that it updates automatically if you change the hashing algorithm), but I am, admittedly, not very familiar with the hashing functions you're using.
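For what it's worth, HMAC() reports the digest length through its last parameter, so a sketch along those lines (reusing the question's variables) would be:
unsigned int md_len = 0;
unsigned char *result = HMAC(EVP_sha1(), key, 40,
                             (const unsigned char *)encode, encode_length,
                             NULL, &md_len); // NULL output buffer: OpenSSL uses a static one and sets md_len
char *base64output = base64(result, md_len);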
I've found some MD5 code that consists of the following prototypes...
I've been trying to find out where I have to put the string I want to hash, what functions I need to call, and where to find the string once it has been hashed. I'm confused about what the uint32 buf[4] and uint32 bits[2] are in the struct.
struct MD5Context {
uint32 buf[4];
uint32 bits[2];
unsigned char in[64];
};
/*
* Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
* initialization constants.
*/
void MD5Init(struct MD5Context *context);
/*
* Update context to reflect the concatenation of another buffer full
* of bytes.
*/
void MD5Update(struct MD5Context *context, unsigned char const *buf, unsigned len);
/*
* Final wrapup - pad to 64-byte boundary with the bit pattern
* 1 0* (64-bit count of bits processed, MSB-first)
*/
void MD5Final(unsigned char digest[16], struct MD5Context *context);
/*
* The core of the MD5 algorithm, this alters an existing MD5 hash to
* reflect the addition of 16 longwords of new data. MD5Update blocks
* the data and converts bytes into longwords for this routine.
*/
void MD5Transform(uint32 buf[4], uint32 const in[16]);
I don't know this particular library, but I've used very similar calls. So this is my best guess:
unsigned char digest[16];
const char* string = "Hello World";
struct MD5Context context;
MD5Init(&context);
MD5Update(&context, (unsigned char const *)string, strlen(string)); // cast to match the prototype
MD5Final(digest, &context);
This will give you back the raw 16-byte digest. You can then turn it into a hex representation if you want to pass it around as a string.
char md5string[33];
for(int i = 0; i < 16; ++i)
sprintf(&md5string[i*2], "%02x", (unsigned int)digest[i]);
Here's a complete example:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#if defined(__APPLE__)
# define COMMON_DIGEST_FOR_OPENSSL
# include <CommonCrypto/CommonDigest.h>
# define SHA1 CC_SHA1
#else
# include <openssl/md5.h>
#endif
char *str2md5(const char *str, int length) {
int n;
MD5_CTX c;
unsigned char digest[16];
char *out = (char*)malloc(33);
MD5_Init(&c);
while (length > 0) {
if (length > 512) {
MD5_Update(&c, str, 512);
} else {
MD5_Update(&c, str, length);
}
length -= 512;
str += 512;
}
MD5_Final(digest, &c);
for (n = 0; n < 16; ++n) {
    snprintf(&out[n*2], 3, "%02x", (unsigned int)digest[n]); // 3 = two hex digits + NUL
}
return out;
}
int main(int argc, char **argv) {
char *output = str2md5("hello", strlen("hello"));
printf("%s\n", output);
free(output);
return 0;
}
As other answers have mentioned, the following calls will compute the hash:
struct MD5Context md5;
MD5Init(&md5);
MD5Update(&md5, data, datalen);
MD5Final(digest, &md5);
The purpose of splitting it up into that many functions is to let you stream large datasets.
For example, if you're hashing a 10 GB file that doesn't fit into RAM, here's how you would go about it: read the file in smaller chunks and call MD5Update on each one.
struct MD5Context md5;
MD5Init(&md5);
fread(/* Read a block into data. */)
MD5Update(&md5, data, datalen);
fread(/* Read the next block into data. */)
MD5Update(&md5, data, datalen);
fread(/* Read the next block into data. */)
MD5Update(&md5, data, datalen);
...
// Now finish to get the final hash value.
MD5Final(digest, &md5);
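As a concrete sketch of that pattern, using the prototypes from the question (the chunk size is arbitrary):
#include <stdio.h>

int md5_file(const char *path, unsigned char digest[16]) {
    FILE *f = fopen(path, "rb");
    if (!f)
        return -1;
    struct MD5Context md5;
    MD5Init(&md5);
    unsigned char data[4096];
    size_t datalen;
    // feed the file to the hash one block at a time
    while ((datalen = fread(data, 1, sizeof data, f)) > 0)
        MD5Update(&md5, data, (unsigned)datalen);
    fclose(f);
    MD5Final(digest, &md5);
    return 0;
}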
To be honest, the comments accompanying the prototypes seem clear enough. Something like this should do the trick:
void compute_md5(char *str, unsigned char digest[16]) {
    struct MD5Context ctx;
    MD5Init(&ctx);
    MD5Update(&ctx, (unsigned char const *)str, strlen(str));
    MD5Final(digest, &ctx);
}
where str is a C string you want the hash of, and digest is the resulting MD5 digest.
It would appear that you should
Create a struct MD5Context and pass it to MD5Init to get it into a proper starting condition
Call MD5Update with the context and your data
Call MD5Final to get the resulting hash
These three functions and the structure definition make a nice abstract interface to the hash algorithm. I'm not sure why you were shown the core transform function in that header as you probably shouldn't interact with it directly.
The author could have done a little more implementation hiding by making the structure an abstract type, but then you would have been forced to allocate the structure on the heap every time (as opposed to now where you can put it on the stack if you so desire).
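As a sketch of what that hiding would look like (the constructor and destructor names here are hypothetical):
/* md5.h - callers see only an incomplete type */
typedef struct MD5Context MD5Context;
MD5Context *MD5New(void);  /* hypothetical: heap-allocates a context */
void MD5Free(MD5Context *ctx);

/* md5.c - only this file knows the layout, so callers
   can no longer put the context on the stack */
struct MD5Context { uint32 buf[4]; uint32 bits[2]; unsigned char in[64]; };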
All of the existing answers use the deprecated MD5Init(), MD5Update(), and MD5Final().
Instead, use EVP_DigestInit_ex(), EVP_DigestUpdate(), and EVP_DigestFinal_ex(), e.g.
// example.c
//
// gcc example.c -lssl -lcrypto -o example
#include <openssl/evp.h>
#include <stdio.h>
#include <string.h>
void bytes2md5(const char *data, int len, char *md5buf) {
// Based on https://www.openssl.org/docs/manmaster/man3/EVP_DigestUpdate.html
EVP_MD_CTX *mdctx = EVP_MD_CTX_new();
const EVP_MD *md = EVP_md5();
unsigned char md_value[EVP_MAX_MD_SIZE];
unsigned int md_len, i;
EVP_DigestInit_ex(mdctx, md, NULL);
EVP_DigestUpdate(mdctx, data, len);
EVP_DigestFinal_ex(mdctx, md_value, &md_len);
EVP_MD_CTX_free(mdctx);
for (i = 0; i < md_len; i++) {
snprintf(&md5buf[i * 2], 3, "%02x", md_value[i]); // 3 = two hex digits + NUL
}
}
int main(void) {
const char *hello = "hello";
char md5[33]; // 32 characters + null terminator
bytes2md5(hello, strlen(hello), md5);
printf("%s\n", md5);
}
I am using the itoa() function to convert an int into a string, but it is giving an error:
undefined reference to `itoa'
collect2: ld returned 1 exit status
What is the reason? Is there some other way to perform this conversion?
Use snprintf; it is more portable than itoa.
itoa is not part of standard C, nor is it part of standard C++, but a lot of compilers and associated libraries support it.
Example of sprintf
char* buffer = ... allocate a buffer ...
int value = 4564;
sprintf(buffer, "%d", value);
Example of snprintf
char buffer[10];
int value = 234452;
snprintf(buffer, 10, "%d", value);
Both functions are similar to fprintf, but output is written into an array rather than to a stream. The difference between sprintf and snprintf is that snprintf guarantees no buffer overrun by writing up to a maximum number of characters that can be stored in the buffer.
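A small sketch of that guarantee:
char small[4];
int n = snprintf(small, sizeof small, "%d", 123456);
// small now holds "123" (truncated, still NUL-terminated);
// n is 6, the length that would have been written given enough room.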
Use snprintf - it is standard and available with every compiler. Query it for the size needed by calling it with NULL, 0 as the destination parameters. Allocate one character more for the null terminator at the end.
int length = snprintf( NULL, 0, "%d", x );
char* str = malloc( length + 1 );
snprintf( str, length + 1, "%d", x );
...
free(str);
Before I continue, I must warn you that itoa is NOT an ANSI function — it's not a standard C function. You should use sprintf to convert an int into a string.
itoa takes three arguments.
The first one is the integer to be converted.
The second is a pointer to an array of characters - this is where the string is going to be stored. The program may crash if you pass in an uninitialized char * variable, so pass in a char array of adequate size and it will work fine.
The last one is NOT the size of the array, but it's the BASE of your number - base 10 is the one you're most likely to use.
The function returns a pointer to its second argument — where it has stored the converted string.
itoa is a very useful function, which is supported by some compilers - it's a shame it isn't supported by all, unlike atoi.
If you still want to use itoa, here is how you should use it. Otherwise, you have another option using sprintf (as long as you want base 8, 10 or 16 output):
char str[5];
printf("15 in binary is %s\n", itoa(15, str, 2));
Better to use sprintf():
char stringNum[20];
int num=100;
sprintf(stringNum,"%d",num);
Usually snprintf() is the way to go:
char str[16]; // could be less, but I'm too lazy to look up the actual max length of an integer
snprintf(str, sizeof(str), "%d", your_integer);
You can make your own itoa with this function (note it only handles non-negative values and radix <= 10):
void my_utoa(int dataIn, char* bffr, int radix){
    int temp_dataIn = dataIn;
    int stringLen = 1;

    // count how many digits dataIn has in the given radix
    while (temp_dataIn / radix != 0){
        temp_dataIn /= radix;
        stringLen++;
    }
    bffr[stringLen] = '\0';   // terminate the string

    // write the digits from the least significant end backwards
    temp_dataIn = dataIn;
    do{
        bffr[--stringLen] = (temp_dataIn % radix) + '0';
        temp_dataIn /= radix;
    }while (stringLen > 0);
}
and this is an example:
#include <stdio.h>

char buffer[33];
int main(){
    my_utoa(54321, buffer, 10);
    printf("%s\n", buffer);
    my_utoa(13579, buffer, 10);
    printf("%s\n", buffer);
    return 0;
}
void itos(int value, char* str, size_t size) {
snprintf(str, size, "%d", value);
}
...works with call by reference. Use it like this, e.g. (length() stands for a helper that computes the needed buffer size; a sketch follows below):
int someIntToParse;
char resultingString[length(someIntToParse)];
itos(someIntToParse, resultingString, length(someIntToParse));
now resultingString will hold your C-'string'.
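length() is not a standard function; a sketch of such a helper might be:
#include <stddef.h>

// buffer size needed for the decimal text of v, including
// a possible '-' and the terminating '\0'
size_t length(int v) {
    size_t n = 2;                 // one digit plus the terminator
    if (v < 0) { n++; v = -v; }   // room for the sign (note: overflows for INT_MIN)
    while (v >= 10) { v /= 10; n++; }
    return n;
}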
char string[something];
sprintf(string, "%d", 42);
Similar implementation to Ahmad Sirojuddin's, but with slightly different semantics. From a security perspective, any time a function writes into a string buffer, the function should really "know" the size of the buffer and refuse to write past the end of it. I would guess that's part of the reason you can't find itoa anymore.
Also, the following implementation avoids performing the modulo/divide operation twice.
char *u32todec( uint32_t value,
char *buf,
int size)
{
if(size > 1){
int i=size-1, offset, bytes;
buf[i--]='\0';
do{
buf[i--]=(value % 10)+'0';
value = value/10;
}while((value > 0) && (i>=0));
offset=i+1;
if(offset > 0){
bytes=size-i-1;
for(i=0;i<bytes;i++)
buf[i]=buf[i+offset];
}
return buf;
}else
return NULL;
}
The following code both tests the above code and demonstrates its correctness:
int main(void)
{
uint64_t acc;
uint32_t inc;
char buf[17]; // must cover the largest bufsize tested below (17)
size_t bufsize;
for(acc=0, inc=7; acc<0x100000000; acc+=inc){
printf("%u: ", (uint32_t)acc);
for(bufsize=17; bufsize>0; bufsize/=2){
if(NULL != u32todec((uint32_t)acc, buf, bufsize))
printf("%s ", buf);
}
printf("\n");
if(acc/inc > 9)
inc*=7;
}
return 0;
}
Like Edwin suggested, use snprintf:
#include <stdio.h>
int main(int argc, const char *argv[])
{
int n = 1234;
char buf[10];
snprintf(buf, 10, "%d", n);
printf("%s\n", buf);
return 0;
}
If you really want to use itoa, you need to include the standard library header.
#include <stdlib.h>
I also believe that if you're on Windows (using MSVC), then itoa is actually _itoa.
See http://msdn.microsoft.com/en-us/library/yakksftt(v=VS.100).aspx
Then again, since you're getting a message from collect2, you're likely running GCC on *nix.
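If the code has to build in both places, a small shim (a sketch) can paper over the naming difference:
#include <stdlib.h>
#if defined(_MSC_VER)
#define itoa _itoa /* MSVC spells it with a leading underscore */
#endif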
See this example:
#include <stdlib.h> // for itoa() call
#include <stdio.h>
int main() {
int num = 145;
char buf[5];
// convert 145 to a string in buf
itoa(num, buf, 10);
// print our string
printf("%s\n", buf);
return 0;
}
See this link for other examples.
The itoa() function is not defined in ANSI C, so it is not provided by default on some platforms (Reference Link).
The s(n)printf() functions are the easiest replacement for itoa(). However, an itoa (integer to ASCII) function can be a better overall solution to the integer-to-ASCII conversion problem.
itoa() can also beat s(n)printf() on performance, depending on the implementation. A reduced itoa (supporting only radix 10) implementation as an example: Reference Link
Another complete itoa() implementation is below (Reference Link):
#include <stdbool.h>
#include <string.h>
// A utility function to reverse a string
char *reverse(char *str)
{
char *p1, *p2;
if (! str || ! *str)
return str;
for (p1 = str, p2 = str + strlen(str) - 1; p2 > p1; ++p1, --p2)
{
*p1 ^= *p2;
*p2 ^= *p1;
*p1 ^= *p2;
}
return str;
}
// Implementation of itoa()
char* itoa(int num, char* str, int base)
{
int i = 0;
bool isNegative = false;
/* Handle 0 explicitly, otherwise an empty string is printed for 0 */
if (num == 0)
{
str[i++] = '0';
str[i] = '\0';
return str;
}
// In standard itoa(), negative numbers are handled only with
// base 10. Otherwise numbers are considered unsigned.
if (num < 0 && base == 10)
{
isNegative = true;
num = -num; // note: overflows for INT_MIN
}
// Process individual digits
while (num != 0)
{
int rem = num % base;
str[i++] = (rem > 9)? (rem-10) + 'a' : rem + '0';
num = num/base;
}
// If number is negative, append '-'
if (isNegative)
str[i++] = '-';
str[i] = '\0'; // Append string terminator
// Reverse the string
reverse(str);
return str;
}
Another complete itoa() implementation: Reference Link
An itoa() usage example is below (Reference Link):
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main()
{
int a=54325;
char buffer[20];
itoa(a,buffer,2); // here 2 means binary
printf("Binary value = %s\n", buffer);
itoa(a,buffer,10); // here 10 means decimal
printf("Decimal value = %s\n", buffer);
itoa(a,buffer,16); // here 16 means Hexadecimal
printf("Hexadecimal value = %s\n", buffer);
return 0;
}
// Sketch of the enclosing function this fragment assumes (TString, int32,
// TCHAR, TEXT and INDEX_NONE come from the author's framework):
TString IntToString(const int32 InNumber)
{
if(InNumber == 0)
{
return TEXT("0");
}
const int32 CharsBufferSize = 64; // enough for an int128 type
TCHAR ResultChars[CharsBufferSize];
int32 Number = InNumber;
// Digits arranged so that indexing with a negative or positive remainder selects the right character.
const TCHAR* DigitalChars = TEXT("9876543210123456789");
constexpr int32 ZeroCharIndex = 9; // Position of the ZERO character from the DigitalChars.
constexpr int32 Base = 10; // base system of the number.
// Convert each digit of the number to a digital char from the top down.
int32 CharIndex = CharsBufferSize - 1;
for(; Number != 0 && CharIndex > INDEX_NONE; --CharIndex)
{
const int32 CharToInsert = ZeroCharIndex + (Number % Base);
ResultChars[CharIndex] = DigitalChars[CharToInsert];
Number /= Base;
}
// Insert the sign to the left of the digits if the number is negative.
if(InNumber < 0 && CharIndex > INDEX_NONE)
{
ResultChars[CharIndex] = L'-';
}
else
{
// Step back to the first digit if the number is non-negative.
++CharIndex;
}
// Get number of the converted chars and construct string to return.
const int32 ResultSize = CharsBufferSize - CharIndex;
return TString{&ResultChars[CharIndex], ResultSize};
}