char* to float* - c

I want to write a function that unpacks a char* buffer to a float* buffer.
The char* buffer was originally packed as floats to bytes.
What is the best way to do this?
I have implemented integers below.
I have packed an array of 4 byte sized integers in a char* buffer.
This is the function I used to unpack it and assign it to int*.
void CharToInt(int* tar, char* src, int len) {
char* val = src;
char* end = src + len-1;
for( ; val<end; tar++,val+=sizeof(int)) {
*tar = ( (((*val) < 0) ? 256 + (*val) : (*val) ) +
(((*val+1) < 0) ? 256 + (*val+1) : (*val+1) << 8 ) +
(((*val+2) < 0) ? 256 + (*val+2) : (*val+2) << 16) +
(((*val+3) < 0) ? 256 + (*val+3) : (*val+3) << 24) );
}
}
Adding 256 to a signed byte seems to take care of the signed char range (-128 to 127).
Is this because it is the same as flipping the bits and subtracting 1?
Anyway, I am looking for the float* equivalent.

"restrict" is not C89, only C99. sizeof(float) can != 4 on several machines.
This solution is independent from sizeof(float), works on all ANSI C environments and returns the number of successful imported floats in "float".
int chars_to_floats(const char *chars, float *floats, int len)
{
int converted = 0;
const float *fp = (const float *)chars; /* keep const: we only read from chars */
while( len >= (int)sizeof *fp )
{
*floats++ = *fp++;
len -= (int)sizeof *fp;
++converted;
}
return converted;
}

// Converts a single group of 4 chars to 1 float.
float char_to_float(const char* cs)
{
return *(const float*)cs;
}
// Converts every 4 chars into one float. len is the number of floats to unpack.
void chars_to_floats(const char* restrict chars, float* restrict floats, size_t len)
{
for(size_t i = 0; i < len; ++i)
{
*floats = char_to_float(chars);
chars += 4;   /* advance the pointers, not the values they point to */
floats += 1;
}
}
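Note that both snippets above read the bytes through a float*, which relies on the buffer being suitably aligned and technically runs afoul of strict aliasing. A minimal sketch of a memcpy-based variant (my naming; it assumes the bytes really were packed from floats of the same size and endianness on the same platform) would be:

#include <string.h>

/* Unpacks 'count' floats from a byte buffer that was packed as raw float bytes. */
void CharToFloat(float* tar, const char* src, size_t count)
{
    for (size_t i = 0; i < count; i++) {
        memcpy(&tar[i], src + i * sizeof(float), sizeof(float));
    }
}

A compiler typically turns the fixed-size memcpy into a plain load, so this costs nothing over the cast.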

Related

Converting a string of chars into its decimal value then back to its character values

unsigned long long int power(int base, unsigned int exponent)
{
if (exponent == 0)
return 1;
else
return base * power(base, exponent - 1);
}
I am working on a program where I need to take in a string of 8 characters (e.g. "I want t") then convert this into a long long int in the pack function. I have the pack function working fine.
unsigned long long int pack(char unpack[])
{
/*converting string to long long int here
didn't post code because its large*/
}
After I enter "I want t" I get "Value in Decimal = 5269342824372117620" and then I send the decimal to the unpack function. So I need to convert 5269342824372117620 back into "I want t". I tried bit manipulation, which was unsuccessful; any help would be greatly appreciated.
void unpack(long long int pack)
{
long long int bin;
char convert[100];
for(int i = 63, j = 0, k = 0; i >= 0; i--,j++)
{
if((pack & (1 << i)) != 0)
bin += power(2,j);
if(j % 8 == 0)
{
convert[k] = (char)bin;
bin = 0;
k++;
j = -1;
}
}
printf("String: %s\n", convert);
}
A simple solution for your problem is to consider the characters in the string to be digits in a large base that encompasses all possible values. For example base64 encoding can convert strings of 8 characters to 48-bit numbers, but you can only use a subset of at most 64 different characters in the source string.
To convert any 8 byte string into a number, you must use a base of at least 256.
Given your extra input, "After I enter 'I want t' I get 'Value in Decimal = 5269342824372117620'", and since 5269342824372117620 == 0x492077616e742074, you do indeed use base 256, big-endian order and ASCII encoding for the characters.
Here is a simple portable pack function for this method:
unsigned long long pack(const char *s) {
unsigned long long x = 0;
int i;
for (i = 0; i < 8; i++) {
x = x * 256 + (unsigned char)s[i];
}
return x;
}
The unpack function is easy to derive: compute the remainders of divisions in the reverse order:
char *unpack(char *dest, unsigned long long x) {
/* dest is assumed to have a length of at least 9 */
int i;
for (i = 8; i-- > 0; ) {
dest[i] = x % 256;
x = x / 256;
}
dest[8] = '\0'; /* set the null terminator */
return dest;
}
For a potentially faster but less portable solution, you could use this, but you would get a different conversion on little-endian systems such as current Macs and PCs:
#include <string.h>
unsigned long long pack(const char *s) {
unsigned long long x;
memcpy(&x, s, 8);
return x;
}
char *unpack(char *s, unsigned long long x) {
memcpy(s, &x, 8);
s[8] = '\0';
return s;
}

C - Write access violation

I get an error at the last line, in nullString(), a function that sets a whole string to '\0' with a simple for() loop.
void function ( unsigned char inputArray[], size_t inputSize )
{
size_t cellSize;
if (inputSize <= 256)
cellSize = 1;
else
cellSize = ceil(inputSize / 2 / 256) + 1;
// Sub Box
unsigned char subBox[255];
for (size_t line = 0; line < 255; line++)
subBox[line] = 0;
generate_SubBox(subBox, key);
// Sub Box
// Sub Box reverse
unsigned char subBox_Inverse[255];
for (size_t line = 0; line < 255; line++)
subBox_Inverse[line] = 0;
generate_SubBox_Inverse(subBox_Inverse, subBox, key);
// Sub Box reverse
unsigned char* inputArray2 = NULL;
inputArray2 = malloc(sizeof(unsigned char)* inputSize / 2);
verifyMalloc(inputArray2);
nullString(inputArray2, inputSize / 2);
unsigned char string_temp[3] = { 0 };
size_t w = 0;
for (size_t i = 0; i < inputSize / 2; i++)
{
string_temp[0] = inputArray[w];
string_temp[1] = inputArray[w + 1];
inputArray2[i] = strtoll(string_temp, NULL, 16);
w += 2;
}
}
I tried neutralizing, line by line, all the instructions coming before nullString() by commenting them out, but it doesn't change anything.
If I neutralize nullString, the error comes after, at
inputArray2[i] = strtoll(...)
Hope you've got the answer :)
Thanks in advance !
EDIT:
Here is nullString:
void nullString(unsigned char input[], size_t length)
{
for (size_t x = 0; x < length; x++)
input[x] = '\0';
}
I commented out all the instructions before nullString; the error is still there.
I also verified the variables and they all look good.
EDIT 2:
verifyMalloc:
void verifyMalloc(int* pointer)
{
if (pointer == NULL)
{
perror("Erreur");
Sleep(15000);
exit(0);
}
}
Everything we're seeing is seriously hinting at you forgetting to #include <stdlib.h> (and ignoring the warnings resulting from that).
This is what might happen when you use malloc() without including stdlib.h in the same file:
the compiler considers the malloc() function to be implicitly declared, which means it assumes that its return type is int (instead of void *).
This might work when sizeof (int) is the same as sizeof (void *). But when int is 32 bits while pointers are 64 bits, the address returned by malloc() might lose half of its bits and point to an invalid address.
Try using
void bzero(void *s, size_t n); or
void *memset(void *s, int c, size_t n);
instead of your nullString() and for()something[x]=0 loops.
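For example, with <string.h> included, the two zeroing loops and the nullString() call in the question could become:

memset(subBox, 0, sizeof subBox);
memset(subBox_Inverse, 0, sizeof subBox_Inverse);
memset(inputArray2, 0, inputSize / 2);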
Then, a note on this initialization:
unsigned char string_temp[3] = { 0 };
This does zero the whole array: elements without an explicit initializer are zero-initialized, so it is equivalent to
unsigned char string_temp[3] = {0,0,0};
or bzero(string_temp,3);
That terminator matters here:
string_temp[0] = inputArray[w];
string_temp[1] = inputArray[w + 1];
inputArray2[i] = strtoll(string_temp, NULL, 16);
strtoll() needs a '\0' to know where to stop, and it finds one only because string_temp[2] is zero and is never overwritten.
Then, this should be enough:
unsigned char* inputArray2 = malloc(sizeof(unsigned char) * inputSize / 2);
inputArray2 will be NULL if malloc failed, or a valid pointer if it succeeded.
You may want to check your inputSize / this_and_that arithmetic. Does it really deliver what you expect? You might be surprised by the result of dividing integer operands; for instance, ceil(inputSize / 2 / 256) in your code divides integers first, so ceil() only ever sees an already-truncated value.
This also looks suspicious:
inputArray2[i] = strtoll(string_temp, NULL, 16);
strtoll() returns a long long integer, but your inputArray2 is of unsigned char type. So you are trying to store 8 bytes (sizeof(long long) == 8) where you reserved space for only one (sizeof(char) == 1).
Redeclare your inputArray2 as long long
long long *inputArray2 = malloc(sizeof(long long) * inputSize /2 );
And try this with memset():
size_t size = sizeof(long long) * inputSize/2;
//Do you really need long long? You are parsing at most two hex digits (a value 0..255), so uint8_t would be enough
long long* inputArray2 = malloc(size);
memset(inputArray2, 0, size);
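Putting the pieces together, here is a sketch of the conversion part with the fixes discussed above (the missing #include, a plain char buffer with a guaranteed terminator). It keeps unsigned char for inputArray2, since each parsed pair of hex digits fits in one byte; treat it as an illustration, not a drop-in:

#include <stdlib.h>   /* malloc, strtol, exit - the missing include was the root cause */
#include <string.h>   /* memset */

unsigned char *inputArray2 = malloc(inputSize / 2);
if (inputArray2 == NULL) { exit(EXIT_FAILURE); }
memset(inputArray2, 0, inputSize / 2);
char string_temp[3] = { 0 };                       /* [2] stays '\0' as the terminator */
for (size_t i = 0, w = 0; i < inputSize / 2; i++, w += 2) {
    string_temp[0] = inputArray[w];
    string_temp[1] = inputArray[w + 1];
    inputArray2[i] = (unsigned char) strtol(string_temp, NULL, 16);
}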

How to convert epoch decimal time to 64 bit binary and back to decimal in C

I am trying to create a program that converts decimals to binary based on a specified size and then reverses the process, from binary back to decimal. So far, from my (beginner) point of view, the code looks correct: I can convert all numbers from decimal to binary and vice versa. I am stuck on the last part, where I try to convert the epoch time from a 64-bit binary number back to decimal. I cannot understand where I am going wrong, since the rest of the numbers seem to be recovered correctly. The sources where I found the code I am using are Binary to Decimal and Decimal to Binary.
Update: modified code to short version:
I have modified the code to a short version that simply demonstrates the problem. The code works fine up to 32-bit binary conversion, but I need to convert up to 64 bits and I do not know how to do that. I noticed that because I was using int I hit the 32-bit limit, so I changed it to long long int to reach 64 bits.
I have provided a sample that converts the decimal value 1 in 32-bit and in 64-bit format, which demonstrates the problem. The epoch time is the desired output, but I need to verify that the code works before I attempt that conversion.
Sample of code:
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <inttypes.h>
#define MAX_CHARACTERS 65
typedef struct rec {
char transmit[MAX_CHARACTERS];
char receive[MAX_CHARACTERS];
}RECORD;
char *decimal_to_binary(int n , int num); /* Define function */
char *decimal_to_binary(int n , int num) {
long long int c, d, count;
char *pointer;
count = 0;
pointer = (char*) malloc( num + 1 );
if ( pointer == NULL )
exit(EXIT_FAILURE);
for ( c = num - 1; c >= 0; c-- ) {
d = n >> c;
if ( d & 1 )
*( pointer + count ) = 1 + '0';
else
*( pointer + count ) = 0 + '0';
count++;
}
*( pointer + count ) = '\0';
return pointer;
}
int binary_decimal(long long int n); /* Define function */
int binary_decimal(long long int n) { /* Function to convert binary to decimal.*/
int decimal=0, i=0, rem;
while (n!=0) {
rem = n%10;
n/=10;
decimal += rem*pow(2,i);
++i;
}
return decimal;
}
int main(void) {
RECORD *ptr_record;
ptr_record = (RECORD *) malloc (sizeof(RECORD));
if (ptr_record == NULL) {
printf("Out of memmory!\nExit!\n");
exit(0);
}
int LI_d = 1;
char *LI_b = decimal_to_binary(LI_d,32);
memset( (*ptr_record).transmit , '\0' , sizeof((*ptr_record).transmit) );
strncat((*ptr_record).transmit , LI_b , strlen(LI_b) );
printf("LI: %s\n",(*ptr_record).transmit);
//transmit and receive
memset( (*ptr_record).receive , '\0' , sizeof((*ptr_record).receive) );
strncpy( (*ptr_record).receive , (*ptr_record).transmit , strlen((*ptr_record).transmit) );
char *LI_rcv_b = strndup( (*ptr_record).receive , 64 );
int LI_rcv_i = atoi (LI_rcv_b);
int final_LI = binary_decimal(LI_rcv_i);
printf("Final_LI: %i\n",final_LI);
free( ptr_record );
return 0;
}
Sample of output for 32 bit conversion:
LI: 00000000000000000000000000000001
Final_LI: 1
Sample of output for 64 bit conversion:
LI: 0000000000000000000000000000000100000000000000000000000000000001
Final_LI: -1
decimal_to_binary(int n, ...): better to use unsigned math
//char *decimal_to_binary(int n, int num) {
char *decimal_to_binary(unsigned long long n, int num) {
// long long int c, d, count;
unsigned long long int d;
int c, count;
char *pointer;
count = 0;
pointer = malloc(num + 1); // drop cast
if (pointer == NULL)
exit(EXIT_FAILURE);
for (c = num - 1; c >= 0; c--) {
d = n >> c;
if (d & 1)
*(pointer + count) = 1 + '0';
else
*(pointer + count) = 0 + '0';
count++;
}
*(pointer + count) = '\0';
return pointer;
}
Simplify binary_decimal(). Again use unsigned math, drop pow()
/* Function to convert binary to decimal.*/
unsigned long long binary_decimal(unsigned long long int n) {
unsigned long long decimal = 0;
unsigned long long bit = 1;
while (n != 0) {
decimal += (n % 10) * bit; /* n % 10 yields the least significant binary digit first */
bit *= 2;
n /= 10;
}
return decimal;
}
main() has lots of issues
int main(void) {
RECORD *ptr_record;
ptr_record = malloc(sizeof(RECORD)); // drop cast
if (ptr_record == NULL) {
printf("Out of memory!\nExit!\n"); // spelling fix
exit(0);
}
// use unsigned long long
unsigned long long LI_d = 1;
LI_d = (unsigned long long) -1;
char *LI_b = decimal_to_binary(LI_d, 32);
memset((*ptr_record).transmit, '\0', sizeof((*ptr_record).transmit));
// strncat((*ptr_record).transmit, LI_b, strlen(LI_b));
strncat((*ptr_record).transmit, LI_b, sizeof((*ptr_record).transmit) - 1);
printf("LI: %s\n", (*ptr_record).transmit);
//transmit and receive
memset((*ptr_record).receive, '\0', sizeof((*ptr_record).receive));
// strncpy((*ptr_record).receive, (*ptr_record).transmit, strlen((*ptr_record).transmit));
strncpy((*ptr_record).receive, (*ptr_record).transmit, sizeof((*ptr_record).transmit) - 1);
// char *LI_rcv_b = strndup((*ptr_record).receive, 64);
char *LI_rcv_b = strndup((*ptr_record).receive, MAX_CHARACTERS);
// At this point, approach is in error
// Cannot take a 64-decimal digit string and convert to a typical long long.
// int LI_rcv_i = atoi(LI_rcv_b);
// int final_LI = binary_decimal(LI_rcv_i);
// printf("Final_LI: %i\n", final_LI);
// Suspect you want to convert 64-binary digit string to a 64-bit integer
// maybe by somehow using binary_decimal - suggest re-write of that function
unsigned long long LI_rcv_i = strtoull(LI_rcv_b, NULL, 2);
printf("Final_LI: %llu\n", LI_rcv_i);
free(ptr_record);
return 0;
}
Output
LI: 11111111111111111111111111111111
Final_LI: 4294967295
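To tie this back to the epoch-time goal, here is a sketch of the round trip using the corrected decimal_to_binary() above and strtoull() for the way back (it assumes time_t fits in an unsigned long long):

unsigned long long now = (unsigned long long) time(NULL);
char *bits = decimal_to_binary(now, 64);
printf("epoch as binary: %s\n", bits);
unsigned long long back = strtoull(bits, NULL, 2);
printf("recovered epoch: %llu\n", back);
free(bits);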

Bitwise Operations C on long hex Linux

Briefly: the question is about bitwise operations on long hex values; language: C; OS: Linux.
I would simply like to do some bitwise operations on a "long" hex string.
I tried the following:
First try:
I cannot use the following because of overflow:
long t1 = 0xabefffcccaadddddffff;
and t2 = 0xdeeefffffccccaaadacd;
Second try: does not work because the characters a-f are interpreted as text instead of hex digits
char* t1 = "abefffcccaadddddffff";
char* t2 = "deeefffffccccaaadacd";
int len = strlen(t1);
for (int i = 0; i < len; i++ )
{
char exor = *(t1 + i) ^ *(t2 + i);
printf("%x", exor);
}
Could someone please let me know how to do this? thx
Bitwise operations are usually very easily extended to larger numbers.
The best way to do this is to split them up into 4 or 8 byte sequences, and store them as an array of uints. In this case you need at least 80 bits for those particular strings.
For AND it is pretty simple, something like:
unsigned int A[3] = { 0xabef, 0xffcccaad, 0xddddffff };
unsigned int B[3] = { 0xdeee, 0xfffffccc, 0xcaaadacd };
unsigned int R[3] = { 0 };
for (int b = 0; b < 3; b++) {
R[b] = A[b] & B[b];
}
A more complete example, including scanning hex strings and printing them:
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
typedef unsigned int uint;
void long_Print(int size, const uint a[]) {
printf("0x");
for (int i = 0; i < size; i++) {
/* pad all but the first limb to 8 hex digits so leading zeros are kept */
printf(i == 0 ? "%x" : "%08x", a[i]);
}
}
void long_AND(int size, const uint a[], const uint b[], uint r[]) {
for (int i = 0; i < size; i++) {
r[i] = a[i] & b[i];
}
}
// Reads a long hex string and fills an array (most significant chunk first).
// Returns the number of elements filled from the string.
int long_Scan(int size, const char* str, uint r[]) {
int len = (int)strlen(str);
int ri = size - 1;
int filled = 0;
const char* here = str + len;
while (here > str && ri >= 0) {
int chunk = (int)(here - str) >= 8 ? 8 : (int)(here - str);
here -= chunk;
char fmt[8];
sprintf(fmt, "%%%dx", chunk); /* builds "%8x", "%4x", ... */
sscanf(here, fmt, &r[ri--]);
filled++;
}
while (ri >= 0) {
r[ri--] = 0;
}
return filled;
}
int main(int argc, char* argv[])
{
uint A[3] = { 0 };
uint B[3] = { 0 };
uint R[3] = { 0 };
long_Scan(3, "abefffcccaadddddffff", A);
long_Scan(3, "deeefffffccccaaadacd", B);
long_Print(3, A);
puts("\nAND");
long_Print(3, B);
puts("\n=");
long_AND(3, A, B, R);
long_Print(3, R);
getchar();
return 0;
}
You'll certainly need to use a library that can handle arbitrarily long integers. Consider using libgmp: http://gmplib.org/
Before you can do any sort of bitwise operations, you need to be working with integers. "abeffccc" is not an integer. It is a string. You need to use something like strtol
to first convert the string to an integer.
If your values are too big to fit into a 64-bit long long int (0xFFFFFFFF,FFFFFFFF) then you'll need to use a Big Integer library, or something similar, to support arbitrarily large values. As H2CO3 mentioned, libgmp is an excellent choice for large numbers in C.
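For reference, a minimal libgmp sketch of the XOR from the question (link with -lgmp; error handling omitted, so treat it as an illustration rather than a drop-in):

#include <gmp.h>
#include <stdio.h>

int main(void) {
    mpz_t a, b, r;
    mpz_init_set_str(a, "abefffcccaadddddffff", 16);  /* parse the hex strings */
    mpz_init_set_str(b, "deeefffffccccaaadacd", 16);
    mpz_init(r);
    mpz_xor(r, a, b);                                 /* bitwise XOR of arbitrary-size integers */
    gmp_printf("%Zx\n", r);
    mpz_clear(a); mpz_clear(b); mpz_clear(r);
    return 0;
}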
Instead of using unsigned long directly, you could try using an array of unsigned int. Each unsigned int holds 32 bits, or 8 hex digits. You would therefore have to chop-up your constant into chunks of 8 hex digits each:
unsigned int t1[3] = { 0xabef , 0xffcccaad , 0xddddffff };
Note that for sanity, you should store them in reverse order so that the first entry of t1 contains the lowest-order bits.
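For the XOR the question actually asks about, a minimal sketch using the same chunking as t1 above (most significant chunk first, as written, padding the lower chunks when printing so leading zeros are not lost):

unsigned int t1[3] = { 0xabef, 0xffcccaad, 0xddddffff };
unsigned int t2[3] = { 0xdeee, 0xfffffccc, 0xcaaadacd };
unsigned int r[3];
for (int i = 0; i < 3; i++)
    r[i] = t1[i] ^ t2[i];
printf("%x%08x%08x\n", r[0], r[1], r[2]);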

What is the proper way of implementing a good "itoa()" function?

I was wondering if my implementation of an "itoa" function is correct. Maybe you can help me get it a bit more "correct"; I'm pretty sure I'm missing something. (Maybe there is already a library doing the conversion the way I want it to, but I couldn't find any.)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
char * itoa(int i) {
char * res = malloc(8*sizeof(int));
sprintf(res, "%d", i);
return res;
}
int main(int argc, char *argv[]) {
...
// Yet, another good itoa implementation
// returns: the length of the number string
int itoa(int value, char *sp, int radix)
{
char tmp[16];// be careful with the length of the buffer
char *tp = tmp;
int i;
unsigned v;
int sign = (radix == 10 && value < 0);
if (sign)
v = -value;
else
v = (unsigned)value;
while (v || tp == tmp)
{
i = v % radix;
v /= radix;
if (i < 10)
*tp++ = i+'0';
else
*tp++ = i + 'a' - 10;
}
int len = tp - tmp;
if (sign)
{
*sp++ = '-';
len++;
}
while (tp > tmp)
*sp++ = *--tp;
*sp = '\0'; // null-terminate so sp is a proper C string
return len;
}
// Usage Example:
char int_str[15]; // be careful with the length of the buffer
int n = 56789;
int len = itoa(n,int_str,10);
The only actual error is that you don't check the return value of malloc for null.
The name itoa is kind of already taken for a function that's non-standard, but not that uncommon. It doesn't allocate memory, rather it writes to a buffer provided by the caller:
char *itoa(int value, char * str, int base);
If you don't want to rely on your platform having that, I would still advise following the pattern. String-handling functions which return newly allocated memory in C are generally more trouble than they're worth in the long run, because most of the time you end up doing further manipulation, and so you have to free lots of intermediate results. For example, compare:
void delete_temp_files() {
char filename[20];
strcpy(filename, "tmp_");
char *endptr = filename + strlen(filename);
for (int i = 0; i < 10; ++i) {
itoa(i, endptr, 10); // itoa doesn't allocate memory
unlink(filename);
}
}
vs.
void delete_temp_files() {
char filename[20];
strcpy(filename, "tmp_");
char *endptr = filename + strlen(filename);
for (int i = 0; i < 10; ++i) {
char *number = itoa(i, 10); // itoa allocates memory
strcpy(endptr, number);
free(number);
unlink(filename);
}
}
If you had reason to be especially concerned about performance (for instance if you're implementing a stdlib-style library including itoa), or if you were implementing bases that sprintf doesn't support, then you might consider not calling sprintf. But if you want a base 10 string, then your first instinct was right. There's absolutely nothing "incorrect" about the %d format specifier.
Here's a possible implementation of itoa, for base 10 only:
char *itobase10(char *buf, int value) {
sprintf(buf, "%d", value);
return buf;
}
Here's one which incorporates the snprintf-style approach to buffer lengths:
int itobase10n(char *buf, size_t sz, int value) {
return snprintf(buf, sz, "%d", value);
}
A good int to string or itoa() has these properties;
Works for all [INT_MIN...INT_MAX], base [2...36] without buffer overflow.
Does not assume int size.
Does not require 2's complement.
Does not require unsigned to have a greater positive range than int. In other words, does not use unsigned.
Allows use of '-' for negative numbers, even when base != 10.
Tailor the error handling as needed. (needs C99 or later):
char* itostr(char *dest, size_t size, int a, int base) {
// Max text needs occur with itostr(dest, size, INT_MIN, 2)
char buffer[sizeof a * CHAR_BIT + 1 + 1];
static const char digits[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
if (base < 2 || base > 36) {
fprintf(stderr, "Invalid base");
return NULL;
}
// Start filling from the end
char* p = &buffer[sizeof buffer - 1];
*p = '\0';
// Work with negative `int`
int an = a < 0 ? a : -a;
do {
*(--p) = digits[-(an % base)];
an /= base;
} while (an);
if (a < 0) {
*(--p) = '-';
}
size_t size_used = &buffer[sizeof(buffer)] - p;
if (size_used > size) {
fprintf(stderr, "Scant buffer %zu > %zu", size_used , size);
return NULL;
}
return memcpy(dest, p, size_used);
}
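A quick usage sketch, with the destination sized for the worst case the code comment mentions (INT_MIN in base 2; <limits.h> provides CHAR_BIT and INT_MIN):

char buf[sizeof(int) * CHAR_BIT + 2];
if (itostr(buf, sizeof buf, INT_MIN, 2)) {
    puts(buf);   /* 33 characters on a 32-bit int: '-' followed by 32 binary digits */
}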
I think you are allocating perhaps too much memory. malloc(8*sizeof(int)) will give you 32 bytes on most machines, which is probably excessive for a text representation of an int.
I found an interesting resource dealing with several different issues with itoa implementations; you might want to look it up too:
itoa() implementations with performance tests
I'm not quite sure where you get 8*sizeof(int) as the maximum possible number of characters -- ceil(8 / (log(10) / log(2))) yields a multiplier of 3. Additionally, under C99 and some older POSIX platforms you can create an accurately-allocating version with snprintf():
char *
itoa(int i)
{
int n = snprintf(NULL, 0, "%d", i) + 1;
char *s = malloc(n);
if (s != NULL)
snprintf(s, n, "%d", i);
return s;
}
HTH
You should use a function in the printf family for this purpose. If you'll be writing the result to stdout or a file, use printf/fprintf. Otherwise, use snprintf with a buffer big enough to hold 3*sizeof(type)+2 bytes or more.
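For instance, for an int that rule of thumb gives (some_int standing for whatever value you are printing):

char buf[3 * sizeof(int) + 2];              /* digits + sign + terminator */
snprintf(buf, sizeof buf, "%d", some_int);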
sprintf is quite slow, if performance matters it is probably not the best solution.
if the base argument is a power of 2 the conversion can be done with a shift and masking, and one can avoid reversing the string by recording the digits from the highest positions. For instance, something like this for base=16
const int bits_per_digit = 4; /* 4 bits per hex digit */
int num_iter = sizeof(int) * CHAR_BIT / bits_per_digit - 1; /* index of the highest digit */
const char digits[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
/* skip zeros in the highest positions */
int i = num_iter;
for (; i >= 0; i--)
{
int digit = (value >> (bits_per_digit*i)) & 15;
if ( digit > 0 ) break;
}
for (; i >= 0; i--)
{
int digit = (value >> (bits_per_digit*i)) & 15;
result[len++] = digits[digit];
}
For decimal output there is a nice idea: use a static array big enough to record the digits in reverse order, see here
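A self-contained sketch of that power-of-two idea for base 16 (my own wrapper around the fragment above; the names are illustrative):

#include <limits.h>

/* Writes the hex digits of 'value' into 'result' (no "0x" prefix) and returns the length. */
int utohex(unsigned value, char *result) {
    const char digits[] = "0123456789abcdef";
    const int bits_per_digit = 4;
    int i = sizeof(value) * CHAR_BIT / bits_per_digit - 1;
    int len = 0;
    /* skip zeros in the highest positions, but keep at least one digit */
    while (i > 0 && ((value >> (bits_per_digit * i)) & 15) == 0)
        i--;
    for (; i >= 0; i--)
        result[len++] = digits[(value >> (bits_per_digit * i)) & 15];
    result[len] = '\0';
    return len;
}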
Integer-to-ASCII needs to convert data from a standard integer type
into an ASCII string.
All operations need to be performed using pointer arithmetic, not array indexing.
The number you wish to convert is passed in as a signed 32-bit integer.
You should be able to support bases 2 to 16 by specifying the integer value of the base you wish to convert to (base).
Copy the converted character string to the uint8_t* pointer passed in as a parameter (ptr).
The signed 32-bit number will have a maximum string size (Hint: Think base 2).
You must place a null terminator at the end of the converted c-string. The function should return the length of the converted data (including a negative sign).
Example my_itoa(ptr, 1234, 10) should return an ASCII string length of 5 (including the null terminator).
This function needs to handle signed data.
You may not use any string functions or libraries.
.
uint8_t my_itoa(int32_t data, uint8_t *ptr, uint32_t base){
uint8_t cnt=0,sgnd=0;
uint8_t *tmp=calloc(32,sizeof(*tmp));
if(!tmp){exit(1);}
else{
for(int i=0;i<32;i++){
if(data<0){data=-data;sgnd=1;}
if(data!=0){
if(data%base<10){
*(tmp+i)=(data%base)+48;
data/=base;
}
else{
*(tmp+i)=(data%base)+55;
data/=base;
}
cnt++;
}
}
if(sgnd){*(tmp+cnt)=45;++cnt;}
}
my_reverse(tmp, cnt);
my_memcopy(tmp,ptr,cnt+1); /* calloc zero-filled tmp, so tmp[cnt] is the '\0' terminator */
free(tmp); /* avoid leaking the scratch buffer */
return ++cnt;
}
ASCII-to-Integer needs to convert data back from an ASCII represented string into an integer type.
All operations need to be performed using pointer arithmetic, not array indexing
The character string to convert is passed in as a uint8_t * pointer (ptr).
The number of digits in your character set is passed in as a uint8_t integer (digits).
You should be able to support bases 2 to 16.
The converted 32-bit signed integer should be returned.
This function needs to handle signed data.
You may not use any string functions or libraries.
.
int32_t my_atoi(uint8_t *ptr, uint8_t digits, uint32_t base){
int32_t sgnd=0, rslt=0;
for(int i=0; i<digits; i++){
if(*(ptr)=='-'){*ptr='0';sgnd=1;}
else if(*(ptr+i)>'9'){rslt+=(*(ptr+i)-'7');}
else{rslt+=(*(ptr+i)-'0');}
if(!*(ptr+i+1)){break;}
rslt*=base;
}
if(sgnd){rslt=-rslt;}
return rslt;
}
I don't know about good, but this is my implementation that I did while learning C
static int ft_getintlen(int value)
{
int l;
int neg;
l = 1;
neg = 1;
if (value < 0)
{
value *= -1;
neg = -1;
}
while (value > 9)
{
l++;
value /= 10;
}
if (neg == -1)
{
return (l + 1);
}
return (l);
}
static int ft_isneg(int n)
{
if (n < 0)
return (-1);
return (1);
}
static char *ft_strcpy(char *dest, const char *src)
{
unsigned int i;
i = 0;
while (src[i] != '\0')
{
dest[i] = src[i];
i++;
}
dest[i] = src[i];
return (dest);
}
char *ft_itoa(int n)
{
size_t len;
char *instr;
int neg;
if (n == -2147483648)
{
instr = (char *)malloc(12);
return (instr ? ft_strcpy(instr, "-2147483648") : NULL);
}
neg = ft_isneg(n);
len = ft_getintlen(n);
instr = (char *)malloc((sizeof(char) * len) + 1);
if (!instr)
return (NULL);
if (neg == -1)
n *= -1;
instr[len--] = 0;
if (n == 0)
instr[len--] = 48;
while (n)
{
instr[len--] = ((n % 10) + 48);
n /= 10;
}
if (neg == -1)
instr[len] = '-';
return (instr);
}
This should work:
#include <string.h>
#include <stdlib.h>
#include <math.h>
char * itoa_alloc(int x) {
int s = x <= 0 ? 1 : 0; // either space for a '-' or for "0"
size_t len = x ? (size_t) floor( log10( fabs( (double) x ) ) ) + 1 : 1;
char * str = malloc(len+s + 1);
sprintf(str, "%i", x);
return str;
}
If you don't want to have to use the math/floating point functions (and have to link in the math libraries) you should be able to find non-floating point versions of log10 by searching the Web and do:
size_t len = my_log10( abs(x) ) + 1;
That might give you 1 more byte than you needed, but you'd have enough.
There a couple of suggestions I might make. You can use a static buffer and strdup to avoid repeatedly allocating too much memory on subsequent calls. I would also add some error checking.
char *itoa(int i)
{
static char buffer[12];
if (snprintf(buffer, sizeof(buffer), "%d", i) < 0)
return NULL;
return strdup(buffer);
}
If this will be called in a multithreaded environment, remove "static" from the buffer declaration.
This is chux's code without safety checks and the ifs. Try it online:
char* itostr(char * const dest, size_t const sz, int a, int const base) {
bool posa = a >= 0;
char buffer[sizeof a * CHAR_BIT + 1];
static const char digits[36] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
char* p = &buffer[sizeof buffer - 1];
do {
*(p--) = digits[abs(a % base)];
a /= base;
} while (a);
*p = '-';
p += posa;
size_t s = &buffer[sizeof(buffer)] - p;
memcpy(dest, p, s);
dest[s] = '\0';
return dest;
}
int main(void)
{
int i=1234;
char stmp[10];
#if _MSC_VER
puts(_itoa(i,stmp,10));
#else
puts((sprintf(stmp,"%d",i),stmp));
#endif
return 0;
}
