I'm working on a C project and I have some instructions of type:
add $1 $2 $3
So I'm reading the line as a string, parsing through it and have a corresponding integer for add, say - 2. Could anyone please tell me how I could convert this to binary in order to write it to a file?
The registers are 5 bits and the operation is 6 bits. The total will be 32 (the last 10 bits are unused).
So the opcode and registers are stored as strings, say char op[] = "2", char r1[] = "1", char r2[] = "2", etc. (note that a register number can be as high as 31). Could anyone give me an example of a function that would convert this to binary in the format 000010 00001 00010 00011 0000000000?
The easiest way is to use a bit field:
struct code {
    unsigned opcode   : 6;
    unsigned operand1 : 5;
    unsigned operand2 : 5;
    unsigned operand3 : 5;
} test_code;
Now you can simply assign to the different members:
test_code.opcode   = 0x02;
test_code.operand1 = 0x01;
test_code.operand2 = 0x02;
test_code.operand3 = 0x03;
atoi(op) will give you 2, so you can just string it together
As far as putting it into that structure you want, just create a structure that has bitfields in it and place it in a union with a 32 bit unsigned integer, and you can take the value directly.
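For illustration, a minimal sketch of that union (hypothetical type and member names; note that the order of bit-fields within a word is implementation-defined, so verify the layout your compiler produces before writing it to a file):
#include <stdint.h>

union instruction {
    struct {
        unsigned opcode   : 6;
        unsigned operand1 : 5;
        unsigned operand2 : 5;
        unsigned operand3 : 5;
        unsigned unused   : 11; /* pad out to a full 32 bits */
    } fields;
    uint32_t word; /* read the packed 32-bit value from here */
};
After assigning to the members as above, e.g. ins.fields.opcode = 2; on a union instruction ins, the packed value can be taken directly from ins.word.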
Quick pseudo code
const char* int32_to_bin(int32_t value) { /* assumes a non-negative value */
    static char output[33]; /* static, so the buffer outlives the call */
    int pos = 32;
    output[pos] = '\0';
    do { /* fill from the end so the most significant bit comes first */
        output[--pos] = (value & 1) ? '1' : '0';
        value >>= 1;
    } while (value > 0);
    return &output[pos];
}
What you're asking is a C question, but you tagged it objective-c, so I'll cover both.
C:
These variables such as op[] are really defined as char op[N] or similar (whatever your lengths are), which are C strings of course.
So the operation is 6 bits and each register is 5 bits; that's a 15 + 6 = 21-bit word. I'll assume the top 11 bits are zeroes.
What you need are 4 more variables that are integers:
int opint; int r0int; int r1int; int r2int;
You want the integer value of those strings to go in to those integers. You can use atoi() to achieve this, such as opint = atoi(op);
Now that you've got your integers derived from strings, you need to create the 32 bit word. The easiest way to do this is to first create one integer that holds those bits in the right place. You can do it (assuming 32 bit integers) like this:
int word = 0;
word |= ((opint & 0x3f) << (21 - 6))  |
        ((r0int & 0x1f) << (21 - 11)) |
        ((r1int & 0x1f) << (21 - 16)) |
        ((r2int & 0x1f));
Where the << shifts each field into place. After this, you should have the word integer properly formed. Next, just turn it into a binary representation (if that's even necessary? Not sure of your application).
Objective-C
The only difference is that I assume those strings start as NSString *op; etc. In this case, get the integers by opint = [op intValue];, then form the word as I describe.
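Putting the pieces together, here is a minimal end-to-end sketch under the same layout assumptions (hypothetical file name out.bin; the strings are assumed to hold valid decimal numbers):
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    /* hypothetical inputs parsed from "add $1 $2 $3" */
    char op[] = "2", r0[] = "1", r1[] = "2", r2[] = "3";

    /* mask each field and shift it into place (top 11 bits stay zero) */
    uint32_t word = ((uint32_t)(atoi(op) & 0x3f) << 15) |
                    ((uint32_t)(atoi(r0) & 0x1f) << 10) |
                    ((uint32_t)(atoi(r1) & 0x1f) << 5)  |
                     (uint32_t)(atoi(r2) & 0x1f);

    /* write the packed word to a file; fwrite emits host byte
       order, so fix a convention if the file must be portable */
    FILE *f = fopen("out.bin", "wb");
    if (f != NULL) {
        fwrite(&word, sizeof word, 1, f);
        fclose(f);
    }
    return 0;
}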
This code will convert a string to binary
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
char* stringToBinary(char* s);
void binchar(char output[], char character);
void itoa(int value, char* str, int base);
void strreverse(char* begin, char* end);
int main(int argc, char const *argv[])
{
printf("%s\n", stringToBinary("asdf") );
}
char* stringToBinary(char* s) {
if(s == NULL) return 0; /* no input string */
size_t len = strlen(s);
char *binary = malloc(len*8 + 1); // each char is one byte (8 bits) and + 1 at the end for null terminator
if (binary == NULL) return NULL; // allocation can fail
binary[0] = '\0'; // strcat needs an initialized, nul-terminated string
int i = 0;
char output[9];
for(i=0; i< len; i++){
binchar(output, s[i]);
strcat(binary,output);
}
return binary;
}
void binchar(char output[], char character)
{
//char output[9];
itoa(character, output, 2);
}
// since GCC does not fully support the itoa function, here is an implementation
// copied from http://www.strudel.org.uk/itoa/
void itoa(int value, char* str, int base) {
static char num[] = "0123456789abcdefghijklmnopqrstuvwxyz";
char* wstr=str;
int sign;
// Validate base
if (base<2 || base>36){ *wstr='\0'; return; } // num[] holds 36 digit characters
// Take care of sign
if ((sign=value) < 0) value = -value;
// Conversion. Number is reversed.
do *wstr++ = num[value%base]; while(value/=base);
if(sign<0) *wstr++='-';
*wstr='\0';
// Reverse string
strreverse(str,wstr-1);
}
void strreverse(char* begin, char* end) {
char aux;
while(end>begin)
aux=*end, *end--=*begin, *begin++=aux;
}
Related
Hi I am pretty new to coding and I really need help.
Basically I have a decimal value and I converted it to a binary value.
Using this method
long decimalToBinary(long n)
{
int remainder;
long binary = 0, i = 1;
while(n != 0)
{
remainder = n%2;
n = n/2;
binary= binary + (remainder*i);
i = i*10;
}
return binary;
}
And I want to give each digit of the binary value its own space inside an array. However, I can't seem to save the digits from the return value in my string array. I think it has something to do with converting the long to a string, but I could be wrong! Here is what I have so far.
I do not want to use sprintf(); I do not wish to print the value, I just want it stored in the array so that the if conditions can read it. Any help would be appreciated!
int decimalG = 24;
long binaryG = decimalToBinary(decimalG);
char myStringG[8] = {binaryG};
for( int i = 0; i<8; i++)
{
if (myStringG[i] == '1' )
{
T1();
}
else
{
T0();
}
}
In this case since the decimal is 24, the binary would be 11000, therefore it should execute the function T1() 2 times and T0() 6 times. But it doesn't do that, and I can't seem to find a way to store the saved values in the array.
*Ps the Itoa(); function is also not an option. Thanks in Advance! :)
As the post is tagged arm, using malloc() might not be the best approach, although it is the simplest. If you insist on using arrays:
#include <stdio.h>
#include <stdlib.h>
int decimalToBinary(long n, char out[], int len)
{
long remainder;
// C arrays are zero based
len--;
// TODO: check if the input is reasonable
while (n != 0) {
// pick a bit
remainder = n % 2;
// shift n one bit to the right
// It is the same as n = n/2 but
// is more telling of what you are doing:
// shifting the whole thing to the right
// and drop the least significant bit
n >>= 1;
// Check boundaries! Always!
if (len < 0) {
// return zero for "Fail"
return 0;
}
// doing the following four things at once:
// cast remainder to char
// add the numerical value of the digit "0"
// put it into the array at place len
// decrement len
out[len--] = (char) remainder + '0';
}
// pad the remaining leading places with '0' so every element
// of the output array holds a printable digit
while (len >= 0)
out[len--] = '0';
// return non-zero value for "All OK"
return 1;
}
// I don't know what you do here, but it
// doesn't matter at all for this example
void T0()
{
fputc('0', stdout);
}
void T1()
{
fputc('1', stdout);
}
int main()
{
// your input
int decimalG = 24;
// an array able to hold 8 (eight) elements of type char
char myStringG[8];
// call decimalToBinary with the number, the array and
// the length of that array
if (!decimalToBinary(decimalG, myStringG, 8)) {
fprintf(stderr, "decimalToBinary failed\n");
exit(EXIT_FAILURE);
}
// Print the whole array
// How to get rid of the leading zeros is left to you
for (int i = 0; i < 8; i++) {
if (myStringG[i] == '1') {
T1();
} else {
T0();
}
}
// just for the optics
fputc('\n', stdout);
exit(EXIT_SUCCESS);
}
Computing the length needed is tricky, but if you know the size of long your Micro uses (8, 16, 32, or even 64 bit these days) you can take that as the maximum size for the array. Leaves the leading zeros but that should not be a problem, or is it?
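For example, sizing the array from the type itself (using CHAR_BIT from <limits.h>) is always large enough, whatever the platform:
#include <limits.h>

/* one char per bit of long: 32 or 64 elements on common targets */
char myStringG[sizeof(long) * CHAR_BIT];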
To achieve your goal, you don't have to convert a decimal value to binary:
unsigned decimalG = 24; // Assumed positive, for negative values
// have implementation-defined representation
for (; decimalG; decimalG >>= 1) {
if(decimalG & 1) {
// Do something
} else {
// Do something else
}
}
Or you can use a union, but I'm not sure whether this approach is well defined by the standard.
If you stick to writing decimalToBinary, note that you'll have to use an array:
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
char *decimalToBinary(unsigned n);
int
main(void) {
int decimalG = 15;
char *binary = decimalToBinary(decimalG);
puts(binary);
free(binary);
}
char *
decimalToBinary(unsigned n) {
// Don't forget to free() it after use!!!
char *binary = malloc(sizeof(unsigned) * CHAR_BIT + 1);
if(!binary) return 0;
size_t i;
for (i = 0; i < sizeof(unsigned) * CHAR_BIT; i++) {
binary[i] = '0' + ((n >> i) & 1); // in reverse order
}
binary[i] = 0;
return binary;
}
Use the itoa (integer-to-ascii) function.
http://www.cplusplus.com/reference/cstdlib/itoa/
EDIT: Correction:
Don't be an idiot, use the itoa (integer-to-ascii) function.
http://www.cplusplus.com/reference/cstdlib/itoa/
EDIT:
Maybe I wasn't clear enough. I saw the line that said:
*Ps the Itoa(); function is also not an option.
This is completely unreasonable. You want to reinvent the wheel, but you want someone else to do it? What do you possibly have against itoa? It has been around forever and is shipped by virtually every platform and toolchain you're likely to target (although, strictly speaking, it is a common extension rather than part of the ISO C standard).
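Where it is available, usage is a one-liner (a sketch; check your toolchain's documentation for the exact prototype):
char buf[sizeof(int) * 8 + 2]; /* enough for all bits, a sign and the terminator */
itoa(24, buf, 2);              /* buf now holds "11000" */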
I want to give each character of the binary into it's own
space inside an array. However, I can't seem to save digits
from the return values in my string array.
There are a number of ways to approach this, if I understand what you are asking. First, there is no need to actually store the results of the binary representation of your number in an array to call T1() or T0() based on the bit value of any given bit that makes up the number.
Take your example 24 (binary 11000). If I read your post correctly you state:
In this case since the decimal is 24, the binary
would be 11000 therefore it should execute the
function T1() 2 times and T0() 6 times.
(I'm not sure where you get 6 times, it looks like you intended that T0() would be called 3 times)
If you have T0 and T1 defined, for example, to simply let you know when they are called, e.g.:
void T1 (void) { puts ("T1 called"); }
void T0 (void) { puts ("T0 called"); }
You can write a function (say named callt) to call T1 for each 1-bit and T0 for each 0-bit in a number simply as follows:
void callt (const unsigned long v)
{
if (!v) { putchar ('0'); return; };
size_t sz = sizeof v * CHAR_BIT;
unsigned long rem = 0;
while (sz--)
if ((rem = v >> sz)) {
if (rem & 1)
T1();
else
T0();
}
}
So for example, if you passed 24 to the function as callt (24), the output would be:
$ ./bin/dec2bincallt
T1 called
T1 called
T0 called
T0 called
T0 called
(full example provided at the end of answer)
On the other hand, if you really do want to give each character of the binary representation its own space inside an array, then you would simply need to pass an array to capture the bit values (either the ASCII character representations of '0' and '1', or just 0 and 1) instead of calling T0 and T1. (You would also add a few lines to handle v=0, and the nul-terminating character if you will use the array as a string.) For example:
/** copy 'sz' bits of the binary representation of 'v' to 's'.
* returns pointer to 's', on success, empty string otherwise.
* 's' must be adequately sized to hold 'sz + 1' bytes.
*/
char *bincpy (char *s, unsigned long v, unsigned sz)
{
if (!s) return s; /* can't write into a null pointer */
if (!sz) {
*s = 0;
return s;
}
if (!v) {
*s = '0';
*(s + 1) = 0;
return s;
}
unsigned i;
for (i = 0; i < sz; i++)
s[i] = (v >> (sz - 1 - i)) & 1 ? '1' : '0';
s[sz] = 0;
return s;
}
Let me know if you have any additional questions. Below are two example programs. Both take as their first argument the number to convert (or to process) as binary (default: 24 if no argument is given). The first simply calls T1 for each 1-bit and T0 for each 0-bit:
#include <stdio.h>
#include <stdlib.h>
#include <limits.h> /* for CHAR_BIT */
void callt (const unsigned long v);
void T1 (void) { puts ("T1 called"); }
void T0 (void) { puts ("T0 called"); }
int main (int argc, char **argv) {
unsigned long v = argc > 1 ? strtoul (argv[1], NULL, 10) : 24;
callt (v);
return 0;
}
void callt (const unsigned long v)
{
if (!v) { putchar ('0'); return; };
size_t sz = sizeof v * CHAR_BIT;
unsigned long rem = 0;
while (sz--)
if ((rem = v >> sz)) {
if (rem & 1) T1(); else T0();
}
}
Example Use/Output
$ ./bin/dec2bincallt
T1 called
T1 called
T0 called
T0 called
T0 called
$ ./bin/dec2bincallt 11
T1 called
T0 called
T1 called
T1 called
The second stores each bit of the binary representation for the value as a nul-terminated string and prints the result:
#include <stdio.h>
#include <stdlib.h>
#define BITS_PER_LONG 64 /* define as needed */
char *bincpy (char *s, unsigned long v, unsigned sz);
int main (int argc, char **argv) {
unsigned long v = argc > 1 ? strtoul (argv[1], NULL, 10) : 24;
char array[BITS_PER_LONG + 1] = "";
printf (" values in array: %s\n", bincpy (array, v, 16));
return 0;
}
/** copy 'sz' bits of the binary representation of 'v' to 's'.
* returns pointer to 's', on success, empty string otherwise.
* 's' must be adequately sized to hold 'sz + 1' bytes.
*/
char *bincpy (char *s, unsigned long v, unsigned sz)
{
if (!s) return s; /* can't write into a null pointer */
if (!sz) {
*s = 0;
return s;
}
if (!v) {
*s = '0';
*(s + 1) = 0;
return s;
}
unsigned i;
for (i = 0; i < sz; i++)
s[i] = (v >> (sz - 1 - i)) & 1 ? '1' : '0';
s[sz] = 0;
return s;
}
Example Use/Output
(padding to 16 bits)
$ ./bin/dec2binarray
values in array: 0000000000011000
$ ./bin/dec2binarray 11
values in array: 0000000000001011
So I have a binary representation of a number as a character array. What I need to do is shift this representation to the right by 11 bits.
For example,
I have a char array which is currently storing this string: 11000000111001
After performing a bitwise shift, I will get 110 with some zeros before it.
I tried using this function but it gave me strange output:
char *shift_right(unsigned char *ar, int size, int shift)
{
int carry = 0; // Clear the initial carry bit.
while (shift--) { // For each bit to shift ...
for (int i = size - 1; i >= 0; --i) { // For each element of the array from high to low ...
int next = (ar[i] & 1) ? 0x80 : 0; // ... if the low bit is set, set the carry bit.
ar[i] = carry | (ar[i] >> 1); // Shift the element one bit right and add the old carry.
carry = next; // Remember the old carry for next time.
}
}
return ar;
}
Any help on this would be very much appreciated; let me know if I'm not being clear.
They are just characters...
char *shift_right(unsigned char *ar, int size, int shift)
{
memmove(&ar[shift], ar, size-shift);
memset(ar, '0', shift);
return(ar);
}
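A quick test of the function above against the string from the question:
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned char buf[] = "11000000111001"; /* the 14-digit string */
    shift_right(buf, 14, 11);
    printf("%s\n", (char *)buf); /* prints 00000000000110, i.e. 110 with leading zeros */
    return 0;
}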
Or, convert the string to a long-long, shift it, then back to a string:
char *shift_right(char *ar, int size, int shift)
{
unsigned long long x;
char *cp;
x=strtoull(ar, &cp, 2); // As suggested by 'Don't You Worry Child'
x = x >> shift;
while(cp > ar)
{
--cp;
*cp = (1 & x) ? '1' : '0';
x = x >> 1;
}
return(ar);
}
If you really want to use bitwise shifting, then you can't do it on a string. Simply not Possible!!
You have to convert it to an integer (use strtol for that), then do the bitwise shifting. After that, convert it back to a string (there is no standard library function for that; use a for loop).
I would advise to keep the code simple and readable.
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h> /* for uint16_t, uint8_t */
void shift_right (char* dest, const char* source, int shift_n)
{
uint16_t val = strtoul(source, NULL, 2);
val >>= shift_n;
for(uint8_t i=0; i<16; i++)
{
if(val & 0x8000) // first item of the string is the MSB
{
dest[i] = '1';
}
else
{
dest[i] = '0';
}
val <<= 1; // keep evaluating the number from MSB and down
}
dest[16] = '\0';
}
int main()
{
const char str [16+1] = "0011000000111001";
char str_shifted [16+1];
puts(str);
shift_right(str_shifted, str, 11);
puts(str_shifted);
return 0;
}
This question already has answers here:
Conversion of Char to Binary in C
(3 answers)
Closed 9 years ago.
I want a really basic way to print out the binary representation of a char. I can't seem to find any example code anywhere.
I assumed you could do it in a few lines but everything I find is overly long and complex, using lots of functions I haven't used before. itoa comes up a lot but it's not standard.
Is there a simple function or simple way of writing a function to take a char variable and then print out a binary representation?
Eg: char 'x' is the argument taken in by the function and "x is 0111 1000" is printed out.
It's for a school assignment where I must take user input of a string and print out the string in binary. I just need to get the basics of converting a char to binary but i'm struggling at the moment.
What you'd want to do is use bitwise operators to mask the bits one by one and print them to the standard output.
A char in C is guaranteed to be 1 byte (CHAR_BIT bits, which is 8 on any mainstream platform), so loop to 8.
Within each iteration, mask off the highest order bit.
Once you have it, just print it to standard output.
Here is a quick stab which hopefully makes sense...
#include <stdio.h>

int main() {
char a = 10;
int i;
for (i = 0; i < 8; i++) {
printf("%d", !!((a << i) & 0x80));
}
printf("\n");
return 0;
}
In order to get the bit, I shift to the left to get the numbered bit (highest to lowest so printing it is easy) and then mask it off. I then translate it to 0 or 1 with !!.
you can use this method (note it needs <string.h> for strcat)
const char *byte_to_binary(int x)
{
static char b[9];
b[0] = '\0';
int z;
for (z = 128; z > 0; z >>= 1)
{
strcat(b, ((x & z) == z) ? "1" : "0");
}
return b;
}
to get the binary representation and print it, for example:
printf("%s\n", byte_to_binary(15));
#include <stdio.h>
#include <limits.h> /* for UINT_MAX */

void printBits(size_t const size, void const * const ptr)
{
unsigned char *b = (unsigned char*) ptr;
unsigned char byte;
int i, j;
for (i=size-1;i>=0;i--)
{
for (j=7;j>=0;j--)
{
byte = b[i] & (1<<j);
byte >>= j;
printf("%u", byte);
}
}
puts("");
}
int main(void)
{
int i = 23;
unsigned int ui = UINT_MAX;
float f = 23.45f;
printBits(sizeof(i), &i);
printBits(sizeof(ui), &ui);
printBits(sizeof(f), &f);
return 0;
}
Try this:-
#include <limits.h>
char *chartobin ( unsigned char c )
{
static char bin[CHAR_BIT + 1] = {0};
int i;
for( i = CHAR_BIT - 1; i >= 0; i-- )
{
bin[i] = (c % 2) + '0';
c /= 2;
}
return bin;
}
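For the example in the question:
printf("x is %s\n", chartobin('x')); /* prints: x is 01111000 */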
Is it possible to convert an int to a "string" in C just using casting? Without any functions like atoi() or sprintf()?
What I want would be like this:
int main(int argc, char *argv[]) {
int i = 500;
char c[4];
c = (char)i;
i = 0;
i = (int)c;
}
The reason is that I need to generate two random ints (0 to 500) and send both as one string in a message queue to another process. The other process receives the message and computes the LCM.
I know how to do it with atoi() and itoa(). But my teacher wants it done just using casts.
Also, why doesn't the following compile?
typedef struct
{
int x;
int y;
} int_t;
typedef struct
{
char x[sizeof(int)];
char y[sizeof(int)];
} char_t;
int main(int argc, char *argv[])
{
int_t rand_int;
char_t rand_char;
rand_int.x = (rand() % 501);
rand_int.y = (rand() % 501);
rand_char = (char_t)rand_int;
}
Of course it's not possible, because an array is an object and needs storage. Casts result in values, not objects. Some would say the whole point/power of C is that you have control over the storage and lifetime of objects.
The proper way to generate a string containing a decimal representation of an integer is to create storage for it yourself and use snprintf:
char buf[sizeof(int)*3+2]; /* 3 digits per byte covers the magnitude, +1 sign, +1 terminator */
snprintf(buf, sizeof buf, "%d", n);
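Applied to the message-queue scenario from the question, a minimal sketch (assumes the receiver parses the string back with sscanf or strtol):
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    int x = rand() % 501, y = rand() % 501; /* two ints in 0..500 */
    char msg[32];

    /* pack both numbers into one string, space-separated */
    snprintf(msg, sizeof msg, "%d %d", x, y);

    /* the receiving process recovers them the same way */
    int a, b;
    sscanf(msg, "%d %d", &a, &b);
    printf("sent \"%s\", got back %d and %d\n", msg, a, b);
    return 0;
}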
You have to convert 500 to "500".
"500" is the same as '5' then '0' then '0' then 0. The last element 0 is the null terminator of a string.
500 is equal to 5 * 100 + 0 * 10 + 0 * 1. You have to do some math here; basically you have to use the / and % operators.
Then this could also be useful: '5' is the same as '0' + 5.
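A minimal sketch of that idea (assuming a non-negative three-digit value, so the starting divisor can be hard-coded):
int n = 500;
char buf[4]; /* three digits plus the terminator */
int div = 100; /* highest power of 10 <= n */
int i = 0;

while (div > 0) {
    buf[i++] = '0' + (n / div) % 10; /* '5', then '0', then '0' */
    div /= 10;
}
buf[i] = '\0'; /* buf now holds "500" */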
Without giving away an exact coded answer, what you'll want to do is loop through each digit of the integer (by computing its remainder modulo 10 via the % operator), and then add its value to the ASCII value of '0', casting the result back to a char, and placing that result in a null-terminated string.
An example which pretends like implicit casts don't exist might look like this:
char c = (char) ( ((int) '0') + 5 ); // c should now be '5'.
You can determine the length of the resulting string by computing the log base 10 of the number, or by simply allocating it dynamically as you go using realloc().
Casting is a horrible way to do this due to endianness, but here is an example anyhow - there are some occasions where it is useful (unions work better these days though, due to compiler handling of these types of casts).
#include <stdio.h> //for printf
#define INT(x) ((int*)(x)) //these are not endian-safe methods
#define CHAR(x) ((char*)(x))
int main(void)
{
int *x=INT(&"HI !");
printf("%X\n",*x); //look up the ascii and note the order
printf("%s\n",CHAR(x));
return 0;
}
For an int with a value < 500, if the most significant byte comes first, then you get a "string" (pointer to a char array) of "" (or {0}); but if the endianness is LSB first (x86 is little endian) then you get a usable 3-byte "string" char* (not necessarily human-readable characters). However, there is no guarantee that there will be a zero byte in an integer, and since all you have is a pointer to the address where the int was stored, running normal string functions on it would go past the end of the original int into no-man's-land (in small test programs it will often be environment variables). Anyhow, for more portability you can use network byte order (which is a no-op on big-endian machines):
#include <arpa/inet.h>
uint32_t htonl(uint32_t hostlong);
uint16_t htons(uint16_t hostshort);
uint32_t ntohl(uint32_t netlong);
uint16_t ntohs(uint16_t netshort);
These functions just byteswap as necessary to get network byte order. On your x86 they will be optimized away, so you might as well use them for portability.
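For example, to peel the bytes off a 32-bit value in MSB-first order portably (a sketch; int_to_bytes is a hypothetical helper name):
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

/* copy v into out[0..3], most significant byte first */
void int_to_bytes(uint32_t v, unsigned char out[4])
{
    uint32_t be = htonl(v); /* byteswaps on little-endian hosts */
    memcpy(out, &be, sizeof be);
}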
Just because it is not listed yet: here is a way to convert an int to a char array with variable-size allocation, using snprintf:
int value = 5;
// this will just output the length which is to expect
int length = snprintf( NULL, 0, "%d", value );
char* valueAsString = malloc( length + 1 );// one more for 0-terminator
snprintf( valueAsString, length + 1, "%d", value );
Get the number of divisions, then add the digits one by one to your buffer:
char *int2str(int nb) {
int i = 0;
int div = 1;
int cmp = nb;
char *nbr = malloc(sizeof(char) * 12);
if (!nbr)
return (NULL);
if (nb < 0)
nbr[i++] = '-';
while ((cmp /= 10) != 0)
div = div * 10;
while (div > 0) {
nbr[i++] = abs(nb / div) + '0'; // 48 == '0'
nb = nb % div;
div /= 10;
}
nbr[i] = '\0';
return (nbr);
}
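Usage (the caller owns the returned buffer):
char *s = int2str(-42);
if (s) {
    printf("%s\n", s); /* prints -42 */
    free(s);
}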
Even more compact (this one needs <math.h> for floor and log10, and <stdlib.h> for llabs and malloc):
char *lotaa(long long nb) {
int size = (nb ? floor(log10(llabs(nb))) : 0) + (nb >= 0 ? 1 : 2);
char *str = malloc(size + 1);
str[0] = '-';
str[size] = 0;
for(nb = llabs(nb); nb > 0 || (size > 0 && str[1] == 0); nb /= 10)
str[--size] = '0' + nb % 10;
return (str);
}
I need some help in cutting a string into a pair of characters and then converting it to HEX format.
eg. char *ADDRESS = "0011AABB";
I want the above address to be split into "00", "11", "AA" and "BB", and after the split each pair converted to 0x00, 0x11, 0xAA and 0xBB, each stored in an unsigned char.
Thanks
You suggest that your address-strings are of type char *, but I assume you want a solution that guarantees not to trash them, i.e. one that takes them as type char const *.
I assume also that the addresses they can represent are 32-bit, as per the
example char *ADDRESS = "0011AABB".
In that case a solution that does exactly what you ask for in an obvious
way is:
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#define ADDRESS_BYTES 4 // Bytes in an address
/* Convert a hex numeral encoding an address to the unsigned chars that it
encodes
`addr_str` - in: a hex numeral encoding an address
`bytes` - out: The unsigned char bytes of the address, high-byte first.
return - The number of bytes output: ADDRESS_BYTES if `addr_str` is a
valid hex numeral, otherwise 0.
*/
unsigned address_bytes(char const *addr_str, unsigned char bytes[ADDRESS_BYTES])
{
char buf[3] = {0}; // 3-bytes of 0-filled working space.
char *endp;
unsigned i = 0;
unsigned j = 0;
assert(strlen(addr_str) == 2 * ADDRESS_BYTES); // Insist on 8-char string
for ( ;i < 2 * ADDRESS_BYTES; ++j) { // Take chars 2 at a time
buf[i % 2] = addr_str[i]; ++i; // Next char to buf[0]
buf[i % 2] = addr_str[i]; ++i; // Next + 1 char to buf[1]
// Convert buffer from hex numeral to unsigned char in next byte.
bytes[j] = (unsigned char)strtoul(buf,&endp,16);
if (*endp) { // Check for invalid hex.
return 0; // Failure
}
}
return j; // = 4
}
// A test program...
#include <stdio.h>
int main(void)
{
unsigned char bytes[ADDRESS_BYTES];
char const * address = "0011AABB";
unsigned done_bytes = address_bytes(address,bytes);
printf("The %d valid address bytes are (hex):",done_bytes);
unsigned i = 0;
for ( ;i < done_bytes; ++i) {
printf(" %02x",(unsigned)bytes[i]);
}
putchar('\n');
return 0;
}
However, exactly what you ask for is not an efficient solution.
You can accomplish your goal by simply converting an 8-char hex-numeral encoding
a 32-bit address into the encoded 32-bit unsigned integer, and then getting the
4 unsigned char bytes that compose this unsigned integer in high-byte-first order.
Converting the hex numeral to a uint32_t can be done with a single call to
strtoul. Then getting the unsigned char bytes of this uint32_t in
high-byte-first order is simply a matter of knowing whether that uint32_t is
big-endian or little-endian. So here is a better solution:
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h>
unsigned address_bytes(char const *address, unsigned char bytes[ADDRESS_BYTES])
{
union {
uint32_t i;
char c[ADDRESS_BYTES];
} endian_tester = {0x01020304};
int big_endian = endian_tester.c[0] == 1;
uint32_t addr = 1;
char *endp;
assert(strlen(address) == 2 * ADDRESS_BYTES);
addr = (uint32_t)strtoul(address,&endp,16);
if (*endp) {
return 0;
}
endp = (char *)&addr;
if (big_endian) {
// The least significant byte is highest in memory
bytes[0] = endp[0];
bytes[1] = endp[1];
bytes[2] = endp[2];
bytes[3] = endp[3];
} else {
// The least significant byte is lowest in memory
bytes[0] = endp[3];
bytes[1] = endp[2];
bytes[2] = endp[1];
bytes[3] = endp[0];
}
return ADDRESS_BYTES;
}
And if you are able and willing to make the non-portable assumption that
the address-strings are encoded in ASCII, then you could avoid calling
strtoul entirely and compute the output bytes directly from the input
chars, using the characters' positions in the ASCII collating sequence
to get the unsigned char values that they encode:
#include <assert.h>
#include <string.h>
#include <ctype.h>
unsigned address_bytes(char const *address, unsigned char bytes[ADDRESS_BYTES])
{
unsigned i = 0;
unsigned j = 0;
assert(strlen(address) == 2 * ADDRESS_BYTES);
for ( ; i < 2 * ADDRESS_BYTES; ++i,++j) {
// First character of a pair..
if (isdigit(address[i])) {
// A decimal digit encodes its ASCII value - '0'
bytes[j] = address[i] - '0';
} else if (isxdigit(address[i])) {
// A hex digit encodes 10 + its ASCII value - 'A';
// toupper makes this work for lowercase input too
bytes[j] = 10 + toupper(address[i]) - 'A';
} else {
return 0; // Invalid hex
}
++i; // Second character of a pair...
bytes[j] <<= 4; // Shift the 1st character's value 1 nibble high
// OR the 2nd character's value into the low nibble of the byte...
if (isdigit(address[i])) {
bytes[j] |= address[i] - '0';
} else if (isxdigit(address[i])) {
bytes[j] |= 10 + toupper(address[i]) - 'A';
} else {
return 0; // Invalid hex
}
}
return ADDRESS_BYTES;
}
The last might be the fastest if that matters.
Built and tested with GCC 4.7.2 and clang 3.2