I am communicating with a board that requires I send it two signed bytes.
Would I need to do bitwise manipulation, or can I just send a 16-bit integer as follows?
int16_t rc_min_angle = -90;
int16_t rc_max_angle = 120;
write(fd, &rc_min_angle, 2);
write(fd, &rc_max_angle, 2);
int16_t has the correct size but may or may not be the correct endianness. To ensure little-endian order, use macros such as the ones from endian.h:
#define _BSD_SOURCE
#include <endian.h>
...
uint16_t rc_min_angle_le = htole16(rc_min_angle);
uint16_t rc_max_angle_le = htole16(rc_max_angle);
write(fd, &rc_min_angle_le, 2);
write(fd, &rc_max_angle_le, 2);
Here htole16 stands for "host to little endian 16-bit". It converts from the host machine's native endianness to little endian: if the machine is big endian it swaps the bytes; if it's little endian it's a no-op.
Also note that you have to pass the address of the values to write(), not the values themselves. Sadly, we cannot inline the calls and write write(fd, htole16(rc_min_angle), 2).
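If the convert-then-write pair recurs, one option is a tiny helper that hides the temporary (a sketch; write_u16le is a name I made up, not a standard call):

#include <stdint.h>
#include <unistd.h>
#include <endian.h>

/* Convert to little endian, then write the two bytes. */
ssize_t write_u16le(int fd, uint16_t val)
{
    uint16_t le = htole16(val);   /* no-op on little-endian hosts */
    return write(fd, &le, sizeof le);
}

Then write_u16le(fd, rc_min_angle) works for the int16_t values too, since the conversion to uint16_t keeps the same two's-complement byte pattern.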
If endian functions are not available, simply write the bytes in little endian order.
Perhaps with a compound literal.
//        v--------------------- compound literal ----------------------v
write(fd, &(uint8_t[2]){(uint16_t)rc_min_angle%256, (uint16_t)rc_min_angle/256}, 2);
write(fd, &(uint8_t[2]){(uint16_t)rc_max_angle%256, (uint16_t)rc_max_angle/256}, 2);
//                      ^-------- LS byte --------^  ^-------- MS byte -------^
// (the cast to uint16_t keeps the % and / arithmetic correct for negative angles)
I added the & assuming this write() is like write(2) on Linux.
If you don't need to have it type-generic, you can simply do:
#include <stdint.h>
#include <unistd.h>
/*most optimizers will turn this into `return 1;`*/
_Bool little_endian_eh() { uint16_t x = 1; return *(char *)&x; }
void swap2bytes(void *X) { char *x=X,t; t=x[0]; x[0]=x[1]; x[1]=t; }
int main()
{
    int16_t rc_min_angle = -90;
    int16_t rc_max_angle = 120;

    //this'll very likely be a noop since most machines
    //are little-endian
    if(!little_endian_eh()){
        swap2bytes(&rc_min_angle);
        swap2bytes(&rc_max_angle);
    }

    //TODO error checking on write calls
    int fd = 1;
    write(fd, &rc_min_angle, 2);
    write(fd, &rc_max_angle, 2);
}
To send little-endian data, you can just generate the bytes manually:
int write_le(int fd, int16_t val) {
    unsigned char val_le[2] = {
        val & 0xff, (uint16_t) val >> 8
    };
    int nwritten = 0, total = 2;
    while (nwritten < total) {
        int n = write(fd, val_le + nwritten, total - nwritten);
        if (n == -1)
            return nwritten > 0 ? nwritten : -1;
        nwritten += n;
    }
    return nwritten;
}
A good compiler will recognize that the bit manipulation does nothing on a little-endian platform and compile it to a no-op. (See e.g. gcc generating the same code for the variants with and without the bit-twiddling.)
Note also that you shouldn't ignore the return value of write(): not only can it encounter an error, it can also write less than you asked it to, in which case you must repeat the write.
I'm confused by one line of code in an implementation of MD5:
void MD5_Update(MD5_CTX *ctx, const void *data, unsigned long size)
{
    MD5_u32plus saved_lo;
    unsigned long used, available;

    saved_lo = ctx->lo;
    if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
        ctx->hi++;
    ctx->hi += size >> 29;

    used = saved_lo & 0x3f;

    if (used)
    {
        available = 64 - used;

        if (size < available)
        {
            memcpy(&ctx->buffer[used], data, size);
            return;
        }

        memcpy(&ctx->buffer[used], data, available);
        data = (const unsigned char *)data + available;
        size -= available;
        body(ctx, ctx->buffer, 64);
    }

    if (size >= 64)
    {
        data = body(ctx, data, size & ~(unsigned long)0x3f);
        size &= 0x3f;
    }

    memcpy(ctx->buffer, data, size);
}
The line in question is if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo). It seems that size counts bytes, but ctx->lo and saved_lo count bits. Why add them together? There is similar code on GitHub, and some projects use it too. Can anyone explain?
The remarks about "bit counters" are likely misleading - ctx->hi and ctx->lo count bytes, just like size does.
You correctly notice that you're just adding size (bytes) to ctx->lo (and then checking for overflow/propagating overflow into ctx->hi). The overflow check is pretty simple - lo is used as a 29-bit integer, and if the result after adding/masking is less than the original value, then overflow occurred.
The checks around used are also evidence for ctx->lo and ctx->hi being byte counters -- body processes data 64 bytes at a time, and the lo counter is ANDed with 0x3F (i.e. 63).
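To see the bookkeeping in isolation, here is a minimal sketch (function and parameter names are mine) of the same technique: a byte count kept in two 32-bit halves, with lo holding the low 29 bits. Since total bytes = hi * 2^29 + lo, the bit count MD5 ultimately needs is hi * 2^32 + (lo << 3), which is exactly why only 29 bits are kept in lo.

#include <stdint.h>

/* Sketch: maintain a split byte counter the way MD5_Update does. */
void count_bytes(uint32_t *hi, uint32_t *lo, unsigned long size)
{
    uint32_t saved_lo = *lo;

    /* Add into the low 29 bits; if the masked result got smaller,
       the addition wrapped, so carry one into hi. */
    *lo = (saved_lo + size) & 0x1fffffff;
    if (*lo < saved_lo)
        (*hi)++;

    /* Whatever size contributes above 2^29 goes straight into hi. */
    *hi += size >> 29;
}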
I'm trying to convert a uint32_t from network byte order to host format. I'm reading 4 bytes from a TCP connection, which I store in a buffer like this:
ssize_t read = 0;
char *file_buf;
size_t fb_size = 4 * sizeof(char);
file_buf = malloc(fb_size);
read = recv(socket_file_descriptor,file_buf,fb_size,0);
So I store the bytes in file_buf, but I want them as a number; how can I do this?
This looks straightforward:
ssize_t read = 0;
uint32_t myInteger; // Declare a 32-bit uint.
// Pass a pointer to the integer, and the size of the integer.
read = recv(socket_file_descriptor,&myInteger,sizeof(myInteger),0);
myInteger = ntohl(myInteger); // Change from Network order to Host order.
Here's how I would do it. Note the use of ntohl() to convert the data from network-endian to host-endian form:
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <sys/socket.h>
[...]
char file_buf[4];

if (recv(socket_file_descriptor, file_buf, sizeof(file_buf), 0) == sizeof(file_buf))
{
    uint32_t * p = (uint32_t *) file_buf;
    uint32_t num = ntohl(*p);
    printf("The number is %u\n", num);
}
else printf("Short read or network error?\n");
Some OSes (Linux with glibc, BSDs) have size-specific endianness conversion functions too, to supplement ntohl() and ntohs().
#include <endian.h> // Might need <sys/endian.h> instead on some BSDs
void your_function(uint32_t bigend_int) {
    uint32_t host_int = be32toh(bigend_int);
}
Edit:
But since you seem to have easy access to the individual bytes, there's always Rob Pike's preferred approach:
uint32_t host_int = ((uint32_t)(uint8_t)file_buf[3] <<  0)
                  | ((uint32_t)(uint8_t)file_buf[2] <<  8)
                  | ((uint32_t)(uint8_t)file_buf[1] << 16)
                  | ((uint32_t)(uint8_t)file_buf[0] << 24);
(The casts matter here: with a plain char buffer, sign extension would otherwise smear 0xFF bytes across the result.)
I need to convert a short value from the host byte order to little endian. If the target was big endian, I could use the htons() function, but alas - it's not.
I guess I could do:
swap(htons(val))
But this could potentially cause the bytes to be swapped twice, rendering the result correct but giving me a performance penalty which is not alright in my case.
Here is an article about endianness and how to determine it from IBM:
Writing endian-independent code in C: Don't let endianness "byte" you
It includes an example of how to determine endianness at run time (which you would only need to do once):
#include <stdio.h>
#include <stdlib.h>

const int i = 1;
#define is_bigendian() ( (*(char*)&i) == 0 )

int main(void) {
    int val;
    char *ptr;

    ptr = (char*) &val;
    val = 0x12345678;

    if (is_bigendian()) {
        printf("%X.%X.%X.%X\n", ptr[0], ptr[1], ptr[2], ptr[3]);
    } else {
        printf("%X.%X.%X.%X\n", ptr[3], ptr[2], ptr[1], ptr[0]);
    }
    exit(0);
}
The page also has a section on methods for reversing byte order:
short reverseShort (short s) {
    unsigned char c1, c2;

    if (is_bigendian()) {
        return s;
    } else {
        c1 = s & 255;
        c2 = (s >> 8) & 255;

        return (c1 << 8) + c2;
    }
}
or:
short reverseShort (char *c) {
    short s;
    char *p = (char *)&s;

    if (is_bigendian()) {
        p[0] = c[0];
        p[1] = c[1];
    } else {
        p[0] = c[1];
        p[1] = c[0];
    }

    return s;
}
Then you should know your endianness and call htons() conditionally. Actually, not even htons, but just swap bytes conditionally. Compile-time, of course.
Something like the following:
unsigned short swaps( unsigned short val)
{
    return ((val & 0xff) << 8) | ((val & 0xff00) >> 8);
}

/* host to little endian */

#define PLATFORM_IS_BIG_ENDIAN 1

#if PLATFORM_IS_LITTLE_ENDIAN
unsigned short htoles( unsigned short val)
{
    /* no-op on a little endian platform */
    return val;
}
#elif PLATFORM_IS_BIG_ENDIAN
unsigned short htoles( unsigned short val)
{
    /* need to swap bytes on a big endian platform */
    return swaps( val);
}
#else
unsigned short htoles( unsigned short val)
{
    /* the platform hasn't been properly configured for the */
    /* preprocessor to know if it's little or big endian */
    /* use potentially less-performant, but always works option */
    return swaps( htons(val));
}
#endif
If you have a system that's properly configured (such that the preprocessor knows whether the target is little or big endian) you get an 'optimized' version of htoles(). Otherwise you get the potentially non-optimized version that depends on htons(). In any case, you get something that works.
Nothing too tricky and more or less portable.
Of course, you can further improve the optimization possibilities by implementing this with inline or as macros as you see fit.
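For instance, a sketch of the macro route (reusing the PLATFORM_IS_* configuration above; the HTOLES name is mine):

/* Macro variant: no call overhead even in unoptimized builds. */
#if PLATFORM_IS_BIG_ENDIAN
#define HTOLES(x) swaps(x)
#else   /* assumes a configured little-endian platform */
#define HTOLES(x) (x)
#endif

swaps() itself can also be declared static inline (C99) so the whole conversion folds away.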
You might want to look at something like the "Portable Open Source Harness (POSH)" for an actual implementation that defines the endianness for various compilers. Note, getting to the library requires going though a pseudo-authentication page (though you don't need to register to give any personal details): http://hookatooka.com/poshlib/
This trick should work: at startup, use ntohs with a dummy value and then compare the resulting value to the original value. If both values are the same, then the machine uses big endian; otherwise it is little endian.
Then, use a ToLittleEndian method that either does nothing or swaps the bytes, depending on the result of the initial test.
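A sketch of that idea in C (the function names are mine, not a standard API):

#include <stdint.h>
#include <arpa/inet.h>

static int host_is_big_endian;   /* set once at startup */

void init_byte_order(void)
{
    /* ntohs() is a no-op exactly on big-endian hosts. */
    host_is_big_endian = (ntohs(0x1234) == 0x1234);
}

uint16_t to_little_endian16(uint16_t val)
{
    if (host_is_big_endian)
        return (uint16_t)((val << 8) | (val >> 8));  /* swap the bytes */
    return val;                                      /* already little endian */
}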
My rule-of-thumb performance guess is that it depends on whether you are little-endian-ising a big block of data in one go, or just one value:
If just one value, then the function call overhead is probably going to swamp the overhead of unnecessary byte-swaps, and that's even if the compiler doesn't optimise away the unnecessary byte swaps. Then you're maybe going to write the value as the port number of a socket connection, and try to open or bind a socket, which takes an age compared with any sort of bit-manipulation. So just don't worry about it.
If a large block, then you might worry the compiler won't handle it. So do something like this:
if (!is_little_endian()) {
    for (int i = 0; i < size; ++i) {
        vals[i] = swap_short(vals[i]);
    }
}
Or look into SIMD instructions on your architecture which can do it considerably faster.
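For instance, a sketch assuming GCC or Clang (whose __builtin_bswap16 compiles to the native byte-swap instruction, and whose auto-vectorizers handle loops like this):

#include <stdint.h>
#include <stddef.h>

/* Byte-swap a whole buffer of 16-bit values in place. */
void swap16_buffer(uint16_t *vals, size_t count)
{
    for (size_t i = 0; i < count; ++i)
        vals[i] = __builtin_bswap16(vals[i]);
}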
Write is_little_endian() using whatever trick you like. I think the one Robert S. Barnes provides is sound, but since you usually know for a given target whether it's going to be big- or little-endian, maybe you should have a platform-specific header file that defines it to be a macro evaluating to either 1 or 0.
As always, if you really care about performance, then look at the generated assembly to see whether pointless code has been removed or not, and time the various alternatives against each other to see what actually goes fastest.
Unfortunately, there's not really a cross-platform way to determine a system's byte order at compile-time with standard C. I suggest adding a #define to your config.h (or whatever else you or your build system uses for build configuration).
A unit test to check for the correct definition of LITTLE_ENDIAN or BIG_ENDIAN could look like this:
#include <assert.h>
#include <limits.h>
#include <stdint.h>
void check_bits_per_byte(void)
{ assert(CHAR_BIT == 8); }

void check_sizeof_uint32(void)
{ assert(sizeof (uint32_t) == 4); }

void check_byte_order(void)
{
    static const union { unsigned char bytes[4]; uint32_t value; } byte_order =
        { { 1, 2, 3, 4 } };
    static const uint32_t little_endian = 0x04030201ul;
    static const uint32_t big_endian = 0x01020304ul;

#ifdef LITTLE_ENDIAN
    assert(byte_order.value == little_endian);
#endif
#ifdef BIG_ENDIAN
    assert(byte_order.value == big_endian);
#endif
#if !defined LITTLE_ENDIAN && !defined BIG_ENDIAN
    assert(!"byte order unknown or unsupported");
#endif
}

int main(void)
{
    check_bits_per_byte();
    check_sizeof_uint32();
    check_byte_order();
}
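As an aside, a sketch of one way to populate config.h on toolchains that predefine byte-order macros (GCC and Clang do; this is an assumption about your compiler):

/* In config.h. Careful: <endian.h> also defines LITTLE_ENDIAN and
   BIG_ENDIAN as constants, so prefer project-prefixed names if that
   header is in play. */
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define LITTLE_ENDIAN
#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#define BIG_ENDIAN
#endif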
On many Linux systems, there is an <endian.h> or <sys/endian.h> with conversion functions; see the man page for endian(3).
I have a piece of hardware that I'm trying to control via my computer's built-in SPI driver. The SPI driver is controlled via ioctl.
I can successfully drive the hardware from a small C program; but when I try to duplicate the C program in Ruby I run into problems.
Using IO#ioctl to set basic registers (with u32 and u8 ints) works fine (I know because I can also use ioctl to read back the values I set); but as soon as I try to set a complex struct, the program fails with
small.rb:51:in 'ioctl': Connection timed out # rb_ioctl - /dev/spidev32766.0 (Errno::ETIMEDOUT)
I might be running into trouble because the spi_ioc_transfer struct has two pointers to byte buffers but the pointers are typed as unsigned 64-bit ints even on 32-bit platforms -- necessitating a cast to (unsigned long) in C. I'm trying to replicate that in Ruby but am quite unsure of myself.
Below are the C program which works and the Ruby port which doesn't work. The do_latch functions are necessary so I can see the result in my hardware; but are probably not germane to this problem.
C (which works):
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>
int do_latch() {
    int fd = open("/sys/class/gpio/gpio1014/value", O_RDWR);
    write(fd, "1", 1);
    write(fd, "0", 1);
    close(fd);
}

int do_transfer(int fd, uint8_t *bytes, size_t len) {
    uint8_t *rx_bytes = malloc(sizeof(uint8_t) * len);

    struct spi_ioc_transfer transfer = {
        .tx_buf = (unsigned long)bytes,
        .rx_buf = (unsigned long)rx_bytes,
        .len = len,
        .speed_hz = 100000,
        .delay_usecs = 0,
        .bits_per_word = 8,
        .cs_change = 0,
        .tx_nbits = 0,
        .rx_nbits = 0,
        .pad = 0
    };

    if(ioctl(fd, SPI_IOC_MESSAGE(1), &transfer) < 1) {
        perror("Could not send SPI message");
        exit(1);
    }

    free(rx_bytes);
}

int main() {
    int fd = open("/dev/spidev32766.0", O_RDWR);

    uint8_t mode = 0;
    ioctl(fd, SPI_IOC_WR_MODE, &mode);

    uint8_t lsb_first = 0;
    ioctl(fd, SPI_IOC_WR_LSB_FIRST, lsb_first);

    uint32_t speed_hz = 100000;
    ioctl(fd, SPI_IOC_WR_MAX_SPEED_HZ, speed_hz);

    size_t data_len = 36;
    uint8_t *tx_data = malloc(sizeof(uint8_t) * data_len);

    memset(tx_data, 0xFF, data_len);
    do_transfer(fd, tx_data, data_len);
    do_latch();

    sleep(2);

    memset(tx_data, 0x00, data_len);
    do_transfer(fd, tx_data, data_len);
    do_latch();

    free(tx_data);
    close(fd);
    return 0;
}
Ruby (which fails on the ioctl line in do_transfer):
SPI_IOC_WR_MODE = 0x40016b01
SPI_IOC_WR_LSB_FIRST = 0x40016b02
SPI_IOC_WR_BITS_PER_WORD = 0x40016b03
SPI_IOC_WR_MAX_SPEED_HZ = 0x40046b04
SPI_IOC_WR_MODE32 = 0x40046b05
SPI_IOC_MESSAGE_1 = 0x40206b00
def do_latch()
  File.open("/sys/class/gpio/gpio1014/value", File::RDWR) do |file|
    file.write("1")
    file.write("0")
  end
end

def do_transfer(file, bytes)
  ##########################################################################################
  #begin spi_ioc_transfer struct (cat /usr/include/linux/spi/spidev.h)

  #pack bytes into a buffer; create a new buffer (filled with zeroes) for the rx
  tx_buff = bytes.pack("C*")
  rx_buff = (Array.new(bytes.size) { 0 }).pack("C*")

  #on 32-bit, the struct uses a zero-extended pointer for the buffers (so it's the same
  #byte layout on 64-bit as well) -- so do some trickery to get the buffer addresses
  #as 64-bit strings even though this is running on a 32-bit computer
  tx_buff_pointer = [tx_buff].pack("P").unpack("L!")[0] #u64 (zero-extended pointer)
  rx_buff_pointer = [rx_buff].pack("P").unpack("L!")[0] #u64 (zero-extended pointer)

  buff_len = bytes.size #u32
  speed_hz = 100000     #u32
  delay_usecs = 0       #u16
  bits_per_word = 8     #u8
  cs_change = 0         #u8
  tx_nbits = 0          #u8
  rx_nbits = 0          #u8
  pad = 0               #u16

  struct_array = [tx_buff_pointer, rx_buff_pointer, buff_len, speed_hz, delay_usecs, bits_per_word, cs_change, tx_nbits, rx_nbits, pad]
  struct_packed = struct_array.pack("QQLLSCCCCS")

  #in C, I pass a pointer to the structure; so mimic that here
  struct_pointer_packed = [struct_packed].pack("P")

  #end spi_ioc_transfer struct
  ##########################################################################################

  file.ioctl(SPI_IOC_MESSAGE_1, struct_pointer_packed)
end
File.open("/dev/spidev32766.0", File::RDWR) do |file|
file.ioctl(SPI_IOC_WR_MODE, [0].pack("C"));
file.ioctl(SPI_IOC_WR_LSB_FIRST, [0].pack("C"));
file.ioctl(SPI_IOC_WR_MAX_SPEED_HZ, [0].pack("L"));
data_bytes = Array.new(36) { 0x00 }
do_transfer(file, data_bytes)
do_latch()
sleep(2)
data_bytes = []
data_bytes = Array.new(36) { 0xFF }
do_transfer(file, data_bytes)
do_latch()
end
I pulled the magic number constants out by having C print them (they're macros in C). I can validate that most of them work; I'm a little unsure about the ioctl message that fails (SPI_IOC_MESSAGE_1) since that doesn't work and it's a complicated macro. Still, I have no reason to think that it's incorrect and it's always the same when I look at it from C.
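For reference, a sketch of how those constants can be dumped from the C side (the SPI_IOC_* macros come from <linux/spi/spidev.h>):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int main(void)
{
    /* Print each ioctl request number so it can be hard-coded in Ruby. */
    printf("SPI_IOC_WR_MODE         = 0x%08lx\n", (unsigned long)SPI_IOC_WR_MODE);
    printf("SPI_IOC_WR_LSB_FIRST    = 0x%08lx\n", (unsigned long)SPI_IOC_WR_LSB_FIRST);
    printf("SPI_IOC_WR_MAX_SPEED_HZ = 0x%08lx\n", (unsigned long)SPI_IOC_WR_MAX_SPEED_HZ);
    printf("SPI_IOC_MESSAGE(1)      = 0x%08lx\n", (unsigned long)SPI_IOC_MESSAGE(1));
    return 0;
}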
When I print out the structure in C and then print it out in Ruby, the only differences are in the buffer addresses, so if something's going wrong, that feels like the right place to look. But I've run out of things to try.
I can also print out the addresses in both versions and they look like what I would expect, 32 bits extended to 64 bits, and match the values in the structure (although the structure is little-endian -- this is an ARM).
Structure in C (that works):
60200200 00000000 a8200200 00000000 24000000 40420f00 00000800 00000000
Structure in Ruby (that fails):
a85da27f 00000000 08399b7f 00000000 24000000 40420f00 00000800 00000000
Is there an obvious mistake that I'm making when I lay out the struct in Ruby? Is there something else that I'm missing?
My next step is to write a library in C and use FFI to access it from Ruby. But that seems like giving up; and using the native ioctl function feels like the better approach if I can ever make it work.
Update
Above, I'm doing
struct_array = [tx_buff_pointer, rx_buff_pointer, buff_len, speed_hz, delay_usecs, bits_per_word, cs_change, tx_nbits, rx_nbits, pad]
struct_packed = struct_array.pack("QQLLSCCCCS")
#in C, I pass a pointer to the structure; so mimic that here
struct_pointer_packed = [struct_packed].pack("P")
file.ioctl(SPI_IOC_MESSAGE_1, struct_pointer_packed)
because I have to pass a pointer to the struct in C. But that's what's causing the error!
Instead, it needs to be
struct_array = [tx_buff_pointer, rx_buff_pointer, buff_len, speed_hz, delay_usecs, bits_per_word, cs_change, tx_nbits, rx_nbits, pad]
struct_packed = struct_array.pack("QQLLSCCCCS")
file.ioctl(SPI_IOC_MESSAGE_1, struct_packed)
I guess Ruby is automatically making it an array when it marshals it over?
Unfortunately, now it only intermittently works. The second call never works and the first call doesn't work if I pass in all zeros. It's very mysterious.
A common issue is not flushing the buffer; you could check that and try it.
From the documentation of IO#flush:
Flushes any buffered data within ios to the underlying operating system (note that this is Ruby internal buffering only; the OS may buffer the data as well).
rb_io_flush(VALUE io)
{
    return rb_io_flush_raw(io, 1);
}
#include <stdio.h>
#include <stdlib.h>
int main(void)
{
    int *int_pointer = (int *) malloc(sizeof(int));

    // open output file
    FILE *outptr = fopen("test_output", "w");
    if (outptr == NULL)
    {
        fprintf(stderr, "Could not create %s.\n", "test_output");
        return 1;
    }

    *int_pointer = 0xabcdef;

    fwrite(int_pointer, sizeof(int), 1, outptr);

    //clean up
    fclose(outptr);
    free(int_pointer);
    return 0;
}
This is my code, and when I inspect the test_output file with xxd it gives the following output:
$ xxd -c 12 -g 3 test_output
0000000: efcdab 00 ....
I'm expecting it to print abcdef instead of efcdab.
Which book are you reading? There are a number of issues in this code (casting the return value of malloc, for example). Most importantly, consider the cons of using an integer type which might vary in size and representation from system to system.
An int is guaranteed the ability to store values in the range -32767 to 32767. Your implementation might allow more values, but to be portable and friendly with people using ancient compilers such as Turbo C (there are a lot of them), you shouldn't use int to store values larger than 32767 (0x7fff), such as 0xabcdef. When such out-of-range conversions are performed, the result is implementation-defined; it could involve saturation, wrapping, trap representations or a signal corresponding to a computational error, the latter two of which could cause undefined behaviour later on.
You need to translate to an agreed-upon field format. When sending data over the wire, or writing data to a file to be transferred to other systems, it's important that the protocol for communication be agreed upon. This includes using the same size and representation for integer fields. Output and input should each go through a translation function (serialisation and deserialisation, respectively).
Your fields are binary, and so your file should be opened in binary mode. For example, use fopen(..., "wb") rather than "w". Otherwise, '\n' characters might be translated to \r\n pairs; Windows systems are notorious for this. Can you imagine what kind of havoc and confusion this could wreak? I can, because I've answered a question about this problem.
Perhaps uint32_t might be a better choice, but I'd choose unsigned long as uint32_t isn't guaranteed to exist. On that note, for systems which don't have htonl (which returns uint32_t according to POSIX), that function could be implemented like so:
uint32_t htonl(uint32_t x) {
    return (x & 0x000000ff) << 24
         | (x & 0x0000ff00) << 8
         | (x & 0x00ff0000) >> 8
         | (x & 0xff000000) >> 24;
}
As an example inspired by the above htonl function, consider these macros:
typedef unsigned long ulong;

#define serialised_long(x) serialised_ulong((ulong) x)
#define serialised_ulong(x) (x & 0xFF000000) / 0x1000000 \
                          , (x & 0xFF0000) / 0x10000 \
                          , (x & 0xFF00) / 0x100 \
                          , (x & 0xFF)

typedef unsigned char uchar;

#define deserialised_long(x) (x[0] <= 0x7f \
                             ? (long)deserialised_ulong(x) \
                             : -(long)deserialised_ulong((uchar[]) { 0xFF - x[0] \
                                                                   , 0xFF - x[1] \
                                                                   , 0xFF - x[2] \
                                                                   , 0x100 - x[3] }))
#define deserialised_ulong(x) ( x[0] * 0x1000000UL \
                              + x[1] * 0x10000UL \
                              + x[2] * 0x100UL \
                              + x[3] )
#include <stdio.h>
#include <stdlib.h>
int main(void)
{
    FILE *f = fopen("test_output", "wb+");
    if (f == NULL)
    {
        fprintf(stderr, "Could not create %s.\n", "test_output");
        return 1;
    }

    ulong value = 0xABCDEF;
    unsigned char datagram[] = { serialised_ulong(value) };

    fwrite(datagram, sizeof datagram, 1, f);
    printf("%08lX serialised to %02X%02X%02X%02X\n", value, datagram[0], datagram[1], datagram[2], datagram[3]);

    rewind(f);
    fread(datagram, sizeof datagram, 1, f);
    value = deserialised_ulong(datagram);
    printf("%02X%02X%02X%02X deserialised to %08lX\n", datagram[0], datagram[1], datagram[2], datagram[3], value);

    fclose(f);
    return 0;
}
Use htonl()
It converts from whatever the host byte order is (the endianness of your machine) to network byte order, so whatever machine you're running on, you get the same byte order. These calls exist so that bytes are sent over the network in the right order regardless of the host, but they work for your file output too.
See the man pages of htonl and byteorder. There are various conversion functions available for the different integer sizes: 16-bit, 32-bit, 64-bit, ...
#include <stdio.h>
#include <stdlib.h>
#include <arpa/inet.h>
int main(void) {
    int *int_pointer = (int *) malloc(sizeof(int));

    // open output file
    FILE *outptr = fopen("test_output", "w");
    if (outptr == NULL) {
        fprintf(stderr, "Could not create %s.\n", "test_output");
        return 1;
    }

    *int_pointer = htonl(0xabcdef); // <====== This ensures correct byte order

    fwrite(int_pointer, sizeof(int), 1, outptr);

    //clean up
    fclose(outptr);
    free(int_pointer);
    return 0;
}