How to construct a C macro for low-level bit masking? - c

I'm beginning to learn embedded C programming for microcontrollers and want to do something that would make my life easier. Usually when dealing with bit masking everybody uses stuff like:
char a = (1 << 3) | (1 << 1) | (1 << 5);
I want to use a macro for something like this. For just one mask I can simply define this macro:
#define M(n) (1 << (n))
Nothing fancy. The problem is that I cannot come up with a good solution that would allow me to type:
a = MM( 3, 1, 5 ); or at least a = MM( 3, 3, 1, 5 ); (where the first 3 is the number of arguments)
instead of a = M(3) | M(1) | M(5);
I came up with a solution that involved functions with multiple arguments, but it really bugs me that I cannot do it with a macro.

Found the answer.
#define M(a) (1 << (a))
#define M2(a, b) (M(a) | M(b))
#define M3(a, b...) (M(a) | M2(b))   /* b... is a GNU named variadic parameter */
#define M4(a, b...) (M(a) | M3(b))
// can be continued
/* Always expands to its 5th argument; padding MM's arguments with M4..M
   makes the right macro land in that position for 1 to 4 arguments. */
#define GET_MACRO( _1, _2, _3, _4, NAME, ... ) NAME
#define MM(args...) GET_MACRO( args, M4, M3, M2, M )(args)
This uses the argument-counting trick from the answer in this thread.
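For reference, a quick sketch of how the dispatch plays out in practice (the args... form is a GNU extension, so this assumes GCC or Clang):
char a = MM(3, 1, 5);   /* -> M3(3, 1, 5) -> (M(3) | (M(1) | M(5))) */
char b = MM(6);         /* -> M(6)        -> (1 << (6))             */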

Maybe this can help:
#define BITMASK_SET(x,y) ((x) |= (y))
#define BITMASK_CLEAR(x,y) ((x) &= (~(y)))
#define BITMASK_FLIP(x,y) ((x) ^= (y))
#define BITMASK_CHECK(x,y) ((x) & (y))
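A hypothetical usage sketch, combining these with the M() macro from the question (the variable name and bit positions are just for illustration):
uint8_t status = 0;
BITMASK_SET(status, M(3) | M(5));    /* set bits 3 and 5 */
BITMASK_CLEAR(status, M(3));         /* clear bit 3 again */
BITMASK_FLIP(status, M(0));          /* toggle bit 0 */
if (BITMASK_CHECK(status, M(5))) { /* bit 5 is set here */ }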

Use Boost.Preprocessor:
#include <boost/preprocessor/tuple/to_seq.hpp>
#include <boost/preprocessor/seq/for_each_i.hpp>
#include <boost/preprocessor/control/if.hpp>
#define M(n) (1 << (n))
#define F(r, data, i, elem) BOOST_PP_IF(i, data, ) M(elem)
#define MM(...) BOOST_PP_SEQ_FOR_EACH_I(F, | , BOOST_PP_TUPLE_TO_SEQ((__VA_ARGS__)) )
...
char a = MM(3, 1, 5);//char a = (1 << (3)) | (1 << (1)) | (1 << (5)) ;


Purpose of double underscore pointer operator for C functions

I am writing some C code for a microcontroller and have come across a curious couple of statements in some generated drivers for a peripheral I am using. A function uint8_t gapm_reset_req_handler (void) is supposed to reset a handler and return a status, but it seems to be failing at that, which surprises me as it looks simple enough. The relevant code I would like to ask about is this function and the INTERFACE_UNPACK_UINT8 line.
uint8_t gapm_reset_req_handler (void)
{
    uint8_t u8Operation, u8Status;
    INTERFACE_MSG_INIT(GAPM_RESET_CMD, TASK_GAPM);
    INTERFACE_PACK_ARG_UINT8(GAPM_RESET);
    INTERFACE_SEND_WAIT(GAPM_CMP_EVT, TASK_GAPM);
    INTERFACE_UNPACK_UINT8(&u8Operation);
    INTERFACE_UNPACK_UINT8(&u8Status);
    INTERFACE_MSG_DONE();
    if (u8Operation != GAPM_RESET)
        return AT_BLE_FAILURE;
    return u8Status;
}
These INTERFACE macros are defined in another file, and I am a bit lost as to what exactly the generated code is supposed to accomplish with the double underscore on the ptr variable. Does anyone have any intuition as to what is going on? To me, it looks like some operation on the value that is passed to it, but the use of the double underscore confuses me as I thought that was just for macros. Any thoughts are greatly appreciated!
Specific line
#define INTERFACE_UNPACK_UINT8(ptr)\
*ptr = *__ptr++
Full Definition of INTERFACE Code:
#ifndef __INTERFACE_H__
#define __INTERFACE_H__

#include "event.h"

#define INTERFACE_HDR_LENGTH 9
#define INTERFACE_API_PKT_ID 0x05
#define INTERFACE_SEND_BUF_MAX 600
#define INTERFACE_RCV_BUFF_LEN 500

extern uint8_t interface_send_msg[INTERFACE_SEND_BUF_MAX];

void platform_send_lock_aquire(void);
void platform_send_lock_release(void);

#define INTERFACE_MSG_INIT(msg_id, dest_id) \
    do{\
        uint8_t* __ptr = interface_send_msg;\
        uint16_t __len;\
        platform_send_lock_aquire();\
        *__ptr++ = (INTERFACE_API_PKT_ID);\
        *__ptr++ = ((msg_id) & 0x00FF );\
        *__ptr++ = (((msg_id)>>8) & 0x00FF );\
        *__ptr++ = ((dest_id) & 0x00FF );\
        *__ptr++ = (((dest_id)>>8) & 0x00FF );\
        *__ptr++ = ((TASK_EXTERN) & 0x00FF );\
        *__ptr++ = (((TASK_EXTERN)>>8) & 0x00FF );\
        __ptr += 2

#define INTERFACE_PACK_ARG_UINT8(arg)\
        *__ptr++ = (arg)

#define INTERFACE_PACK_ARG_UINT16(arg)\
        *__ptr++ = ((arg) & 0x00FF);\
        *__ptr++ = (((arg) >> 8) & 0x00FF)

#define INTERFACE_PACK_ARG_UINT32(arg) \
        *__ptr++ = (uint8_t)((arg) & 0x00FF );\
        *__ptr++ = (uint8_t)(( (arg) >> 8) & 0x00FF) ;\
        *__ptr++ = (uint8_t)(( (arg) >> 16) & 0x00FF);\
        *__ptr++ = (uint8_t)(( (arg) >> 24) & 0x00FF)

#define INTERFACE_PACK_ARG_BLOCK(ptr,len)\
        memcpy(__ptr, ptr, len);\
        __ptr += len

#define INTERFACE_PACK_ARG_DUMMY(len)\
        __ptr += len

#define INTERFACE_PACK_LEN()\
        __len = __ptr - &interface_send_msg[INTERFACE_HDR_LENGTH];\
        interface_send_msg[7] = ((__len) & 0x00FF );\
        interface_send_msg[8] = (((__len)>>8) & 0x00FF);\
        __len += INTERFACE_HDR_LENGTH;

#define INTERFACE_SEND_NO_WAIT()\
        INTERFACE_PACK_LEN();\
        interface_send(interface_send_msg, __len)

#define INTERFACE_SEND_WAIT(msg, src)\
        watched_event.msg_id = msg;\
        watched_event.src_id = src;\
        INTERFACE_PACK_LEN();\
        interface_send(interface_send_msg, __len);\
        if(platform_cmd_cmpl_wait()){return AT_BLE_FAILURE;}\
        __ptr = watched_event.params;\

#define INTERFACE_MSG_DONE()\
        platform_send_lock_release();\
    }while(0)

#define INTERFACE_UNPACK_INIT(ptr)\
    do{\
        uint8_t* __ptr = (uint8_t*)(ptr);\

#define INTERFACE_UNPACK_UINT8(ptr)\
        *ptr = *__ptr++

#define INTERFACE_UNPACK_UINT16(ptr)\
        *ptr = (uint16_t)__ptr[0]\
             | ((uint16_t)__ptr[1] << 8);\
        __ptr += 2

#define INTERFACE_UNPACK_UINT32(ptr)\
        *ptr = (uint32_t)__ptr[0] \
             | ((uint32_t)__ptr[1] << 8) \
             | ((uint32_t)__ptr[2] << 16)\
             | ((uint32_t)__ptr[3] << 24);\
        __ptr += 4

#define INTERFACE_UNPACK_BLOCK(ptr, len)\
        memcpy(ptr, __ptr, len);\
        __ptr += len

#define INTERFACE_UNPACK_SKIP(len)\
        __ptr += (len)

#define INTERFACE_UNPACK_DONE()\
    }while(0)

void interface_send(uint8_t* msg, uint16_t u16TxLen);

#endif /* HCI_H_ */
*ptr = *__ptr++ is simply a byte copy followed by incrementing the source pointer by one. __ptr is a local variable declared inside one of the macros and then re-used in the other macros.
Notably, it is bad practice to use identifiers starting with an underscore, and particularly with two underscores or one underscore followed by an upper-case letter. These are reserved for the compiler and the standard library, and the library you posted does not appear to belong to either. So there is reason to believe it was badly designed.
The function-like macro nightmare that follows confirms this: it is horrible code with non-existent type safety and massive potential for undefined behavior from bitwise arithmetic on signed numbers. People used to write macro code like this before function inlining became industry standard back in the 1980s-1990s. Then again, stdint.h was introduced in 1999, so more likely they were just incompetent.
As for what the code does, it is much simpler than it looks. There are just various macros for shoveling data from one data type to another, apparently as part of some protocol encoding/decoding. They also seem to make various assumptions about endianness that aren't portable.
Please never use or trust code provided to you by some silicon vendor. They have a very long tradition of employing the absolutely worst programmers in the world. If someone wrote microcontroller code like this in a normal company, they would get fired immediately. Similarly, don't trust the average open source barf posted on Github either.
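To illustrate the inlining point above, here is a minimal sketch (not part of the vendor code; the function names are mine) of how the UINT8/UINT16 unpack steps could be written as inline functions, with the buffer cursor passed explicitly instead of hiding a __ptr variable:
#include <stdint.h>

static inline uint8_t unpack_u8(const uint8_t **src)
{
    return *(*src)++;                        /* copy one byte, advance the cursor */
}

static inline uint16_t unpack_u16_le(const uint8_t **src)
{
    uint16_t v = (uint16_t)(*src)[0]
               | ((uint16_t)(*src)[1] << 8); /* explicit little-endian decode */
    *src += 2;
    return v;
}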

How to define a macro of two tokens in Cpp

In my Arduino (C++) code, I have these macros that set/clear bit y of register x:
#define SET(x,y) x |= (1 << y)
#define CLEAR(x,y) x &= ~(1<< y)
In several places I then use:
CLEAR(PORTB,7)
or
SET(PORTB,7)
I would like to define a macro to be PORTB,7 so it appears only once, in a header file, and not all over my code. (I show only one example, but I have several combinations of PORTx,N in my code.)
I tried:
#define CLOCK PORTB,7
#define CLOCK_HIGH SET(CLOCK)
but it then fails to build with:
error: macro "SET" requires 2 arguments, but only 1 given CLOCK_HIGH; delay(DELAY); CLOCK_LOW; delay(DELAY);
Is there a way to achieve that?
You must first expand the inner macro, i.e. do another pass. Your code may look like this:
#define SET(x,y) do{ (x) |= (1u << (y)); }while(0)
#define CLEAR(x,y) do{ (x) &= ~(1u << (y)); }while(0)
#define HIGH(a) SET(a) // another empty pass, just forward
// the `a` is expanded and the second `SET` takes two arguments
// or better, but not fully compliant:
// #define HIGH(...) SET(__VA_ARGS__)
#define CLOCK PORTB, 7
#define CLOCK_HIGH() HIGH(CLOCK)
int main() {
    int PORTB;
    CLOCK_HIGH();
}
As a good measure, research macro pitfalls and good practices for writing macros.
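For completeness, the CLEAR side from the question would presumably get the same treatment (a sketch following the same two-pass idea):
#define LOW(a)      CLEAR(a)   /* extra pass so CLOCK expands into two arguments */
#define CLOCK_LOW() LOW(CLOCK)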

How to write generic #define macro in C and write less code

Let's say I have 2 sets of values for P_A, P_B, P_C as below
#define X_P_A 2
#define X_P_B 3
#define X_P_C 4
#define Y_P_A 5
#define Y_P_B 6
#define Y_P_C 7
There are 3 types of users: ones that only need the X variants, ones that only need the Y variants, and ones that may need both.
e.g.
#ifdef X
#define P_A X_P_A
#define P_B X_P_B
#define P_C X_P_C
#endif
#ifdef Y
#define P_A Y_P_A
#define P_B Y_P_B
#define P_C Y_P_C
#endif
Users that need both will make the decision at run time and call X_P_<> or Y_P_<> as needed.
Is there a way to make this simpler, so that I don't have to write conditional macros for each field?
#ifdef X
// do something magical that defines all P_<> to X_P_<>
#endif
I know it sounds stupid. You may ask why not just use the X_P_<> variants directly when X is defined. I am just trying to understand if it is possible.
I am okay with changing the way the macros are defined.
Is something similar to the code below possible? (The problem with it is that compilation fails because #if is not allowed within a #define.)
#define A 1
#define B 2
#define C 3
/* Not a correct #define macro */
#define X_P(x) \
#if(x == A) 2 \
#elif(x == B) 3 \
#elif(x == C) 4 \
#endif
#ifdef X
#define P(x) X_P(x)
#endif
You could do it with one variant of X-Macros:
#define IMPLEMENT(X) \
    X(P_A, 1, 5) \
    X(P_B, 2, 6) \
    X(P_C, 3, 7)

enum {
// Just one
#define X1_P(n, x, y) n = x,
    IMPLEMENT(X1_P)
// Both
#define X2_P(n, x, y) X_##n = x,
#define Y2_P(n, x, y) Y_##n = y,
    IMPLEMENT(X2_P)
    IMPLEMENT(Y2_P)
    DUMMY // Just in case compiler is strict about trailing comma
};
Which would expand to:
enum {
    P_A = 1, P_B = 2, P_C = 3,
    X_P_A = 1, X_P_B = 2, X_P_C = 3,
    Y_P_A = 5, Y_P_B = 6, Y_P_C = 7,
    DUMMY
};
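For reference, here is a self-contained sketch of the same X-macro idea using the question's original values (2,3,4 for X and 5,6,7 for Y); the X_ENTRY/Y_ENTRY/P_DUMMY names are mine:
#include <stdio.h>

#define IMPLEMENT(X) \
    X(P_A, 2, 5)     \
    X(P_B, 3, 6)     \
    X(P_C, 4, 7)

#define X_ENTRY(n, x, y) X_##n = x,
#define Y_ENTRY(n, x, y) Y_##n = y,

enum {
    IMPLEMENT(X_ENTRY)
    IMPLEMENT(Y_ENTRY)
    P_DUMMY /* absorbs the trailing comma */
};

int main(void) {
    printf("%d %d\n", X_P_A, Y_P_C); /* prints "2 7" */
    return 0;
}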
#define X_P(x) ((x) - 0x10 + 1) // 1 is 0x31 and A is 0x41 hence A will give 0x41 - 0x10 + 1 = 0x32
#define Y_P(y) ((y) - 0x10 + 5) // same logic applies
Would that be what you are looking for? It does not fully answer your question, though.

big endian swap macro "uswap" leads to unexpected error [closed]

I'm facing the following problem.
The macro
#define uswap_32(x) \
((((x) & 0xff000000) >> 24) | \
(((x) & 0x00ff0000) >> 8) | \
(((x) & 0x0000ff00) << 8) | \
(((x) & 0x000000ff) << 24))
gets the following number as argument: x = 0x49074808
Why does my program break/reset here?
Thx
EDIT:
Description of my real application:
I have a bootloader sitting at flash start address 0x08000000U and going up to 0x08004000U.
After the bootloader there is a uImage header (taken from U-Boot) in flash, with size 0x40.
In my application, I just want to check if there is actually a correct uImage header, because I have two bootloader versions. One can handle images of type uImage and the other one can't. In the latter case, after the bootloader there is no uImage header at all; there is application code!
In the application I just want to check the header CRC:
#define UIMAGE_FLASH_ADDRESS (0x08004000U)

image_header_t *header;
header = (image_header_t *) UIMAGE_FLASH_ADDRESS;
if (image_check_hcrc(header))
    /* do something...*/

static int image_check_hcrc(const image_header_t *hdr)
{
    uint32_t hcrc;
    uint32_t len = image_get_header_size();
    image_header_t header;

    /* Copy header so we can blank CRC field for re-calculation */
    memcpy(&header, (char *)hdr, image_get_header_size());
    header.ih_hcrc = 0; // byte order independent
    hcrc = crc32(0, (unsigned char *)&header, len);
    return hcrc == image_get_hcrc(hdr);
}
The call to uswap_32() happens in the last line of the above function:
#define uswap_32(x) \
((((x) & 0xff000000) >> 24) | \
(((x) & 0x00ff0000) >> 8) | \
(((x) & 0x0000ff00) << 8) | \
(((x) & 0x000000ff) << 24))
# define cpu_to_be32(x) uswap_32(x)
# define be32_to_cpu(x) uswap_32(x)
#define uimage_to_cpu(x) be32_to_cpu(x)
#define cpu_to_uimage(x) cpu_to_be32(x)
#define image_get_hdr_l(f) \
static inline uint32_t image_get_##f(const image_header_t *hdr) \
{ \
return uimage_to_cpu(hdr->ih_##f); \
}
image_get_hdr_l(magic) /* image_get_magic */
image_get_hdr_l(hcrc) /* image_get_hcrc */
image_get_hdr_l(time) /* image_get_time */
image_get_hdr_l(size) /* image_get_size */
image_get_hdr_l(load) /* image_get_load */
image_get_hdr_l(ep) /* image_get_ep */
image_get_hdr_l(dcrc) /* image_get_dcrc */
#define image_get_hdr_b(f) \
static inline uint8_t image_get_##f(const image_header_t *hdr) \
{ \
return hdr->ih_##f; \
}
image_get_hdr_b(os) /* image_get_os */
image_get_hdr_b(arch) /* image_get_arch */
image_get_hdr_b(type) /* image_get_type */
image_get_hdr_b(comp) /* image_get_comp */
It is a good idea to assign x to a local variable within the macro. Otherwise, if an expression is passed as an argument to the macro, it will be evaluated 4 times; consider uswap_32(2+3), or even worse, uswap_32(some_func(x)).
Second issue: you need to add an explicit UL suffix to the constants. Here is a safer version of the macro:
#define uswap_32(x) ({\
uint32_t _x = (x);\
(uint32_t)(\
((_x & 0xff000000UL) >> 24) | \
((_x & 0x00ff0000UL) >> 8) | \
((_x & 0x0000ff00UL) << 8) | \
((_x & 0x000000ffUL) << 24)); \
})
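Note that the ({ ... }) statement expression above is a GCC extension. If that is a concern, a static inline function gives the same single-evaluation guarantee plus real type checking; a sketch (not the U-Boot original):
#include <stdint.h>

static inline uint32_t uswap_32_fn(uint32_t x)
{
    /* x is a by-value parameter, so the argument is evaluated exactly once */
    return ((x & 0xff000000UL) >> 24) |
           ((x & 0x00ff0000UL) >>  8) |
           ((x & 0x0000ff00UL) <<  8) |
           ((x & 0x000000ffUL) << 24);
}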

Is there a C preprocessor that eliminates #ifdefs but also evaluates preprocessor macros?

I have a project that does a lot of this:
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
// do some legacy stuff
#else
// do current stuff
#endif
where KERNEL_VERSION is defined as
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
I'd like to eliminate the conditional blocks that are not relevant for the current version, but tools like sunifdef don't evaluate the KERNEL_VERSION macro, so something like
sunifdef --replace -DKERNEL_VERSION\(a,b,c\)=\(\(\(a\)\<\<16\)+\(\(b\)\<\<8\)+\(c\)\) -DLINUX_VERSION_CODE=3.13.1 *
fails with the message
sunifdef: error 0x04200: Garbage in argument "-DKERNEL_VERSION(a,b,c)=(((a)<<16)+((b)<<8)+(c))"
How do I get around this?
With sunifdef 3.1.3, you can't do it, as you demonstrated. Nor can you do it with earlier versions of coan such as 4.2.2.
However, with coan 5.2 (the current version), you can almost do what you are after.
$ cat legacy.c
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
do(some,legacy,stuff)
#else
do(current,stuff)
#endif
$ coan source -DLINUX_VERSION_CODE=0x020635 legacy.c
coan: /Users/jleffler/soq/legacy.c: line 1: warning 0x0041c: "-DKERNEL_VERSION(a,b,c)=(((a) << 16) + ((b) << 8) + (c))" has been assumed for the current file
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
do(current,stuff)
$ coan source -DLINUX_VERSION_CODE=0x020624 legacy.c
coan: /Users/jleffler/soq/legacy.c: line 1: warning 0x0041c: "-DKERNEL_VERSION(a,b,c)=(((a) << 16) + ((b) << 8) + (c))" has been assumed for the current file
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
do(some,legacy,stuff)
$
This is close to what you want, but not quite. It gives 'correct' output, but maybe not 'helpful' output. It gives you the code that would be compiled for the LINUX_VERSION_CODE specified on the command line, whereas you'd probably like the conditionals based on LINUX_VERSION_CODE and KERNEL_VERSION that are not false to survive into the output.
The successor to sunifdef seems to be coan, and the following command seemed to work (on one simple file):
coan source "-DLINUX_VERSION=KERNEL_VERSION(2,18,1)" \
"-DKERNEL_VERSION(a,b,c)=((a)*0x10000 + (b)*0x100 + (c))" \
testfile.c
I think using the KERNEL_VERSION macro to define LINUX_VERSION is prettier, but you might prefer Chris Dodd's version in hex. Two dots in a number is definitely not going to work.
The error is coming from the fact that you can't define macros with arguments on the command line with -D -- you can only define simple macros. However, you shouldn't NEED to define KERNEL_VERSION on the command line, as the #define in the source should be fine. You should only need -DLINUX_VERSION_CODE=0x30d01 -- you need to define it as a single integer constant (hex is easiest) rather than with dots.
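For reference, that constant is just the arithmetic from the KERNEL_VERSION macro in the question applied to 3.13.1:
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
/* KERNEL_VERSION(3,13,1) = (3 << 16) + (13 << 8) + 1
                          = 0x030000 + 0x000d00 + 0x01 = 0x030d01 */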
