I want to stop the execution of my C code after I detected an NaN and send the error message to MATLAB using mexWarnMsgTxt.
The C code is executed from MATLAB over mex file. I tried to use abort() and exit() which indeed kill the c program but also MATLAB (I guess because it is the calling process here.)
#include "mex.h"
#include "matrix.h"
for (int i = 0; i <= 5; i++)
{
if (mxIsFinite(out[i])) {
}
else if (mxIsInf(out[i])) {
char *err_msg = malloc(max_len_err_msg);
snprintf(err_msg, max_len_err_msg, "Inf detected in file %s at line %d", __FILE__, __LINE__);
mexWarnMsgTxt(err_msg);
abort();
//free(err_msg);
//abort(1);
/* NOTE: Test for NaN is here for illustration only. If a double
* is not finite and is not infinity, then it is a NaN */
}
else if (mxIsNaN(out[i])) {
char *err_msg = malloc(max_len_err_msg);
snprintf(err_msg, max_len_err_msg, "NaN detected in file %s at line %d", __FILE__, __LINE__);
mexWarnMsgTxt(err_msg);
abort();
//free(err_msg);
}
}
I just want my mexFunction to stop but not to terminate Matlab.
mex functions are normal C functions so, to leave the function early, just use return.
If you function allocated resources that need to be cleaned up manually, the established idiom in C is to use a goto cleanup; (this is one of the few, if not the only, acceptable and generally accepted uses of goto):
/*
 * MEX entry point: validate out[0..5]; on the first Inf/NaN, warn MATLAB
 * via mexWarnMsgTxt and leave the function early (goto cleanup) WITHOUT
 * killing the MATLAB process.
 * NOTE(review): `out` and `max_len_err_msg` are assumed to be declared
 * elsewhere in the real file -- confirm.
 */
void mexFunction(
int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[]
) {
for (int i = 0; i <= 5; i++) {
if (mxIsInf(out[i])) {
char *err_msg = malloc(max_len_err_msg);
snprintf(err_msg, max_len_err_msg, "Inf detected in file %s at line %d", __FILE__, __LINE__);
mexWarnMsgTxt(err_msg);
free(err_msg);
goto cleanup;
} else if (mxIsNaN(out[i])) {
char *err_msg = malloc(max_len_err_msg);
snprintf(err_msg, max_len_err_msg, "NaN detected in file %s at line %d", __FILE__, __LINE__);
mexWarnMsgTxt(err_msg);
free(err_msg);
goto cleanup;
}
/* ... normal per-element processing ... */
}
cleanup:
// Perform cleanup here.
; /* fixed: a label must be followed by a statement (pre-C23) */
}
(Note that in this code the err_msg cleanup is performed in its own scope rather than with the global cleanup.)
But in the case where no cleanup is to be performed, the goto statements are unnecessary and can be replaced by return.
My current solution is to define a global variable abort_flag in C, set it to 1 if the error occured and based on that break all my loops and return from the functions.
Kind of "manual" but works:
int abort_flag = 0;
// to use in other file insert into header: extern int abort_flag;
// in the NaN case (see above)
abort_flag = 1;
// in the loops
if (abort_flag==1) { break; };
// in the functions
if (abort_flag==1) { return; };
Related
I'm working on a C program to manipulate large volumes of CSV data. It was running fine in development with a smallish test file. But when the size of the file increases, it starts to fail. Depending on whether I compile it with gcc or minGW's gcc, it fails with segmentation error or 3221225477 / 0xC0000005 at different places, always either :
if (fclose(fp)) {
printf("Error closing file: %s, %s, %d.\n", fileName, __func__, __LINE__);
exit(300);
}
Note it doesn't get past the fclose(). Or one of these:
data_PE_T12 = calloc(width_T*dataDepthDay, sizeof(*data_PE_T12));
It's long, so I'll try to show the relevant parts. First the Main function:
#include <stdio.h>
#include <string.h> // strtok
#include <stdlib.h> // atoi & atof
#include <time.h> // time functions
#include <math.h> // expf()
...
// Array Sizes
static int dataDepth, dataDepthDay;
static int fromTime, toTime;
static int width, width_T, width_H, width_C;
// Array Pointers
static int *timeArray, *timeArrayDay, *timeArrayPE;
static struct sensorHeader_t *headerArray, *headerArray_T, *headerArray_H, *headerArray_C;
// of depth dataDepthDay
static float *data_E_T25, *data_E_T30;
static float *data_E_T12, *data_E_T18, *data_E_H60, *data_E_H70, *data_E_C1500;
static float *data_PE_T12, *data_PE_T18, *data_PE_H60, *data_PE_H70, *data_PE_C1500;
... plus loads more.
// functions
void grabDepth(void); // OK
void grabPayload(void); // OK
... plus loads more.
/*
 * Entry point: build the input path, scan the CSV for dimensions, then run
 * the generation passes.  Results flow through the file-scope globals.
 */
int main(int argc, char **argv)
{
// Grab Input File Name
// NOTE(review): rawFile is a global declared elsewhere; strcat performs no
// bounds check, so an over-long argv[1] would overflow it -- confirm its size.
if (argc == 2) {
strcpy(rawFile, "in/");
strcat(rawFile, argv[1]);
} else { // dev
strcpy(rawFile, "in/sensor_report.csv");
}
/* fixed: __SIZE_MAX__ has type size_t; printing it with %d is undefined
 * behaviour.  Use %zu, and add the missing newline. */
printf("size max = %zu\n", (size_t)__SIZE_MAX__);
// Parse and Copy File
grabDepth();
grabPayload();
// Run functions
genRawData(); // Raw T, H & C files
genExposureE(); //
genExposureAPE(); //
return 0;
}
Next the first function that is called. This one opens the main input file and pulls out a number of array widths and depths that are used to calloc for arrays already declared as static pointers. The idea is that this will make the memory handling nice and flexible as the file size increases...
/*
 * Scan the raw CSV once to discover the data dimensions (width, dataDepth,
 * dataDepthDay) used to size the later calloc() calls.  Results are stored
 * in file-scope globals; exits the process on I/O errors.
 */
void grabDepth(void)
{
// 1. Open File
FILE *fp = fopen(rawFile, "r");
char buf[15000]; // Big enough to deal with lots of devices.
if (!fp) {
printf("Can't open the file: %s: %s, %d.\n", rawFile, __func__, __LINE__);
exit(100);
}
while (fgets (buf, sizeof(buf), fp)) {
int lineLen = strlen(buf);
int colNum = 1;
char *field = strtok(buf, ",");
// 2. "From" row carries the time range; depth = range / 900-second samples.
if (field && strcmp(field, "From") == 0) {
while (field) {
if (colNum == 2) {
fromTime = atof(field); /* NOTE(review): int target -- atoi/strtol would be more direct */
}
if (colNum == 4) {
toTime = atof(field);
}
field = strtok(NULL, ",");
colNum++;
}
dataDepth = ( toTime - fromTime )/900;
continue; // to next iteration.
}
// 3. Grab file width from line 10 (commsType) Check if buf Overruns too
if (field && strcmp(field, "TimeStamp") == 0) {
// First Check Line is long enough!
if (lineLen == sizeof(buf)-1) { // buf has overrun!
/* fixed: format string was missing the %s for __func__, so __func__
 * (a const char *) was consumed by %d -- undefined behaviour. */
printf("File Read-Line Overrun: %s, %s, %d.\n", rawFile, __func__, __LINE__);
exit(200);
}
width = -2; // ignore timestamps : I ballsed up the commas in csv file (-2 instead of -1)
/* count remaining comma-separated fields */
while ((field = strtok(NULL, ",")) != NULL) {
width ++;
}
break; // out of loop!
}
}
dataDepthDay = dataDepth/96; // round down?!
printf("\n 1. grabDepth() Results\n");
printf( "------------------------\n");
printf("Raw Data Width = %d\n", width);
printf("Raw Data Depth = %d\n", dataDepth);
printf("dataDepthDay Depth = %d\n\n", dataDepthDay);
if (fclose(fp)) {
/* fixed: missing %s for __func__ here as well (same UB as above) */
printf("Error closing file: %s, %s, %d.\n", rawFile, __func__, __LINE__);
exit(300);
}
}
After that, it's just calling one function after another, all of which follow the general pattern of:
/*
 * Write the raw per-sensor CSV ("out/<sensorType>_raw.csv") and allocate the
 * day-resolution working arrays (file-scope globals).
 * NOTE(review): the parameters dataDepth/width shadow the globals of the same
 * names -- confirm which one each use below is meant to read.
 */
void _genRawData(char* sensorType, struct sensorHeader_t *sensorHeader, float *dataArray, int *timeArray, size_t dataDepth, size_t width) {
FILE *fp;
strcpy(fileName, "out/");
strcat(fileName, sensorType);
strcat(fileName, "_raw.csv");
fp = fopen(fileName, "w");
// check file opened OK.
if (fp == NULL) {
printf("Error! Couldn't Create file: %s\n", fileName);
return;
}
printf("building file : %s\n", fileName);
// Allocate Memory -- and check every calloc: dereferencing a failed
// allocation is exactly the 0xC0000005 crash being chased here.
timeArrayDay = calloc(dataDepthDay, sizeof(*timeArrayDay));
timeArrayPE = calloc(dataDepthDay, sizeof(*timeArrayPE)); // xxxx same array as day time array!?
data_E_T12 = calloc(width_T*dataDepthDay, sizeof(*data_E_T12));
data_E_T18 = calloc(width_T*dataDepthDay, sizeof(*data_E_T18));
data_E_H60 = calloc(width_H*dataDepthDay, sizeof(*data_E_H60));
data_E_H70 = calloc(width_H*dataDepthDay, sizeof(*data_E_H70));
if (!timeArrayDay || !timeArrayPE || !data_E_T12 || !data_E_T18
    || !data_E_H60 || !data_E_H70) {
printf("Out of memory: %s, %d.\n", __func__, __LINE__);
fclose(fp);
exit(400);
}
// do stuff and build new arrays up and put into files...
if (fclose(fp)) {
/* fixed: format had two specifiers but three arguments, so __func__
 * (a const char *) was consumed by %d -- undefined behaviour.  Also
 * report fileName, the file actually being closed, not rawFile. */
printf("Error closing file: %s, %s, %d.\n", fileName, __func__, __LINE__);
exit(300);
}
}
I've only called calloc once on each 2-D array, and for the sake of debugging I've removed the free() calls.
I figure I'm doing something wrong with memory management, which is biting me when the array sizes grow past a certain point, but I can't figure out what's wrong. I've tried to make sure the memory I access has been allocated correctly and working on a big powerful actual computer (I'm an embedded person usually), I wouldn't expect any issues with OS handing out data? Isn't there plenty to go around!?
In case the outcome is of use to others. I suspected there was an issue with the calloc and subsequent use of the allocated memory. So I tried 2 things:
1: Checked the memory usage in the code:
// Add Values & Write Line on new Day & Reset Accumulator
for (i=0; i < dataDepth; i++) {
for (j=0; j < width; j++) {
if (newDay) {
fprintf(fp, ",%.2f", APE_Accum[j]);
data_E_Array[(data_E_Index-1)*width+j] = APE_Accum[j];
if ((data_E_Index-1)*width+j+1 > (width_T*dataDepthDay)) {
printf("Oh bugger...\n");
printf("width_T*dataDepthDay = %d\n", width_T*dataDepthDay);
printf("data_E_Index-1 = %d\n", data_E_Index-1);
printf("width = %d\n", width);
printf("dataDepthDay = %d\n", dataDepthDay);
printf("width_T = %d\n", width_T);
printf("j = %d\n\n", j);
Really messy code, so you can understand how I lost track of the array bounds. Basically, it became obvious that I'd messed up my calculation of the size of the calloc. It was possible to find the problem like this, but I don't think it's a viable answer to my question, since it wouldn't scale to larger or even more convoluted code.
2: Valgrind. Following #dbush s advice. I moved over to Ubuntu, installed Valgrind, and recompiled...
$ sudo apt install valgrind
$ ps aux | grep -i apt
$ gcc -o graphomatic ./graphomatic.c -lm -g
$ valgrind --leak-check=full --show-leak-kinds=all --verbose --track-origins=yes --log-file=valgrind-log
$ less valgrind-log
And Bob's your uncle. The problems jumped right out. I needed to add the -lm to link to the math library. And the -g to make sure line numbers were included in the Valgrind output.
==15878== Invalid write of size 4
==15878== at 0x4038EA: _genExposureE (graphomatic.c:867)
==15878== by 0x404A0C: genExposureE (graphomatic.c:1235)
==15878== by 0x400EAA: main (graphomatic.c:122)
==15878== Address 0x75cd604 is 0 bytes after a block of size 660 alloc'd
==15878== at 0x4C2FB55: calloc (in /usr/lib/valgrind/vgpreload_memcheck-amd64-linux.so)
==15878== by 0x404911: genExposureE (graphomatic.c:1222)
==15878== by 0x400EAA: main (graphomatic.c:122)
The code=3221225477 in terminal/shell.
This error/exception is thrown at times when you have segmentation errors.
By segmentation error, I mean that either you are trying to access a memory not allocated in an array.
Example:
vector<int> arr={1,2,3,4,5,6,7,8,9};
cout<<arr.at(arr.size());
This error code can also be shown as out of bound error, out of range error, or segmentation error, etc.
I'm trying to compile some example C code with GCC/MinGW on Windows 7. The example code includes some local header files which ultimately include stdio.h, and I get this error when I try to compile:
c:\mingw\include\stdio.h:345:12: error: expected '=', ',', ';', 'asm' or '__attribute__' before '__mingw__snprintf'
extern int __mingw_stdio_redirect__(snprintf)(char*, size_t, const char*, ...);
This is a weird one to me. How could there possibly be errors in stdio.h?
regarding:
if (i == 0)
{
printf("\nNo interfaces found! Make sure WinPcap is installed.\n");
return 0;
}
pcap_freealldevs(alldevs);
since the variable i is initialized to 0 and never modified, this if() statement will always be true. One result is that pcap_freealldevs() will never be called.
the scope of variables should be limited as much as reasonable.
Code should never depend on the OS to clean up after itself. suggest
#include <stdio.h>
#include <stdlib.h>
#include "pcap.h"
/*
 * List all capture devices via WinPcap/Npcap, then free the device list.
 * Always releases the list, and reports when no interfaces are found.
 */
int main( void )
{
pcap_if_t *alldevs = NULL;
char errbuf[PCAP_ERRBUF_SIZE];
int devnum = 0; /* fixed: the print loop used an undeclared counter `i` */
/* Retrieve the device list from the local machine */
if (pcap_findalldevs_ex(PCAP_SRC_IF_STRING, NULL /* auth is not needed */, &alldevs, errbuf) == -1)
{
fprintf(stderr,"Error in pcap_findalldevs_ex: %s\n", errbuf);
exit(1);
}
/* Print the list */
for( pcap_if_t *d = alldevs; d != NULL; d= d->next)
{
printf("%d. %s", ++devnum, d->name);
if (d->description)
printf(" (%s)\n", d->description);
else
printf(" (No description available)\n");
}
if ( ! alldevs )
{
printf("\nNo interfaces found! Make sure WinPcap is installed.\n");
}
/* We don't need any more the device list. Free it */
pcap_freealldevs(alldevs);
}
I want to remove all comments in a toy.c file. From Remove comments from C/C++ code I see that I could use
gcc -E -fpreprocessed -P -dD toy.c
But some of my code (say deprecated functions that I don't want to compile) are wrapped up between #if 0 and endif, as if they were commented out.
On the one hand, the above command does not remove this type of "comment" because its removal is only possible during macro expansion, which -fpreprocessed prevents;
On the other hand, I have other macros I don't want to expand, so dropping -fpreprocessed is a bad idea.
I see a dilemma here. Is there a way out of this situation? Thanks.
The following toy example "toy.c" is sufficient to illustrate the problem.
#define foo 3 /* this is a macro */
// a toy function
int main (void) {
return foo;
}
// this is deprecated
#if 0
int main (void) {
printf("%d\n", foo);
return 0;
}
#endif
gcc -E -fpreprocessed -P -dD toy.c gives
#define foo 3
int main (void) {
return foo;
}
#if 0
int main (void) {
printf("%d\n", foo);
return 0;
}
#endif
while gcc -E -P toy.c gives
int main (void) {
return 3;
}
There's a pair of programs, sunifdef ("Son of unifdef", which is available from unifdef) and coan, that can be used to do what you want. The question Is there a C pre-processor which eliminates #ifdef blocks based on values defined/undefined? has answers which discuss these programs.
For example, given "xyz37.c":
#define foo 3 /* this is a macro */
// a toy function
int main (void) {
return foo;
}
// this is deprecated
#if 0
int main (void) {
printf("%d\n", foo);
}
#endif
Using sunifdef
sunifdef -DDEFINED -ned < xyz37.c
gives
#define foo 3 /* this is a macro */
// a toy function
int main (void) {
return foo;
}
// this is deprecated
and given this file "xyz23.c":
#if 0
This is deleted
#else
This is not deleted
#endif
#if 0
Deleted
#endif
#if defined(XYZ)
XYZ is defined
#else
XYZ is not defined
#endif
#if 1
This is persistent
#else
This is inconsistent
#endif
The program
sunifdef -DDEFINE -ned < xyz23.c
gives
This is not deleted
#if defined(XYZ)
XYZ is defined
#else
XYZ is not defined
#endif
This is persistent
This is, I think, what you're after. The -DDEFINED options seems to be necessary; choose any name that you do not use in your code. You could use -UNEVER_DEFINE_THIS instead, if you prefer. The -ned option evaluates the constant terms and eliminates the relevant code. Without it, the constant terms like 0 and 1 are not eliminated.
I've used sunifdef happily for a number of years (encroaching on a decade). I've not yet found it to make a mistake, and I've used it to clean up some revoltingly abstruse sets of 'ifdeffery'. The program coan is a development of sunifdef with even more capabilities.
The preprocessor doesn't make exceptions. You cannot use it here to do that.
A simple state machine using python can work. It even handles nesting (well, maybe not all cases are covered like nested #if 0 but you can compare the source before & after and manually validate). Also commented code isn't supported (but it seems that you have it covered)
the input (slightly more complex than yours for the demo):
#define foo 3
int main (void) {
return foo;
}
#if 0
int main (void) {
#ifdef DDD
printf("%d\n", foo);
#endif
}
#endif
void other_function()
{}
now the code, using regexes to detect #if & #endif.
import re

# Match "#if 0" after optional leading whitespace.  Raw strings so that \s
# is a regex escape, not a (deprecated) Python string escape.
rif0 = re.compile(r"\s*#if\s+0")
# Any conditional-opening directive: #if / #ifdef / #ifndef.
rif = re.compile(r"\s*#(if|ifn?def)")
endif = re.compile(r"\s*#endif")

if_nesting = 0   # depth of all currently-open #if/#ifdef/#ifndef blocks
if0_nesting = 0  # depth at which the suppressing "#if 0" was opened
suppress = False # True while inside an "#if 0" ... "#endif" region

with open("input.c") as fin, open("output.c", "w") as fout:
    for l in fin:
        if rif.match(l):
            if_nesting += 1
            if rif0.match(l):
                suppress = True
                if0_nesting = if_nesting
        elif endif.match(l):
            # closing the "#if 0" block ends suppression
            if if0_nesting == if_nesting:
                suppress = False
            if_nesting -= 1
            # NOTE(review): this also drops #endif lines of non-suppressed
            # blocks; fine for the demo input, confirm for general use.
            continue  # don't write the #endif
        if not suppress:
            fout.write(l)  # fixed: had a stray extra ')' (syntax error)
the output file contains:
#define foo 3
int main (void) {
return foo;
}
void other_function()
{}
so the nesting worked and the #if 0 part was successfully removed. Not something that sed '/#if 0/,/#endif/d' can achieve.
Thanks for the other two answers.
I am now aware of unifdef and sunifdef. I am happy to know the existence of these tools, and that I am not the only one who wants to do this kind of code cleaning.
I have also written a rm_if0_endif.c (attached below) for removing an #if 0 ... #endif block which is sufficient for me. Its philosophy is based on text processing. It scans an input C script, locating #if 0 and the correct enclosing endif, so that this block can be omitted during char-to-char copying.
The text processing approach is limited, as it is designed for #if 0 ... #endif case only, but is all I need for now. A C program is not the only way for this kind of text processing. Jean-François Fabre's answer demonstrates how to do it in Python. I can also do something similar in R, using readLines, startsWith and writeLines. I chose to do it in C as I am not yet an expert in C so this task drives me to learn. Here is a demo of my rm_if0_endif.c. Note that the program can concatenate several C files and add header for each file.
original input file input.c
#define foo 3 /* this is a macro */
// a toy function
int test1 (void) {
return foo;
}
#if 0
#undef foo
#define foo 4
#ifdef bar
#warning "??"
#endif
// this is deprecated
int main (void) {
printf("%d\n", foo);
return 0;
}
#endif
// another toy
int test2 (void) {
return foo;
}
gcc pre-processing output "gcc_output.c" (taken as input for my program)
gcc -E -fpreprocessed -P -dD input.c > gcc_output.c
#define foo 3
int test1 (void) {
return foo;
}
#if 0
#undef foo
#define foo 4
#ifdef bar
#warning "??"
#endif
int main (void) {
printf("%d\n", foo);
return 0;
}
#endif
int test2 (void) {
return foo;
}
final output final_output.c from my program
rm_if0_endif.c has a utility function pattern_matching and a workhorse function rm_if0_endif:
void rm_if0_endif (char *InputFile,
char *OutputFile, char *WriteMode, char *OutputHeader);
The attached file below has a main function, doing
rm_if0_endif("gcc_output.c",
"final_output.c", "w", "// this is a demo of 'rm_if0_endif.c'\n");
It produces:
// this is a demo of 'rm_if0_endif.c'
#define foo 3
int test1 (void) {
return foo;
}
int test2 (void) {
return foo;
}
Appendix: rm_if0_endif.c
#include <stdio.h>
/*
 * Read up to length_pattern characters from fp and compare them one by one
 * against pattern.  Returns 1 on a full match, 0 otherwise (including EOF).
 * The stream position is left just after the last character examined;
 * callers save/restore it with fgetpos/fsetpos.
 */
int pattern_matching (FILE *fp, const char *pattern, int length_pattern) {
    int pos = 0;
    while (pos < length_pattern) {
        int ch = fgetc(fp);
        if (ch != pattern[pos]) {
            return 0;   /* mismatch (or EOF): not the pattern */
        }
        pos++;
    }
    return 1;
}
/* Peek: return 1 if the next `len` chars of fp match `pattern`; the stream
 * position is always restored so the caller can keep scanning. */
static int rm_peek_match (FILE *fp, const char *pattern, int len) {
    fpos_t save;
    int ok = 1;
    fgetpos(fp, &save);
    for (int i = 0; i < len; i++) {
        if (fgetc(fp) != pattern[i]) { ok = 0; break; }
    }
    fsetpos(fp, &save);
    return ok;
}

/*
 * Copy InputFile to OutputFile (opened with WriteMode, prefixed with
 * OutputHeader), omitting every "#if 0" ... matching "#endif" region,
 * including nested #if/#ifdef/#ifndef blocks inside it.
 *
 * Fixes vs. the original:
 *  - "EOF_flag == 1;" was a comparison with no effect (meant assignment);
 *  - an unterminated "#if 0" made the inner scan spin forever on EOF;
 *  - the output stream was never checked for fopen failure.
 */
void rm_if0_endif (char *InputFile,
char *OutputFile, char *WriteMode, char *OutputHeader) {
    FILE *fp_r = fopen(InputFile, "r");
    FILE *fp_w = fopen(OutputFile, WriteMode);
    int c;
    if (fp_r == NULL) {
        perror("error when opening input file!");
        if (fp_w) fclose(fp_w);
        return;
    }
    if (fp_w == NULL) {
        perror("error when opening output file!");
        fclose(fp_r);
        return;
    }
    fputs(OutputHeader, fp_w);
    for (;;) {
        /* Phase 1: copy verbatim until a "#if 0" directive (or EOF).  The
         * '#' of an ordinary directive is re-emitted; the '#' of "#if 0"
         * is dropped, since the whole region is being removed. */
        for (;;) {
            c = fgetc(fp_r);
            while (c != '#' && c != EOF) {
                fputc(c, fp_w);
                c = fgetc(fp_r);
            }
            if (c == EOF) goto done;
            if (rm_peek_match(fp_r, "if 0", 4)) break;
            fputc('#', fp_w);   /* not "#if 0": keep the '#' */
        }
        /* Phase 2: skip until the #endif matching this #if 0.  "if" also
         * matches the start of #ifdef/#ifndef -- exactly the directives
         * that deepen the nesting. */
        {
            int opens = 1, closes = 0;
            while (opens != closes) {
                c = fgetc(fp_r);
                while (c != '#' && c != EOF) c = fgetc(fp_r);
                if (c == EOF) goto done;  /* unterminated #if 0: stop cleanly */
                if (rm_peek_match(fp_r, "if", 2)) opens++;
                else if (rm_peek_match(fp_r, "endif", 5)) closes++;
            }
            /* consume the final "endif" itself (5 chars) */
            for (int i = 0; i < 5; i++) c = fgetc(fp_r);
            if (c == EOF) goto done;
        }
    }
done:
    fclose(fp_r);
    fclose(fp_w);
}
/* Demo driver: strip the "#if 0" regions from the preprocessed file and
 * prepend a one-line banner to the result. */
int main (void) {
    char in_name[]  = "gcc_output.c";
    char out_name[] = "final_output.c";
    rm_if0_endif(in_name, out_name, "w",
                 "// this is a demo of 'rm_if0_endif.c'\n");
    return 0;
}
Our code (in a simple library implementation) is beginning to look like this:
err = callToUnderlyingLibrary1();
if (err!=0) {
printf ("blah %d\n", err);
...
}
err = callToUnderlyingLibrary2();
if (err!=0) {
printf ("blah %d\n", err);
...
}
err = callToUnderlyingLibrary3();
if (err!=0) {
printf ("blah %d\n", err);
...
}
This is cumbersome and ugly. Is there a better way to do this ? Perhaps using the C preprocessor ? I was thinking something like:
CHECK callToUnderlyingLibrary1();
CHECK callToUnderlyingLibrary2();
CHECK callToUnderlyingLibrary3();
where the CHECK macro invokes the function and does the rudimentary error checking.
Are there preferred idiomatic ways of handling this ?
Another macro-based approach which you can use to mitigate the shortcomings in C fairly easily:
/*
 * CHECK(x): evaluate x once; on a non-zero return, report the failing
 * expression (stringised via #x), its value, and the call site, then leave
 * the enclosing function.  do/while(0) keeps the expansion a single
 * statement.  Fixed: the format string now ends in '\n' so each report is
 * on its own line.
 */
#define CHECK(x) do { \
    int retval = (x); \
    if (retval != 0) { \
        fprintf(stderr, "Runtime error: %s returned %d at %s:%d\n", #x, retval, __FILE__, __LINE__); \
        return /* or throw or whatever */; \
    } \
} while (0)
Then to invoke it you have:
CHECK(doSomething1());
CHECK(doSomething2());
// etc.
For bonus points you could easily extend the CHECK macro to take a second argument y that is what to do on failure:
/*
 * CHECK(x, y): like CHECK(x), but on failure executes the caller-supplied
 * statement y (e.g. `return code` or `throw ...`).  Fixed: the format
 * string now ends in '\n' so each report is on its own line.
 */
#define CHECK(x, y) do { \
    int retval = (x); \
    if (retval != 0) { \
        fprintf(stderr, "Runtime error: %s returned %d at %s:%d\n", #x, retval, __FILE__, __LINE__); \
        y; \
    } \
} while (0)
// We're returning a different error code
CHECK(someFunction1(foo), return someErrorCode);
// We're actually calling it from C++ and can throw an exception
CHECK(someFunction2(foo), throw SomeException("someFunction2 failed")):
Usually, in C, one uses goto for error handling:
/* Classic goto-based error handling: one label per acquired resource, so
 * each failure path releases exactly what was acquired before it. */
int foo()
{
if (Function1() == ERROR_CODE) goto error;
/* ... */
struct bar *x = acquire_structure();  /* fixed: call was missing () */
/* ... */
if (Function2() == ERROR_CODE) goto error0;
/* ... */
release_structure(x);
return 0;
error0:
release_structure(x);
error:
return -1;
}
This can be improved with macros and more clever instruction flow (to avoid repeating cleanup code), but I hope you see the point.
I think you should look at exceptions and exception handling. http://www.cplusplus.com/doc/tutorial/exceptions/
try{
callToUnderlyingLibrary1();
callToUnderlyingLibrary2();
callToUnderlyingLibrary3();
}catch(exception& e)
//Handle exception
}
your library functions can throw exceptions if there is an error
Here is a proposition, you may or may not like it:
make your functions return 0 on failure, something else on success
if something fails in your functions, have them set a global (or static) variable to the error code (like errno)
create a die() function that prints the error depending of the error code (or whatever you want it to do)
call your functions with do_something(foo, bar) || die("Argh...");
I prefer a variant of Alexandra C.'s goto-approach:
/*
 * Single-label variant of goto error handling: every failure jumps to one
 * cleanup point that releases whatever is still held (non-NULL).
 * Fixed: y was leaked on every error path reached after its acquisition
 * (the old cleanup released only x).
 */
int foo()
{
int rv = 0;
struct bar *x = NULL;
struct bar *y = NULL;
rv = Function1();
if (rv != OK){
goto error;
}
//...
x = acquire_structure();
if (x==NULL){
rv = ERROR_MEMORY;
goto error;
}
//...
rv = Function2();
if (rv != OK){
goto error;
}
//...
y = acquire_structure();
if (y==NULL){
rv = ERROR_MEMORY;
goto error;
}
//...
rv = release_structure(x);
x = NULL;  /* mark released so the cleanup path skips it */
if (rv != OK){
goto error;
}
rv = release_structure(y);
y = NULL;
if (rv != OK){
goto error;
}
return OK;
error:
if (y!=NULL){
release_structure(y);
}
if (x!=NULL){
release_structure(x);
}
return rv;
}
When you use multiple goto-destinations, it is easy to mix them up. Or perhaps you move the initialization of a variable, but forget to update the gotos. And it can be very difficult to test all ways a C-method can fail.
I prefer having a single goto-destination that performs all the cleanup. I find that makes it easier to avoid mistakes.
You could do what you said, which is some rudimentary macro:
/*
 * CHECK(f): call f() and store the result in the caller's `err`; on a
 * non-zero result, log the value and call site.  Fixed: the original
 * expanded to TWO statements (`(err = x()); if ...`), which breaks inside
 * an unbraced if/else; do/while(0) makes it a single statement while
 * still swallowing the trailing semicolon.
 */
#define CHECK(x) do { \
    err = x(); \
    if (err) { \
        printf("blah %d on line %d of file %s\n", err, __LINE__, __FILE__); \
    } \
} while (0)
And you could use it like
int err = 0;
CHECK(callToUnderlyingLibrary1); // don't forget the semicolon at the end
CHECK(callToUnderlyingLibrary2);
CHECK(callToUnderlyingLibrary3);
No 'goto', use only 1 'return' in functions. That's the elegant code.
IMHO, OP's question point and all answers are talking about FANCY techniques. Fancy code is just sort of eye candy.
I think the following code is normal (and malloc/free is similar):
/*
 * Return the first index in [0, NUM) for which Match() succeeds on
 * "test.in"; 0 if none match (note 0 is ambiguous -- it is also a valid
 * match index -- kept for interface compatibility).
 */
int foo(){
FILE *fp = fopen("test.in", "r");
int i;
if (fp == NULL) { /* fixed: a failed fopen was handed straight to Match/fclose */
return 0;
}
for(i = 0; i < NUM; i ++){
if(Match(fp, i)){
fclose(fp);
return i;
}
}
fclose(fp);
return 0;
}
As we can see fclose(fp) appears twice in the code. It will appear more if there are other return statements in the function foo. However, it is troublesome that I have to write fclose(fp) many times. One solution is just one return for one function. However, multiple returns is sometimes useful. Is there any other solution?
PS: As I know, there is a macro(with-open-file) in lisp.
(with-open-file (stream-var open-argument*)
body-form*)
It could open and close file for us.
The use of break often helps:
/*
 * Same search as above, but with a single fclose: break carries the result
 * to one exit path.  Fixed: guard against fopen failure before using fp.
 */
int foo() {
FILE *fp = fopen("test.in", "r");
int i, result;
if (fp == NULL) { /* fixed: avoid Match/fclose on a NULL stream */
return 0;
}
result = 0;
for(i = 0; i < NUM; i ++){
if(Match(fp, i)){
result = i;
break;
}
}
fclose(fp);
return result;
}
In the source code of linux kernel, there are many functions that have to take care of locks and other resource on return. And they conventionally add a cleanup label at the end of the function, and goto there whenever early return occurs. I personally recommend this usage to avoid duplicate code, and maybe this is the only sane usage of goto.
An extra layer of indirection can ensure you don't miss an exit from the function:
/* Scan indices 0..NUM-1 against the already-open stream and return the
 * first matching index, or 0 when nothing matches.  The caller owns fp. */
int foo(FILE *fp)
{
    for (int idx = 0; idx < NUM; idx++) {
        if (Match(fp, idx))
            return idx;
    }
    return 0;
}
/*
 * Open test.in, delegate the search to foo(), and guarantee the stream is
 * closed on the way out.  Fixed: no longer hands a NULL stream to foo()
 * when fopen fails.
 */
int foo_wrapper(void)
{
FILE *fp = fopen("test.in", "r");
int out;
if (fp == NULL) {
return 0;
}
out = foo(fp);
fclose(fp);
return out;
}
I'll expand on exception handling via goto (mentioned in #Charles Peng's answer):
You do it something like:
int func(...)
{
...
f = fopen(...);
if (f == NULL) {
log("failed opening blah blah...");
goto err;
}
...
m = malloc(...)
if (m == NULL) {
log("blah blah...");
goto err_close_f;
}
...
if (do_something(...) < 0) {
log("blah blah...");
goto err_close_f;
}
...
r = alloc_resource(...)
if (r == 0) {
log("blah blah...");
goto err_free_m;
}
...
return 0;
err_free_m:
free(m);
err_close_f:
fclose(f);
err:
return -1;
}
The advantage of this is that it's very maintainable. Resource acquisition and release has a somewhat symmetrical look when using this idiom. Also, resource release is out of the main logic, avoiding excessive clutter where it annoys the most. It's quite trivial to check that error handling of functions is right (just check that it jumps to the appropiate point, and that the previous label releases any acquired resource)...
A return statement NOT at the end of a function is the equivalent of a goto statement. Even though it may appear as though some functions are simpler with multiple return statements it has been my experience while maintaining various code bases that the ones with only 1 exit point from every function are easier to maintain.