Problems with C XOR executable file encryption / decryption

I'm trying to create a simple XOR crypter / decrypter in C for .exe files. I'm still pretty new to C and don't understand everything yet, especially the memory stuff. I've been following an online tutorial on how to make a simple XOR string crypter, which worked fine. Now I want to modify it so I can en/decrypt executable files, and I decided to use the fwrite() and fread() functions. This is what I've come up with:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h> // execve function
#define XOR_KEY 0xAA // key
#define JOB_CRYPT 1 // alter flow depending on the job
#define JOB_DECRYPT 2
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
void xorFile (char *infile, char *outfile) {
FILE *nFile, *eFile;
long nFileSize; // store file size of the file we want to read
char *buffer; // buffer for reading
char *eBuffer; // buffer for storing encrypted data
size_t rResult;
size_t wResult;
///// READ FILE /////
nFile = fopen(infile, "rb");
if(nFile == NULL) {
fputs("Error opening file...", stderr);
exit(EXIT_FAILURE);
}
fseek(nFile, 0, SEEK_END);
nFileSize = ftell(nFile);
rewind(nFile);
buffer = (char *) malloc(sizeof(char) * nFileSize);
if(buffer == NULL) {
fputs("Error allocating memory...", stderr);
exit(EXIT_FAILURE);
}
rResult = fread(buffer, 1, nFileSize, nFile);
if(rResult != nFileSize) {
fputs("Error reading file...", stderr);
exit(EXIT_FAILURE);
}
fclose(nFile);
printf("File size is: %ld\n", nFileSize);
printf("Buffer size is (pointer): %u\n", sizeof(buffer));
printf("Reading result: %lu\n", rResult);
////// WRITE TO FILE //////
eFile = fopen(outfile, "wb");
if(eFile == NULL) {
fputs("Error creating file...", stderr);
exit(EXIT_FAILURE);
}
eBuffer = (char *) malloc(sizeof(char) * nFileSize);
if(eBuffer == NULL) {
fputs("Error allocating memory (2)...", stderr);
exit(EXIT_FAILURE);
}
// encrypt byte by byte and save to buffer
printf("Proceeding with encryption!\n");
for(int i = 0; buffer[i] != EOF; i++) {
eBuffer[i] = buffer[i] ^ XOR_KEY;
}
printf("Proceeding with fwrite()!\n");
wResult = fwrite(eBuffer, 1, nFileSize, eFile);
fclose(eFile);
printf("eBuffer size is (pointer)%u\n", sizeof(eBuffer));
printf("Writing result: %lu\n", wResult);
free(buffer); // free buffers in heap
free(eBuffer);
}
//////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////
int main(int argc, char *argv[]) {
// checking if all parameters were given
if(argc < 4) {
fprintf(stderr, "Usage: %s [CRYPT | DECRYPT] [IN-FILE] [OUT-FILE]\n", argv[0]);
exit(EXIT_FAILURE);
}
int job;
// DETERMINE THE JOB
if(strcmp(argv[1], "CRYPT") == 0) {
job = JOB_CRYPT;
} else if (strcmp(argv[1], "DECRYPT") == 0) {
job = JOB_DECRYPT;
} else {
fprintf(stderr, "Please select [CRYPT | DECRYPT]!");
exit(EXIT_FAILURE);
}
// CRYPT/DECRYPT OUR FILE
xorFile(argv[2], argv[3]);
if(job == JOB_DECRYPT) {
char *args[] = {argv[3], NULL};
int errExec = execve(args[0], args, NULL);
if(errExec == -1) {
perror("Error executing file...");
exit(EXIT_FAILURE);
}
}
return 0;
}
I'm sorry for the ugly looking code but I first wanted to make it work and I'll refine it later.
Anyway, when I run it from the command prompt, the encryption works fine and generates an encrypted file, but when I run the decryption job, the program crashes during the decryption process.
What's going wrong here? Am I creating a buffer overflow when I'm decrypting it?
Thank you!

Here's the problem:
for(int i = 0; buffer[i] != EOF; i++) {
eBuffer[i] = buffer[i] ^ XOR_KEY;
}
Binary files can contain bytes with any value, so the EOF value is a valid byte and does not mark the end of the file. This means that if the file contains a byte with this value, the loop quits early and you won't XOR all the bytes. If the file does not contain this byte, the loop runs past the end of the allocated memory, which invokes undefined behavior, which in this case manifests as a crash.
You know how many bytes you need to process, so use that as your loop control:
for(int i = 0; i < nFileSize; i++) {
A few other minor corrections:
buffer = (char *) malloc(sizeof(char) * nFileSize);
if(buffer == NULL) {
fputs("Error allocating memory...", stderr);
exit(EXIT_FAILURE);
}
Don't cast the return value of malloc. Also, sizeof(char) is 1 by definition, so you can leave that out.
Also, if a system or library function fails, you should use perror to print the error message; it will include additional information about why the function failed.
buffer = malloc(nFileSize);
if(buffer == NULL) {
perror("Error allocating memory...");
exit(EXIT_FAILURE);
}
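Putting these fixes together, a minimal sketch of a corrected xorFile() might look like the following (same names as in the question; it XORs the buffer in place so the second buffer isn't needed, and because XOR is its own inverse the one routine serves both the CRYPT and DECRYPT jobs):
void xorFile(char *infile, char *outfile) {
    FILE *nFile = fopen(infile, "rb");
    if (nFile == NULL) {
        perror("Error opening input file");
        exit(EXIT_FAILURE);
    }
    fseek(nFile, 0, SEEK_END);
    long nFileSize = ftell(nFile);
    rewind(nFile);
    char *buffer = malloc(nFileSize);
    if (buffer == NULL) {
        perror("Error allocating memory");
        exit(EXIT_FAILURE);
    }
    if (fread(buffer, 1, nFileSize, nFile) != (size_t)nFileSize) {
        fputs("Error reading file...\n", stderr);
        exit(EXIT_FAILURE);
    }
    fclose(nFile);
    /* the byte count, not a sentinel byte, controls the loop */
    for (long i = 0; i < nFileSize; i++)
        buffer[i] ^= XOR_KEY;
    FILE *eFile = fopen(outfile, "wb");
    if (eFile == NULL) {
        perror("Error creating output file");
        exit(EXIT_FAILURE);
    }
    if (fwrite(buffer, 1, nFileSize, eFile) != (size_t)nFileSize) {
        fputs("Error writing file...\n", stderr);
        exit(EXIT_FAILURE);
    }
    fclose(eFile);
    free(buffer);
}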

Related

Segmentation fault instead of showing a message - reading from a file using pointers in C

I wrote a program which reads from a file. I have a condition that should print that the array is too big, but when I use an array that is too big, instead of showing this message I get a segmentation fault.
This is my program:
#include <stdio.h>
#include <stdlib.h>
#define N 10000 // Maximum array size
int _strlen(char *array) {
int i;
for (i = 0; array[i] != '\0'; ++i);
return i;
}
int readText(FILE *wp, char *s, int max) {
int sum = 0;
if (_strlen(s) > max) {
printf("This array is too big. Maximum size is %d", max);
} else {
while ((*s++ = fgetc(wp)) != EOF) {
sum++;
}
*(s-1) = '\0';
}
return sum;
}
int main(int argc, char *argv[]) {
FILE *wz, *wc;
char *s;
char array[N];
s = array;
if (argc != 3) {
printf("Wrong arguments number\n");
printf("I should run this way:\n");
printf("%s source result\n",argv[0]);
exit(1);
}
if ((wz = fopen(argv[1], "r")) == NULL) {
printf("Open error %s\n", argv[1]);
exit(1);
}
if ((wc = fopen(argv[2], "w")) == NULL) {
printf("Open error %s\n", argv[2]);
exit(2);
}
fprintf(wc, "Read text from file source.txt");
readText(wz, s, 10000);
return 0;
}
In the output I want to have: "This array is too big. Maximum size is %d"
instead of "Segmentation fault (core dumped)".
In addition, I want to say that the program works when I use a smaller array, but I want to show the user a proper message when they use an array that is too big, instead of a segmentation fault.
Thanks, I changed my program that way. The only problem is that the program checks the if condition on every iteration of the while loop, so it could be slow.
int readText(FILE *wp, char *s, int max) {
int sum = 0;
if (_strlen(s) > max) {
printf("This array is too big. Maximum size is %d", max);
} else {
while ((*s++ = fgetc(wp)) != EOF) {
sum++;
if (sum > max) {
printf("This array is too big. Maximum size is %d", max);
break;
}
}
*(s-1) = '\0';
}
return sum;
}
The comments and the other answer address your undefined behavior (the segmentation fault in your case).
The only problem is that this program check the if condition in every while loop so this program could be slow.
Your program is not slow because of an if, but because you read the file char by char.
Using stat or an equivalent function you can get the size of the file and read it with a single fread:
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/stat.h>
#define N 10000 // Maximum array size
int main(int argc, char *argv[]) {
char array[N];
FILE *wz, *wc;
struct stat st;
off_t sz;
if (argc != 3) {
printf("Wrong arguments number\n"
"I should run this way:\n"
"%s source result\n", argv[0]);
exit(1);
}
if ((wz = fopen(argv[1], "r")) == NULL) {
printf("Cannot open %s to read : %s\n", argv[1], strerror(errno));
exit(1);
}
if (stat(argv[1], &st) == -1) {
printf("Cannot get stat of %s : %s\n", argv[1], strerror(errno));
exit(1);
}
if (st.st_size > N-1) {
printf("This array is too big. Maximum size is %d", N-1);
sz = N-1;
}
else
sz = st.st_size;
if (fread(array, 1, sz, wz) != sz) {
printf("cannot read %s : %s", argv[1], strerror(errno));
fclose(wz); /* for valgrind end test etc */
exit(1);
}
array[sz] = 0;
fclose(wz);
if ((wc = fopen(argv[2], "w")) == NULL) {
printf("Cannot open %s to write : %s\n", argv[2], strerror(errno));
exit(2);
}
/* ... */
fclose(wc);
return 0;
}
Knowing the size of the file also lets you remove that fixed-size limitation and read the whole file, as long as you can allocate enough memory for it:
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <sys/stat.h>
int main(int argc, char *argv[]) {
char * array;
FILE *wz, *wc;
struct stat st;
if (argc != 3) {
printf("Wrong arguments number\n"
"I should run this way:\n"
"%s source result\n", argv[0]);
exit(1);
}
if ((wz = fopen(argv[1], "r")) == NULL) {
printf("Cannot open %s to read : %s\n", argv[1], strerror(errno));
exit(1);
}
if (stat(argv[1], &st) == -1) {
printf("Cannot get stat of %s : %s\n", argv[1], strerror(errno));
exit(2);
}
if ((array = malloc(st.st_size + 1)) == NULL) {
printf("Not enough memory to memorize the file %s\n", argv[1]);
exit(3);
}
if (fread(array, 1, st.st_size, wz) != st.st_size) {
printf("cannot read %s : %s", argv[1], strerror(errno));
fclose(wz); /* for valgrind end test etc */
free(array); /* for valgrind etc */
exit(4);
}
array[st.st_size] = 0;
fclose(wz);
if ((wc = fopen(argv[2], "w")) == NULL) {
printf("Cannot open %s to write : %s\n", argv[2], strerror(errno));
free(array); /* for valgrind etc */
exit(5);
}
/* ... */
fclose(wc);
free(array); /* for valgrind etc */
return 0;
}
Anyway, given the usage "source result", maybe you want to copy the file specified by argv[1] into the file specified by argv[2]. In that case it is better to read and write block by block rather than reading everything at once, so you don't use a lot of memory for nothing and you can also handle the case where the input file is larger than the available memory.
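For illustration, a minimal sketch of that block-by-block idea with stdio, reusing the names from the code above (wz and wc assumed already open; a fixed-size chunk replaces the whole-file buffer):
char chunk[4096];
size_t n;
while ((n = fread(chunk, 1, sizeof chunk, wz)) > 0) {
    if (fwrite(chunk, 1, n, wc) != n) {
        printf("cannot write %s : %s\n", argv[2], strerror(errno));
        fclose(wz);
        fclose(wc);
        exit(6);
    }
}
if (ferror(wz))
    printf("cannot read %s : %s\n", argv[1], strerror(errno));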
You cannot measure the length of the destination array with _strlen(s): the size is given as an argument, and reading an uninitialized array with _strlen() has undefined behavior.
Furthermore, you store fgetc(fp) into *s++ before testing for EOF. This is incorrect in all cases:
if the char type is signed, EOF cannot be distinguished from a valid byte value of \377;
if char is unsigned, EOF can never match because it has been converted to a char value of 0xff, hence the loop runs forever, writing beyond the end of the destination array until this causes a crash.
You simply want to add a test in the reading loop to stop reading bytes from the file when the buffer is full and read the bytes into an int variable so you can test for end of file reliably.
Here is a modified version:
#include <stdio.h>
#include <stdlib.h>
#define N 10000 // Maximum array size
int readText(FILE *wp, char *s, int max) {
int i = 0, c;
while (i < max - 1 && (c = fgetc(wp)) != EOF) {
s[i++] = c;
}
s[i] = '\0';
return i;
}
int main(int argc, char *argv[]) {
FILE *wz, *wc;
char array[N];
int nread;
if (argc != 3) {
printf("Wrong arguments number\n");
printf("I should run this way:\n");
printf("%s source result\n", argv[0]);
exit(1);
}
if ((wz = fopen(argv[1], "r")) == NULL) {
printf("Open error %s\n", argv[1]);
exit(1);
}
if ((wc = fopen(argv[2], "w")) == NULL) {
printf("Open error %s\n", argv[2]);
exit(2);
}
fprintf(wc, "Read text from file source.txt\n");
nread = readText(wz, array, N);
printf("Read %d bytes\n", nread);
return 0;
}

Copy data from file X to file Y program in C

I tried to write a basic program in C which copies data from one file to another, with the source path, destination path and buffer size given as input.
My problem is that the destination file ends up filled with junk or something, because it is way larger than the source (it gets bigger depending on the buffer size) and can't be opened.
How do I read and write just the bytes in the source?
I'm working in Linux, and this is the actual copying part:
char buffer[buffer_size];
int readable=1;
int writeable;
while(readable != 0){
readable = read(sourcef, buffer, buffer_size);
if(readable == -1){
close(sourcef);
close(destf);
exit_with_usage("Could not read.");
}
writeable = write(destf, buffer, buffer_size);
if(writeable == -1){
close(sourcef);
close(destf);
exit_with_usage("Could not write.");
}
}
writeable = write(destf, buffer, buffer_size);
must be
writeable = write(destf, buffer, readable);
Currently you do not write the number of bytes you read but the whole buffer, so the output file is too large.
You also handle the end of the input file incorrectly.
The return value of read is:
On success, the number of bytes read is returned (zero indicates end of file)
On error, -1 is returned
A proposal :
/* you already check input and output file was open with success */
char buffer[buffer_size];
for(;;){
ssize_t readable = read(sourcef, buffer, buffer_size);
if(readable <= 0){
close(sourcef);
close(destf);
if (readable != 0)
/* not EOF */
exit_with_usage("Could not read.");
/* EOF */
break;
}
if (write(destf, buffer, readable) != readable) {
close(sourcef);
close(destf);
exit_with_usage("Could not write.");
}
}
I suppose exit_with_usage calls exit(), so it does not return.
Note that in theory write may write fewer bytes than requested without that being an error, so strictly the write has to be done in a loop; in a simple case like this it is not worth handling.
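For completeness, such a loop could look roughly like this (write_all is a made-up helper name, not something from the question; it needs <unistd.h>):
/* Keep calling write() until all 'count' bytes have been written.
   Returns count on success, -1 on error (errno is set by write). */
ssize_t write_all(int fd, const void *buf, size_t count) {
    const char *p = buf;
    size_t left = count;
    while (left > 0) {
        ssize_t n = write(fd, p, left);
        if (n == -1)
            return -1;
        p += n;      /* advance past the bytes already written */
        left -= n;
    }
    return (ssize_t)count;
}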
The read function returns how many bytes were read into the buffer (which holds buffer_size bytes). The number of bytes actually read is not always equal to the buffer size (consider the scenario where there are not enough bytes left in the source file to completely fill your buffer). So the third argument of write should not be buffer_size, but how many bytes you actually read, which is the readable variable in your code.
You should exit when read returns an error. So
while(readable != 0){
should be
while(readable != -1){
so that the loop is terminated when reading fails.
You see, currently after the whole input file has been read, calling read fails, but write keeps being called repeatedly since execution has no exit path for a failure on read. Also, write should only write the number of bytes read. So the code would look like this:
char buffer[buffer_size];
int readable=1;
int writeable;
while(readable != -1){
readable = read(sourcef, buffer, buffer_size);
if(readable == -1){
close(sourcef);
close(destf);
exit_with_usage("Could not read.");
}
if(readable == 0){
// end of file reached, stop copying
break;
}
writeable = write(destf, buffer, readable);
if(writeable == -1){
close(sourcef);
close(destf);
exit_with_usage("Could not write.");
}
}
Simple code
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h> // For system calls write, read e close
#include <fcntl.h>
#define BUFFER_SIZE 4096
int main(int argc, char* argv[]) {
if (argc != 3) {
printf("Usage %s Src_file Dest_file\n", argv[0]);
exit(1);
}
unsigned char buffer[BUFFER_SIZE] = {0};
ssize_t ReadByte = 0;
int src_fd, dst_fd;
// open file in read mode
if ((src_fd = open(argv[1], O_RDONLY)) == -1) {
printf("Failed to open input file %s\n", argv[1]);
exit(1);
}
// open file in write mode, truncating it if it already exists
if ((dst_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644)) == -1) {
printf("Failed to create output file %s\n", argv[2]);
exit(1);
}
// loop
while (1) {
// read buffer
ReadByte = read(src_fd, buffer, sizeof(buffer));
// error with reading
if (ReadByte == -1) {
printf("Encountered an error\n");
break;
} else if (ReadByte == 0) {
// file end exit loop
printf("File copying successful.\n");
break;
}
// error with writing
if (write(dst_fd, buffer, ReadByte) == -1) {
printf("Failed to copying file\n");
break;
}
}
// Close file
close(src_fd);
close(dst_fd);
exit(0);
}
Run
./program src_file dest_file

C - How to pipe to a program that reads only from a file

I want to pipe a string to a program that reads input only from a file, not from stdin. Using it from bash, I can do something like
echo "hi" | program /dev/stdin
and I wanted to replicate this behaviour from C code. This is what I did:
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <string.h>
int main() {
pid_t pid;
int rv;
int to_ext_program_pipe[2];
int to_my_program_pipe[2];
if(pipe(to_ext_program_pipe)) {
fprintf(stderr,"Pipe error!\n");
exit(1);
}
if(pipe(to_my_program_pipe)) {
fprintf(stderr,"Pipe error!\n");
exit(1);
}
if( (pid=fork()) == -1) {
fprintf(stderr,"Fork error. Exiting.\n");
exit(1);
}
if(pid) {
close(to_my_program_pipe[1]);
close(to_ext_program_pipe[0]);
char string_to_write[] = "this is the string to write";
write(to_ext_program_pipe[1], string_to_write, strlen(string_to_write) + 1);
close(to_ext_program_pipe[1]);
wait(&rv);
if(rv != 0) {
fprintf(stderr, "%s %d\n", "phantomjs exit status ", rv);
exit(1);
}
char *string_to_read;
char ch[1];
size_t len = 0;
string_to_read = malloc(sizeof(char));
if(!string_to_read) {
fprintf(stderr, "%s\n", "Error while allocating memory");
exit(1);
}
while(read(to_my_program_pipe[0], ch, 1) == 1) {
string_to_read[len]=ch[0];
len++;
string_to_read = realloc(string_to_read, len*sizeof(char));
if(!string_to_read) {
fprintf(stderr, "%s\n", "Error while allocating memory");
}
string_to_read[len] = '\0';
}
close(to_my_program_pipe[0]);
printf("Output: %s\n", string_to_read);
free(string_to_read);
} else {
close(to_ext_program_pipe[1]);
close(to_my_program_pipe[0]);
dup2(to_ext_program_pipe[0],0);
dup2(to_my_program_pipe[1],1);
if(execlp("ext_program", "ext_program", "/dev/stdin" , NULL) == -1) {
fprintf(stderr,"execlp Error!");
exit(1);
}
close(to_ext_program_pipe[0]);
close(to_my_program_pipe[1]);
}
return 0;
}
It is not working.
EDIT
I don't get the ext_program output, which should be saved in string_to_read. The program just hangs. I can see that ext_program is executed, but I don't get anything back.
I would like to know if there is an error, or if what I want cannot be done. I also know that the alternative is to use named pipes.
EDIT 2: more details
As I still cannot get my program working, here is the complete code:
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
int main() {
pid_t pid;
int rv;
int to_phantomjs_pipe[2];
int to_my_program_pipe[2];
if(pipe(to_phantomjs_pipe)) {
fprintf(stderr,"Pipe error!\n");
exit(1);
}
if(pipe(to_my_program_pipe)) {
fprintf(stderr,"Pipe error!\n");
exit(1);
}
if( (pid=fork()) == -1) {
fprintf(stderr,"Fork error. Exiting.\n");
exit(1);
}
if(pid) {
close(to_my_program_pipe[1]);
close(to_phantomjs_pipe[0]);
char jsToExectue[] = "var page=require(\'webpage\').create();page.onInitialized=function(){page.evaluate(function(){delete window._phantom;delete window.callPhantom;});};page.onResourceRequested=function(requestData,request){if((/http:\\/\\/.+\?\\\\.css/gi).test(requestData[\'url\'])||requestData.headers[\'Content-Type\']==\'text/css\'){request.abort();}};page.settings.loadImage=false;page.settings.userAgent=\'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36\';page.open(\'https://stackoverflow.com\',function(status){if(status!==\'success\'){phantom.exit(1);}else{console.log(page.content);phantom.exit();}});";
write(to_phantomjs_pipe[1], jsToExectue, strlen(jsToExectue) + 1);
close(to_phantomjs_pipe[1]);
int read_chars;
int BUFF=1024;
char *str;
char ch[BUFF];
size_t len = 0;
str = malloc(sizeof(char));
if(!str) {
fprintf(stderr, "%s\n", "Error while allocating memory");
exit(1);
}
str[0] = '\0';
while( (read_chars = read(to_my_program_pipe[0], ch, BUFF)) > 0)
{
len += read_chars;
str = realloc(str, (len + 1)*sizeof(char));
if(!str) {
fprintf(stderr, "%s\n", "Error while allocating memory");
}
strcat(str, ch);
str[len] = '\0';
memset(ch, '\0', BUFF*sizeof(ch[0]));
}
close(to_my_program_pipe[0]);
printf("%s\n", str);
free(str);
wait(&rv);
if(rv != 0) {
fprintf(stderr, "%s %d\n", "phantomjs exit status ", rv);
exit(1);
}
} else {
dup2(to_phantomjs_pipe[0],0);
dup2(to_my_program_pipe[1],1);
close(to_phantomjs_pipe[1]);
close(to_my_program_pipe[0]);
close(to_phantomjs_pipe[0]);
close(to_my_program_pipe[1]);
execlp("phantomjs", "phantomjs", "--ssl-protocol=TLSv1", "/dev/stdin" , (char *)NULL);
}
return 0;
}
What I am trying to do is pass phantomjs a script to execute through a pipe and then read the resulting HTML as a string. I modified the code as suggested, but phantomjs still does not read from stdin.
I tested the script string by creating a dummy program that writes it to a file, then ran phantomjs on that file normally, and that works.
I also tried to execute execlp("phantomjs", "phantomjs", "--ssl-protocol=TLSv1", "path_to_script_file" , (char *)NULL); and that works too, the output HTML is shown.
It does not work when using a pipe.
An Explanation At Last
Some experimentation with PhantomJS shows that the problem is writing a null byte at the end of the JavaScript program sent to PhantomJS.
This highlights two bugs:
The program in the question sends an unnecessary null byte.
PhantomJS 2.1.1 (on a Mac running macOS High Sierra 10.13.3) hangs when an otherwise valid program is followed by a null byte.
The code in the question contains:
write(to_phantomjs_pipe[1], jsToExectue, strlen(jsToExectue) + 1);
The + 1 means that the null byte terminating the string is also written to phantomjs. And writing that null byte causes phantomjs to hang. That is tantamount to a bug — it certainly isn't clear why PhantomJS hangs without detecting EOF (there is no more data to come), and without giving an error, etc.
Change that line to:
write(to_phantomjs_pipe[1], jsToExectue, strlen(jsToExectue));
and the code works as expected — at least with PhantomJS 2.1.1 on a Mac running macOS High Sierra 10.13.3.
Initial analysis
You aren't closing enough file descriptors in the child.
Rule of thumb: if you dup2() one end of a pipe to standard input or standard output, close both of the original file descriptors returned by pipe() as soon as possible. In particular, you should close them before using any of the exec*() family of functions.
The rule also applies if you duplicate the descriptors with either dup() or fcntl() with F_DUPFD.
The child code shown is:
} else {
close(to_ext_program_pipe[1]);
close(to_my_program_pipe[0]);
dup2(to_ext_program_pipe[0],0);
dup2(to_my_program_pipe[1],1);
if(execlp("ext_program", "ext_program", "/dev/stdin" , NULL) == -1) {
fprintf(stderr,"execlp Error!");
exit(1);
}
close(to_ext_program_pipe[0]);
close(to_my_program_pipe[1]);
}
The last two close() statements are never executed; they need to appear before the execlp().
What you need is:
} else {
dup2(to_ext_program_pipe[0], 0);
dup2(to_my_program_pipe[1], 1);
close(to_ext_program_pipe[0]);
close(to_ext_program_pipe[1]);
close(to_my_program_pipe[0]);
close(to_my_program_pipe[1]);
execlp("ext_program", "ext_program", "/dev/stdin" , NULL);
fprintf(stderr, "execlp Error!\n");
exit(1);
}
You can resequence it splitting the close() calls, but it is probably better to regroup them as shown.
Note that there is no need to test whether execlp() failed. If it returns, it failed. If it succeeds, it does not return.
There could be another problem. The parent process waits for the child to exit before reading anything from the child. However, if the child tries to write more data than will fit in the pipe, the process will hang, waiting for some process (which will have to be the parent) to read the pipe. Since they're both waiting for the other to do something before they will do what the other is waiting for, it is (or, at least, could be) a deadlock.
You should also revise the parent process to do the reading before the waiting.
if (pid) {
close(to_my_program_pipe[1]);
close(to_ext_program_pipe[0]);
char string_to_write[] = "this is the string to write";
write(to_ext_program_pipe[1], string_to_write, strlen(string_to_write) + 1);
close(to_ext_program_pipe[1]);
char *string_to_read;
char ch[1];
size_t len = 0;
string_to_read = malloc(sizeof(char));
if(!string_to_read) {
fprintf(stderr, "%s\n", "Error while allocating memory");
exit(1);
}
while (read(to_my_program_pipe[0], ch, 1) == 1) {
string_to_read[len] = ch[0];
len++;
string_to_read = realloc(string_to_read, len*sizeof(char));
if (!string_to_read) {
fprintf(stderr, "%s\n", "Error while allocating memory\n");
exit(1);
}
string_to_read[len] = '\0';
}
close(to_my_program_pipe[0]);
printf("Output: %s\n", string_to_read);
free(string_to_read);
wait(&rv);
if (rv != 0) {
fprintf(stderr, "%s %d\n", "phantomjs exit status ", rv);
exit(1);
}
} …
I'd also rewrite the code to read in big chunks (1024 bytes or more). Just don't copy more data than the read returns, that's all. Repeatedly using realloc() to allocate one more byte to the buffer is ultimately excruciatingly slow. It won't matter much if there's only a few bytes of data; it will matter if there are kilobytes or more data to process.
Later: Since the PhantomJS program generates over 90 KiB of data in response to the message it was sent, this was a factor in the problems — or would have been were it not for the hang-on-null-byte bug in PhantomJS.
Still having problems 2018-02-03
I extracted the code, as amended, into a program (pipe89.c, compiled to pipe89). I got inconsistent crashes when the space allocated changed. I eventually realized that you're reallocating one byte too little space — it took a lot longer than it should have done (but it would help if Valgrind was available for macOS High Sierra — it isn't yet).
Here's the fixed code with debugging information commented output:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>
int main(void)
{
pid_t pid;
int rv;
int to_ext_program_pipe[2];
int to_my_program_pipe[2];
if (pipe(to_ext_program_pipe))
{
fprintf(stderr, "Pipe error!\n");
exit(1);
}
if (pipe(to_my_program_pipe))
{
fprintf(stderr, "Pipe error!\n");
exit(1);
}
if ((pid = fork()) == -1)
{
fprintf(stderr, "Fork error. Exiting.\n");
exit(1);
}
if (pid)
{
close(to_my_program_pipe[1]);
close(to_ext_program_pipe[0]);
char string_to_write[] = "this is the string to write";
write(to_ext_program_pipe[1], string_to_write, sizeof(string_to_write) - 1);
close(to_ext_program_pipe[1]);
char ch[1];
size_t len = 0;
char *string_to_read = malloc(sizeof(char));
if (string_to_read == 0)
{
fprintf(stderr, "%s\n", "Error while allocating memory");
exit(1);
}
string_to_read[len] = '\0';
while (read(to_my_program_pipe[0], ch, 1) == 1)
{
//fprintf(stderr, "%3zu: got %3d [%c]\n", len, ch[0], ch[0]); fflush(stderr);
string_to_read[len++] = ch[0];
char *new_space = realloc(string_to_read, len + 1); // KEY CHANGE is " + 1"
//if (new_space != string_to_read)
// fprintf(stderr, "Move: len %zu old %p vs new %p\n", len, (void *)string_to_read, (void *)new_space);
if (new_space == 0)
{
fprintf(stderr, "Error while allocating %zu bytes memory\n", len);
exit(1);
}
string_to_read = new_space;
string_to_read[len] = '\0';
}
close(to_my_program_pipe[0]);
printf("Output: %zu (%zu) [%s]\n", len, strlen(string_to_read), string_to_read);
free(string_to_read);
wait(&rv);
if (rv != 0)
{
fprintf(stderr, "%s %d\n", "phantomjs exit status ", rv);
exit(1);
}
}
else
{
dup2(to_ext_program_pipe[0], 0);
dup2(to_my_program_pipe[1], 1);
close(to_ext_program_pipe[0]);
close(to_ext_program_pipe[1]);
close(to_my_program_pipe[0]);
close(to_my_program_pipe[1]);
execlp("ext_program", "ext_program", "/dev/stdin", NULL);
fprintf(stderr, "execlp Error!\n");
exit(1);
}
return 0;
}
It was tested on a program which wrote 5590 bytes out for 27 bytes of input. That isn't as massive a multiplier as in your program, but it proves a point.
I still think you'd do better not reallocating a single extra byte at a time — the scanning loop should use a buffer of, say, 1 KiB and read up to 1 KiB at a time, and allocate the extra space all at once. That's a much less intensive workout for the memory allocation system.
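A sketch of that approach for the parent's reading loop (chunk and out are invented names; error handling trimmed):
char chunk[1024];
char *out = NULL;     /* grows to hold everything read so far */
size_t len = 0;
ssize_t n;
while ((n = read(to_my_program_pipe[0], chunk, sizeof chunk)) > 0) {
    char *tmp = realloc(out, len + n + 1);   /* +1 for the trailing '\0' */
    if (tmp == NULL) {
        fprintf(stderr, "Error while allocating %zu bytes\n", len + (size_t)n + 1);
        free(out);
        exit(1);
    }
    out = tmp;
    memcpy(out + len, chunk, (size_t)n);     /* append only what was read */
    len += (size_t)n;
    out[len] = '\0';
}
This calls realloc() once per chunk instead of once per byte, which keeps the allocator traffic proportional to the data size divided by the chunk size.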
Problems continuing on 2018-02-05
Taking the code from Edit 2 and changing only the function definition from int main() { to int main(void) { (because the compilation options I use don't allow old-style non-prototype function declarations or definitions, and without the void, that is not a prototype), the code is working fine for me. I created a surrogate phantomjs program (from another one I already have lying around), like this:
#include <stdio.h>
int main(int argc, char **argv, char **envp)
{
for (int i = 0; i < argc; i++)
printf("argv[%d] = <<%s>>\n", i, argv[i]);
for (int i = 0; envp[i] != 0; i++)
printf("envp[%d] = <<%s>>\n", i, envp[i]);
FILE *fp = fopen(argv[argc - 1], "r");
if (fp != 0)
{
int c;
while ((c = getc(fp)) != EOF)
putchar(c);
fclose(fp);
}
else
fprintf(stderr, "%s: failed to open file %s for reading\n",
argv[0], argv[argc-1]);
return(0);
}
This code echoes the argument list, the environment, and then opens the file named as the last argument and copies that to standard output. (It is highly specialized because of the special treatment for argv[argc-1], but the code before that is occasionally useful for debugging complex shell scripts.)
When I run your program with this 'phantomjs', I get the output I'd expect:
argv[0] = <<phantomjs>>
argv[1] = <<--ssl-protocol=TLSv1>>
argv[2] = <</dev/stdin>>
envp[0] = <<MANPATH=/Users/jleffler/man:/Users/jleffler/share/man:/Users/jleffler/oss/share/man:/Users/jleffler/oss/rcs/man:/usr/local/mysql/man:/opt/gcc/v7.3.0/share/man:/Users/jleffler/perl/v5.24.0/man:/usr/local/man:/usr/local/share/man:/usr/share/man:/opt/gnu/share/man>>
envp[1] = <<IXH=/opt/informix/12.10.FC6/etc/sqlhosts>>
…
envp[49] = <<HISTFILE=/Users/jleffler/.bash.jleffler>>
envp[50] = <<_=./pipe31>>
var page=require('webpage').create();page.onInitialized=function(){page.evaluate(function(){delete window._phantom;delete window.callPhantom;});};page.onResourceRequested=function(requestData,request){if((/http:\/\/.+?\\.css/gi).test(requestData['url'])||requestData.headers['Content-Type']=='text/css'){request.abort();}};page.settings.loadImage=false;page.settings.userAgent='Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36';page.open('https://stackoverflow.com',function(status){if(status!=='success'){phantom.exit(1);}else{console.log(page.content);phantom.exit();}});
At this point, I have to point the finger at phantomjs in your environment; it doesn't seem to behave as expected when you do the equivalent of:
echo "$JS_PROG" | phantomjs /dev/stdin | cat
Certainly, I cannot reproduce your problem any more.
You should take my surrogate phantomjs code and use that instead of the real phantomjs and see what you get.
If you get output analogous to what I showed, then the problem is with the real phantomjs.
If you don't get output analogous to what I showed, then maybe there is a problem with your code from the update to the question.
Later: Note that because the printf() uses %s to print the data, it would not notice the extraneous null byte being sent to the child.
In the pipe(7) man page it is written that you should read from the pipe as soon as possible:
If a process attempts to write to a
full pipe (see below), then write(2) blocks until sufficient data has
been read from the pipe to allow the write to complete. Nonblocking
I/O is possible by using the fcntl(2) F_SETFL operation to enable the
O_NONBLOCK open file status flag.
and
A pipe has a limited capacity. If the pipe is full, then a write(2)
will block or fail, depending on whether the O_NONBLOCK flag is set
(see below). Different implementations have different limits for the
pipe capacity. Applications should not rely on a particular
capacity: an application should be designed so that a reading process
consumes data as soon as it is available, so that a writing process
does not remain blocked.
In your code you write, wait, and only then read:
write(to_ext_program_pipe[1], string_to_write, strlen(string_to_write) + 1);
close(to_ext_program_pipe[1]);
wait(&rv);
//...
while(read(to_my_program_pipe[0], ch, 1) == 1) {
//...
Maybe the pipe is full, or ext_program is waiting for its output to be read; you should call wait() only after the read.

How to change STDIN stream to binary

I'm making an upload form via a CGI interface. I'm writing it in C and don't want to use any outside libraries (ie. cgic).
I thought the program was complete, as the first test files uploaded correctly. But those were ASCII files. When I tested with a binary file (a JPG), it seems that STDIN tries to read the binary data as ASCII, which creates a problem for characters like \0, which terminates an ASCII string but is a common byte in binary files. Uploading a 1.9 MB file results in a 38 kB file.
When searching for how to change the STDIN stream to binary, I was referred to the freopen function and told to use NULL as the filename argument. The documentation says:
If filename is a null pointer, the freopen() function shall attempt to
change the mode of the stream to that specified by mode, as if the
name of the file currently associated with the stream had been used.
In this case, the file descriptor associated with the stream need not
be closed if the call to freopen() succeeds. It is
implementation-defined which changes of mode are permitted (if any),
and under what circumstances.
But when I check the man page on my system with man 3 freopen, it doesn't say any of this at all. Furthermore, reading the man page, I find out that the option for binary (adding 'b' to the mode) is no longer recognized and only exists for backwards compatibility:
The mode string can also include
the letter 'b' either as a last character or as a character between
the characters in any of the two-character strings described above.
This is strictly for compatibility with C89 and has no effect; the 'b'
is ignored on all POSIX conforming systems, including Linux.
So right now I'm completely lost. How can I change the STDIN stream to read binary input?
Here is the code:
#include <stdio.h>
#include <stdlib.h>
#include <libgen.h>
#include <string.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>
// Declare constants.
#define BUF_SIZE 4096
#define FILENAME_SIZE 500
#define MARKER_SIZE 100
#define RETURN_FAILURE 0
#define RETURN_SUCCESS 1
#define SEARCH_STRING_1 "filename=\""
#define SEARCH_STRING_2 "\r\n\r\n"
// Declare global variables.
char filename[FILENAME_SIZE + 1];
char *program_name;
// Declare function prototype.
void print_footer (void);
void print_header (void);
void process_input (char *data);
int main (int argc, char *argv[])
{
// Declare variables.
long long ret;
char buf[BUF_SIZE + 1];
// Get program name for error reporting.
program_name = basename(argv[0]);
// Prepare output for browser.
print_header();
// Protect variable against buffer overflow.
buf[BUF_SIZE] = '\0';
// Loop through all the file data.
while(1)
{
// Read in the next block of data.
if((ret = (long long) fread(buf, 1, BUF_SIZE, stdin)) != BUF_SIZE)
{
// Check for error.
if(ferror(stdin) != 0)
{
printf("%s: An error occurred while reading the input file.<br>\n", program_name);
process_input(NULL);
exit(EXIT_FAILURE);
}
// Check for EOF.
else if(feof(stdin) != 0)
break;
}
// Terminate and process uploaded data.
buf[ret] = '\0';
process_input(buf);
}
// Terminate and process uploaded data.
buf[ret] = '\0';
process_input(buf);
// Finish user output, close output file and exit.
print_footer();
process_input(NULL);
exit(EXIT_SUCCESS);
}
void process_input (char *data)
{
// Declare variables.
char *ptr1= NULL;
char *ptr2;
int x = 0;
static FILE *fp;
static int flag = 0;
static char marker[MARKER_SIZE + 1];
// If data is NULL, close output file.
if(data == NULL)
{
if(fclose(fp) == EOF)
{
printf("%s: process_input: close failed (%s)<br>\n", program_name, strerror(errno));
exit(EXIT_FAILURE);
}
return;
}
// Check if this is the first time through.
if(flag == 0)
{
// Get marker.
if((ptr1 = strchr(data, '\n')) == NULL)
{
printf("%s: process_input: strchr(1) failed (\n)<br>\n", program_name);
exit(EXIT_FAILURE);
}
ptr1[0] = '\0';
strcpy(marker, data);
ptr1[0] = '\n';
// Get filename.
if((ptr1 = strstr(data, SEARCH_STRING_1)) == NULL)
{
printf("%s: process_input: strstr(1) failed (%s)<br>\n", program_name, SEARCH_STRING_1);
exit(EXIT_FAILURE);
}
// Advance pointer to start of filename.
ptr1 += 10;
// Find end of filename.
if((ptr2 = strchr(ptr1, '"')) == NULL)
{
printf("%s: process_input: strchr(2) failed (\")<br>\n", program_name);
exit(EXIT_FAILURE);
}
// Terminate and store filename.
ptr2[0] = '\0';
strcpy(filename, ptr1);
ptr2[0] = '"';
// Remove spaces from filename.
while(filename[x] != '\0')
{
if(filename[x] == ' ')
filename[x] = '.';
x++;
}
// Open output file.
if((fp = fopen(filename, "wb")) == NULL)
{
printf("%s: process_input: fopen failed (%s) (%s)<br>\n", program_name, strerror(errno), filename);
exit(EXIT_FAILURE);
}
// Find start of file data.
if((ptr1 = strstr(data, SEARCH_STRING_2)) == NULL)
{
printf("%s: process_input: strstr(2) failed (%s)<br>\n", program_name, SEARCH_STRING_2);
fclose(fp);
exit(EXIT_FAILURE);
}
// Set flag.
flag++;
// Advance pointer to start of file data.
ptr1 += 4;
// Change STDIN stream to binary.
if(freopen(NULL, "rb", stdin) == NULL)
{
printf("%s: process_input: freopen failed (%s)<br>\n", program_name, strerror(errno));
fclose(fp);
exit(EXIT_FAILURE);
}
}
// Catch everything else.
else
{
ptr1 = data;
if((ptr2 = strstr(ptr1, marker)) != NULL)
ptr2[0 - 2] = '\0';
}
// Write file data.
if(fwrite(ptr1, 1, strlen(ptr1), fp) != strlen(ptr1))
{
printf("%s: process_input: write failed (%s)<br>\n", program_name, strerror(errno));
fclose(fp);
exit(EXIT_FAILURE);
}
}
void print_footer (void)
{
printf("\nMade it!\n");
}
void print_header (void)
{
printf("Content-type: text/plain\r\n\r\n");
}
OK, it appears what @NominalAnimal said was correct. You can store binary data in a char array, but the moment you use any of the string functions from string.h on it, the result is almost always wrong, because they treat the first '\0' byte as the end of the data.
The easy solution is to make a separate function that takes a pointer to the binary data (and its length) and do your searches in that function, returning whatever pertinent information is needed. That way, the original data is never changed.
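A sketch of that idea: a hypothetical helper (not part of the original program) that searches a length-delimited buffer with memcmp, so an embedded '\0' byte doesn't end the search the way the str* functions would:
/* Find 'needle' (nlen bytes) inside 'hay' (hlen bytes) without treating
   '\0' as a terminator. Returns a pointer to the first match, or NULL. */
static char *find_bytes(char *hay, size_t hlen, const char *needle, size_t nlen)
{
    if (nlen == 0 || hlen < nlen)
        return NULL;
    for (size_t i = 0; i + nlen <= hlen; i++) {
        if (memcmp(hay + i, needle, nlen) == 0)
            return hay + i;
    }
    return NULL;
}
The caller would then pass the byte count returned by fread() as hlen instead of relying on strlen().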
'stdin' is the standard input stream; its underlying file descriptor is STDIN_FILENO, which is equal to 0. See also 'unistd.h'.
You are not showing your code, but I think you stop when you encounter a '\0' or a non-ASCII char, since you said you were using fread().
You have to stop when fread() returns 0, which means it stopped reading: it encountered EOF.

I am trying to print a txt file and it doesn't work (C homework)

I'm writing code that's supposed to verify that a .txt file is in a certain format.
I wrote my code as I saw in a tutorial and on the website, and for some reason my program doesn't even print my file.
Can you tell me what I'm doing wrong?
Can you tell me what I'm doing wrong?
The code will do something far more complex, but I'm still trying to work on my basics.
Here's my code so far:
int main(int argc, char *argv[]) {
/* argv[0] = name of my running file
* argv[1] = the first file that i receive
*/
#define MAXBUFLEN 4096
char source[MAXBUFLEN + 1];
int badReturnValue = 1;
char *error = "Error! trying to open the file ";
if (argc != 2) {
printf("please supply a file \n");
return badReturnValue;
}
char *fileName = argv[1];
FILE *fp = fopen(argv[1], "r"); /* "r" = open for reading */
if (fp != NULL) {
size_t newLen = fread(&source, sizeof(char), MAXBUFLEN, fp);
if (ferror(fp) != 0) {
printf("%s %s", error, fileName);
return badReturnValue;
}
int symbol;
while ((symbol = getc(fp)) != EOF) {
putchar(symbol);
}
printf("finish");
fclose(fp);
}
else {
printf("%s %s", error, fileName);
return badReturnValue;
}
}
I think you need a bit more explanations:
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
// there might be a macro BUFLEN defined in stdio
// whose size is optimized for reading in chunks.
// Test if available, otherwise define it
#ifndef BUFLEN
# define BUFLEN 4096
#endif
int main(int argc, char *argv[])
{
char source[BUFLEN];
char *filename;
FILE *fp;
size_t fpread, written;
int c; // int, not char, so comparing against EOF below works reliably
int ret_fclose;
if (argc != 2) {
fprintf(stderr, "Usage: %s filename\n", argv[0]);
exit(EXIT_FAILURE);
}
// reset errno, just in case
errno = 0;
// work on copy
filename = malloc(strlen(argv[1]) + 1);
if (filename == NULL) {
fprintf(stderr, "Allocating %zu bytes failed\n", strlen(argv[1]) + 1);
exit(EXIT_FAILURE);
}
filename = strcpy(filename, argv[1]);
// try to open the file at 'filename'
fp = fopen(filename, "r");
if (fp == NULL) {
fprintf(stderr, "Opening file \"%s\" filename failed\n", filename);
// errno might got set to something usable, check and print
if (errno != 0) {
fprintf(stderr, "Error: %s\n", strerror(errno));
}
exit(EXIT_FAILURE);
}
// You have two options here. One is to read in chunks of MAXBUFLEN
while ((fpread = fread(&source, 1, BUFLEN, fp)) > 0) {
// Do something with the stuff we read into "source"
// we do nothing with it here, we just write to stdout
written = fwrite(&source, 1, fpread, stdout);
// you can use 'written' for error check when writing to an actual file
// but it is unlikely (but not impossible!) with stdout
// test if we wrote what we read
if ((fpread - written) != 0) {
fprintf(stderr, "We did not write what we read. Diff: %d\n",
(int) (fpread - written));
}
}
// fread() does not distinguish between EOF and error, we have to check by hand
if (feof(fp)) {
// we have read all, exit
puts("\n\n\tfinish\n");
// No, wait, we want to do it again in a different way, so: no exit
// exit(EXIT_SUCCESS);
} else {
// some error may have occured, check
if (ferror(fp)) {
fprintf(stderr, "Something bad happend while reading \"%s\"\n", filename);
if (errno != 0) {
fprintf(stderr, "Error: %s\n", strerror(errno));
}
exit(EXIT_FAILURE);
}
}
// the other way is to read it byte by byte
// reset the filepointers/errors et al.
rewind(fp);
// rewind() should have reset errno, but better be safe than sorry
errno = 0;
printf("\n\n\tread and print \"%s\" again\n\n\n\n", filename);
// read one byte and print it until end of file
while ((c = fgetc(fp)) != EOF) {
// just print. Gathering them into "source" is left as an exercise
fputc(c, stdout);
}
// clean up
errno = 0;
ret_fclose = fclose(fp);
// even fclose() might fail
if (ret_fclose == EOF) {
fprintf(stderr, "Something bad happend while closing \"%s\"\n", filename);
if (errno != 0) {
fprintf(stderr, "Error: %s\n", strerror(errno));
}
exit(EXIT_FAILURE);
}
// The macros EXIT_FAILURE and EXIT_SUCCESS are set to the correct values for
// the OS to tell it whether we had an error or not.
// Using exit() is not necessary here, but there exists the function atexit()
// that runs a given function (e.g.: clean up, save content etc.) when exit() is called
exit(EXIT_SUCCESS);
}
You read from the file twice but only print once.
If the file is too small, the first read will consume all of the contents, and the second read will not produce anything, so you don't print anything.
I believe you have to reset the file position after using fread.
Try fseek(fp, 0, SEEK_SET) to reset the position to the beginning of the file. Then print the file.
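A minimal sketch of that fix, using the variables from the question (read a chunk with fread, reset the position, then print character by character):
size_t newLen = fread(source, 1, MAXBUFLEN, fp);
printf("read %zu bytes into the buffer\n", newLen);
rewind(fp);               /* equivalent to fseek(fp, 0, SEEK_SET) */
int symbol;
while ((symbol = getc(fp)) != EOF)
    putchar(symbol);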
