Bad file descriptors when implementing pipes & execvp - c

I'm currently working on an assignment that's teaching us how to implement pipes in a custom shell. Before I actually add pipes to my shell and change my code, they want us to create two children and run a command in each child, connected by a pipe:
Execute "ls -l" on child 1
Execute "tail -n 2" on child 2
Currently, my code looks like this:
int main(int argc, char *argv[]) {
    int debugMode = 0;
    int p[2];
    int writeDup;
    int readDup;
    int status;

    if (strcmp(argv[1], "-d") == 0)
        debugMode = 1;
    if (pipe(p) < 0)
        return 0;

    int child1 = fork();
    if (child1 == 0)
    {
        if (debugMode == 1)
            fprintf(stderr, "Child 1 is redirecting stdout to write end of pipe.\n");
        fclose(stdout);
        writeDup = dup(p[1]);
        close(writeDup);
        char *args[] = {"ls", "-l", NULL};
        if (execvp(args[0], args) < 0) {
            if (debugMode == 1)
                perror("ls -l failed ");
            return 0;
        }
    }
    else
    {
        if (debugMode == 1)
            fprintf(stderr, "Parent process is waiting to close write end of pipe.\n");
        while ((child1 = waitpid(-1, &status, 0)) != -1);
        close(p[1]);
    }

    int child2 = fork();
    if (child2 == 0)
    {
        fclose(stdin);
        readDup = dup(p[0]);
        close(readDup);
        char *args[] = {"tail", "-n", "2", NULL};
        if (execvp(args[0], args) < 0) {
            if (debugMode == 1)
                perror("tail -n 2 failed ");
            return 0;
        }
    }
    else {
        if (debugMode == 1)
            fprintf(stderr, "Parent process is closing read end of pipe.\n");
        while ((child2 = waitpid(-1, &status, 0)) != -1);
        close(p[0]);
    }

    if (debugMode == 1 && child1 != 0 && child2 != 0)
        fprintf(stderr, "Waiting for child processes to terminate.\n");
    while ((child1 = waitpid(-1, &status, 0)) != -1 && (child2 = waitpid(-1, &status, 0)) != -1);

    return 0;
}
However, while executing, I receive several errors:
ls: write error : bad file descriptor
tail: cannot fstat 'standard input': Bad file descriptor
tail: -: bad file descriptor
They asked us to close the standard input and output, and I assumed that by doing so the program would default to reading from / writing to the pipe instead. I'm still trying to find a solution; I would appreciate any help or direction!
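(The related answers below all come down to the same pattern. A minimal sketch of that pattern applied to this ls -l | tail -n 2 exercise, with the debug flag and argument handling stripped out; the essential differences from the code above are that the descriptor returned by dup() is kept open, both original pipe ends are closed in every process, and the parent does not wait for child 1 before forking child 2:)

    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        int p[2];

        if (pipe(p) < 0)
            return 1;

        pid_t child1 = fork();
        if (child1 == 0)
        {
            /* child 1: stdout becomes the write end of the pipe */
            close(STDOUT_FILENO);       /* fd 1 is now free                    */
            dup(p[1]);                  /* dup() returns the lowest free fd: 1 */
            close(p[0]);                /* child 1 never reads the pipe        */
            close(p[1]);                /* fd 1 still refers to the write end  */
            char *args[] = { "ls", "-l", NULL };
            execvp(args[0], args);
            perror("ls -l failed");
            _exit(1);
        }

        pid_t child2 = fork();          /* fork child 2 before waiting */
        if (child2 == 0)
        {
            /* child 2: stdin becomes the read end of the pipe */
            close(STDIN_FILENO);        /* fd 0 is now free                    */
            dup(p[0]);                  /* dup() returns the lowest free fd: 0 */
            close(p[0]);                /* fd 0 still refers to the read end   */
            close(p[1]);                /* otherwise tail never sees EOF       */
            char *args[] = { "tail", "-n", "2", NULL };
            execvp(args[0], args);
            perror("tail -n 2 failed");
            _exit(1);
        }

        /* parent: close both ends, then wait for both children */
        close(p[0]);
        close(p[1]);
        while (waitpid(-1, NULL, 0) > 0)
            ;
        return 0;
    }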

Related

Trying to re-implement "|" pipe operator in C

I'm trying to re-implement the pipe "|" operator.
The program will be executed as follows:
$ ./exe infile cmd cmd2 cmd3 cmdn outfile
where I first read from infile, process infile's data through the commands, and finally pipe it to outfile.
IMPLEMENTATION:
My pseudo code looks like the following:
change infile descriptor to stdin.
loop each command:
    pipe();
    fork();
    if (we're in the child process)
        change stdin to read_end of pipe.
        execute command.
    else if (we're in the parent process)
        change stdout to write_end of pipe.
        execute command.
        wait for child process.
change outfile descriptor to stdout.
CODE:
int infile_fd = open(args->infile, O_RDONLY);
dup2(infile_fd, STDIN_FILENO);
// how many commands
while (i < args->len)
{
    // error handling stuff
    args->cmds[i].cmd = check_exist(args->cmds[i], args);
    if (pipe(args->cmds[i].fd) == -1)
    {
        perror("piping failed\n");
        exit(EXIT_FAILURE);
    }
    if ((args->cmds[i].pid = fork()) == -1)
    {
        perror("fork failed\n");
        exit(EXIT_FAILURE);
    } else {
        // child process
        if (args->cmds[i].pid == 0)
        {
            close(args->cmds[i].fd[WRITE_END]);
            dup2(args->cmds[i].fd[READ_END], STDIN_FILENO);
            if (execve(args->cmds[i].cmd, args->cmds[i].flags, env) == -1)
            {
                perror("execve failed\n");
                exit(EXIT_FAILURE);
            }
            i++;
        }
        else
        {
            // parent process
            close(args->cmds[i].fd[READ_END]);
            dup2(args->cmds[i].fd[WRITE_END], STDOUT_FILENO);
            if (execve(args->cmds[i].cmd, args->cmds[i].flags, env) == -1)
            {
                perror("execve failed\n");
                exit(EXIT_FAILURE);
            }
            wait(NULL);
            i++;
        }
    }
}
int outfile_fd = open(args->outfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
// change stdout to outfile
dup2(outfile_fd, STDOUT_FILENO);
OUTPUT:
output is garbage, commands read data off the stack and start showing env variables.
EXPECTED OUTPUT:
data first being read from "infile" and then passed through commands until the end of the piping channel at "outfile".
It would be silly to ask what I'm doing wrong, because I'm probably doing it all wrong, so here's a better question: how can I do it right?
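One common structure for this kind of loop is to let only the children call exec, while the parent just remembers the read end of the previous pipe so it can become the next child's stdin, closes its own copies, and waits once after the loop. A sketch of that shape (the run_pipeline helper, its argument layout, and the use of execvp instead of the execve/check_exist combination above are all illustrative, not taken from the code in the question):

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Hypothetical helper: run cmds[0] | cmds[1] | ... | cmds[n-1].  The caller
     * is assumed to have already redirected stdin/stdout to infile/outfile. */
    static void run_pipeline(char **cmds[], int n)
    {
        int prev_read = -1;                     /* read end of the previous pipe */

        for (int i = 0; i < n; i++)
        {
            int fd[2] = { -1, -1 };
            if (i < n - 1 && pipe(fd) == -1)
            {
                perror("pipe");
                exit(EXIT_FAILURE);
            }

            pid_t pid = fork();
            if (pid == -1)
            {
                perror("fork");
                exit(EXIT_FAILURE);
            }
            if (pid == 0)                       /* child: the only place that execs */
            {
                if (prev_read != -1)            /* stdin <- previous pipe's read end */
                {
                    dup2(prev_read, STDIN_FILENO);
                    close(prev_read);
                }
                if (i < n - 1)                  /* stdout -> current pipe's write end */
                {
                    dup2(fd[1], STDOUT_FILENO);
                    close(fd[0]);
                    close(fd[1]);
                }
                execvp(cmds[i][0], cmds[i]);
                perror("execvp");
                _exit(EXIT_FAILURE);
            }

            /* parent: never execs, only bookkeeping */
            if (prev_read != -1)
                close(prev_read);
            if (i < n - 1)
            {
                close(fd[1]);                   /* parent won't write to this pipe */
                prev_read = fd[0];              /* the next child reads from here  */
            }
        }

        while (wait(NULL) > 0)                  /* reap all children after the loop */
            ;
    }

    int main(void)
    {
        char *ls[] = { "ls", "-l", NULL };
        char *wc[] = { "wc", "-l", NULL };
        char **cmds[] = { ls, wc };

        run_pipeline(cmds, 2);
        return 0;
    }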

Program crash in forking process with pipes

I'm writing a basic shell for course homework that will find a command in the given list of paths, and execute the command. It is also meant to handle pipes.
However, when I fork a child process, I get a "Write error : Broken Pipe" message in gdb, and the program terminates abruptly.
I cannot seem to understand why this is happening, since I've been careful about opening and closing the correct pipes, and the process forking seems to work as desired. Can someone with more experience in C and Unix programming please help me diagnose the problem? Is there something logically incorrect in my fork implementation or pipe implementation?
//commands is of the format {"ls -al", "more", NULL}
//it represents commands connected by pipes, ex. ls -al | more
char **commands = parseArgv(consoleinput, SPECIAL_CHARS[4]);

int numcommands = 0;
while (commands[numcommands] != NULL)
{
    numcommands++;
}

const int numpipes = 2*(numcommands-1);
int pipefds[numpipes];
int i = 0;
for (i = 0; i < numpipes; i = i+2)
{
    pipe(pipefds+i);
}

int pipe_w = 1;
int pipe_r = pipe_w - 3;
int curcommand = 0;
while (curcommand < numcommands)
{
    if (pipe_w < numpipes)
    {
        //open write end
        dup2(pipefds[pipe_w], 1);
    }
    if (pipe_r > 0)
    {
        //open read end
        dup2(pipefds[pipe_r], 0);
    }
    for (i = 0; i < numpipes; i++) //close off all pipes
    {
        close(pipefds[i]);
    }
    //Parse current command and Arguments into format needed by execv
    char **argv = parseArgv(commands[curcommand], SPECIAL_CHARS[0]);
    //findpath() replaces argv[0], i.e. command name by its full path ex. ls by /bin/ls
    if (findPath(argv) == 0)
    {
        int child_pid = fork();
        //Program crashes after this point
        //Reason: /bin/ls: write error, broken pipe
        if (child_pid < 0)
        {
            perror("fork error:");
        }
        else if (child_pid == 0) //fork success
        {
            if (execv(argv[0], argv) == -1)
            {
                perror("Bad command or filename:");
            }
        }
        else
        {
            int child_status;
            child_pid = waitpid(child_pid, &child_status, 0);
            if (child_pid < 0)
            {
                perror("waitpid error:");
            }
        }
    }
    else
    {
        printf("Bad command or filename");
    }
    free(argv);
    curcommand++;
    pipe_w = pipe_w + 2;
    pipe_r = pipe_r + 2;
}

//int i=0;
for (i = 0; i < numpipes; i++) //close off all pipes
{
    close(pipefds[i]);
}
free(commands);
Duplicating the file descriptors after the fork() call, i.e. in the child process, is the correct way.
Also, the waitpid() call makes one child process wait for the other, and the shell hangs. The wait() call should be moved to after the loop, i.e. the parent should wait for all the children.
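Sketched against the variable names used in the question's code (error handling kept minimal), the reshaped loop body would look roughly like this, with the redirection moved into the child and no waitpid() inside the loop:

    int child_pid = fork();
    if (child_pid < 0)
    {
        perror("fork error");
    }
    else if (child_pid == 0)            /* child: redirect, close, then exec */
    {
        if (pipe_w < numpipes)
            dup2(pipefds[pipe_w], 1);   /* stdout -> write end of this pipe    */
        if (pipe_r > 0)
            dup2(pipefds[pipe_r], 0);   /* stdin  <- read end of previous pipe */
        for (i = 0; i < numpipes; i++)  /* close every original pipe fd        */
            close(pipefds[i]);
        execv(argv[0], argv);
        perror("Bad command or filename");
        _exit(1);
    }
    /* parent: no waitpid() here */

The existing cleanup loop that closes all of pipefds[] then runs in the parent after the launching loop, followed by a wait() loop that reaps all the children, so the commands in the pipeline can run concurrently.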

trying to run "ls | grep r" with "execvp()"

I created a pipe between two child processes.
First, I run ls, which writes to the proper fd; then I run grep r, which reads from the proper fd.
I can see in the terminal that the grep command works fine (the output is correct).
The problem is that grep doesn't quit; it just sits there, even though ls isn't running anymore.
For other programs the pipe works fine.
for (i = 0; i < commands_num; i++) { //exec all the commands instants
    if (pcommands[i]._flag_pipe_out == 1) { //creates pipe if necessary
        if (pipe(pipe_fd) == -1) {
            perror("Error: \"pipe()\" failed");
        }
        pcommands[i]._fd_out = pipe_fd[1];
        pcommands[i+1]._fd_in = pipe_fd[0];
    }
    pid = fork(); //the child exec the commands
    if (pid == -1) {
        perror("Error: \"fork()\" failed");
        break;
    } else if (!pid) { //child process
        if (pcommands[i]._flag_pipe_in == 1) { //if there was a pipe to this command
            if (dup2(pcommands[i]._fd_in, STDIN) == -1) {
                perror("Error: \"dup2()\" failed");
                exit(0);
            }
            close(pcommands[i]._fd_in);
        }
        if (pcommands[i]._flag_pipe_out == 1) { //if there was a pipe from this command
            if (dup2(pcommands[i]._fd_out, STDOUT) == -1) {
                perror("Error: \"dup2()\" failed");
                exit(0);
            }
            close(pcommands[i]._fd_out);
        }
        execvp(pcommands[i]._commands[0], pcommands[i]._commands); //run the command
        perror("Error: \"execvp()\" failed");
        exit(0);
    } else if (pid > 0) { //father process
        waitpid(pid, NULL, WUNTRACED);
    }
}

//closing all the open fd's
for (i = 0; i < commands_num; i++) {
    if (pcommands[i]._fd_in != STDIN) { //if there was an other stdin that is not 0
        close(pcommands[i]._fd_in);
    }
    if (pcommands[i]._fd_out != STDOUT) { //if there was an other stdout that is not 1
        close(pcommands[i]._fd_out);
    }
}
So, I have a "command" instance, pcommands[i].
It has:
a pipein flag and a pipeout flag,
an fdin and an fdout,
and a char ** (for the real command, like "ls -l").
Let's say everything is set up correctly; that means:
pcommands[0]:
pipein=0
pipeout=1
char** = {"ls","-l",NULL}
pcommands[1]:
pipein=1
pipeout=0
char** = {"grep","r",NULL}
Now, the loop will run twice (because I have two command instances).
The first time through, it sees that pcommands[0] has pipeout == 1, so it:
creates the pipe,
forks,
and in the child (since pcommands[0] has pipeout == 1): dup2 to stdout, then execvp.
The second time through, it:
doesn't create a pipe,
forks,
and in the child (since pcommands[1] has pipein == 1): dup2 to stdin, then execvp.
This command works; my output is:
errors.log exer2.pdf multipal_try
(all the things with 'r')
But then it gets stuck and never gets out of grep. In another terminal I can see grep is still running.
I hope I close all the fd's I need to close...
I don't understand why it doesn't work; it seems like I'm doing it right (well, it works for other commands..).
Can someone please help? Thanks!
You aren't closing enough pipe file descriptors.
Rule of Thumb:
If you use dup() or dup2() to duplicate a pipe file descriptor to standard input or standard output, you should close both of the original pipe file descriptors.
You also need to be sure that if the parent shell creates the pipe, it closes both of its copies of the pipe file descriptors.
Also note that the processes in a pipeline should be allowed to run concurrently. In particular, pipes have a limited capacity, and a process blocks when there's no room left in the pipe. The limit can be quite small (POSIX mandates it must be at least 4 KiB, but that's all). If your programs deal with megabytes of data, they must be allowed to run concurrently in the pipeline. Therefore, the waitpid() should occur outside the loop that launches the children. You also need to close the pipes in the parent process before waiting; otherwise, the child reading the pipe will never see EOF (because the parent could, in theory, write to the pipe, even though it won't).
You have structure members whose names start with an underscore. That's dangerous. Names starting with an underscore are reserved for the implementation. The C standard says:
ISO/IEC 9899:2011 §7.1.3 Reserved Identifiers
— All identifiers that begin with an underscore and either an uppercase letter or another
underscore are always reserved for any use.
— All identifiers that begin with an underscore are always reserved for use as identifiers
with file scope in both the ordinary and tag name spaces.
That means that if you run into problems, then the trouble is yours, not the system's. Obviously, your code works, but you should be aware of the problems you could run into and it is wisest to avoid them.
Sample Code
This is a fixed SSCCE based on the code above:
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>
#include <unistd.h>

typedef struct Command Command;
struct Command
{
    int _fd_out;
    int _fd_in;
    int _flag_pipe_in;
    int _flag_pipe_out;
    char **_commands;
};

typedef int Pipe[2];

enum { STDIN = STDIN_FILENO, STDOUT = STDOUT_FILENO, STDERR = STDERR_FILENO };

int main(void)
{
    char *ls_cmd[] = { "ls", 0 };
    char *grep_cmd[] = { "grep", "r", 0 };
    Command commands[] =
    {
        {
            ._fd_in = 0, ._flag_pipe_in = 0,
            ._fd_out = 1, ._flag_pipe_out = 1,
            ._commands = ls_cmd,
        },
        {
            ._fd_in = 0, ._flag_pipe_in = 1,
            ._fd_out = 1, ._flag_pipe_out = 0,
            ._commands = grep_cmd,
        }
    };
    int commands_num = sizeof(commands) / sizeof(commands[0]);

    /* Allow valgrind to check memory */
    Command *pcommands = malloc(commands_num * sizeof(Command));
    for (int i = 0; i < commands_num; i++)
        pcommands[i] = commands[i];

    for (int i = 0; i < commands_num; i++) { //exec all the commands instants
        if (pcommands[i]._flag_pipe_out == 1) { //creates pipe if necessary
            Pipe pipe_fd;
            if (pipe(pipe_fd) == -1) {
                perror("Error: \"pipe()\" failed");
            }
            pcommands[i]._fd_out = pipe_fd[1];
            pcommands[i+1]._fd_in = pipe_fd[0];
        }
        pid_t pid = fork(); //the child exec the commands
        if (pid == -1) {
            perror("Error: \"fork()\" failed");
            break;
        } else if (!pid) { //child process
            if (pcommands[i]._flag_pipe_in == 1) { //if there was a pipe to this command
                assert(i > 0);
                assert(pcommands[i-1]._flag_pipe_out == 1);
                assert(pcommands[i-1]._fd_out > STDERR);
                if (dup2(pcommands[i]._fd_in, STDIN) == -1) {
                    perror("Error: \"dup2()\" failed");
                    exit(0);
                }
                close(pcommands[i]._fd_in);
                close(pcommands[i-1]._fd_out);
            }
            if (pcommands[i]._flag_pipe_out == 1) { //if there was a pipe from this command
                assert(i < commands_num - 1);
                assert(pcommands[i+1]._flag_pipe_in == 1);
                assert(pcommands[i+1]._fd_in > STDERR);
                if (dup2(pcommands[i]._fd_out, STDOUT) == -1) {
                    perror("Error: \"dup2()\" failed");
                    exit(0);
                }
                close(pcommands[i]._fd_out);
                close(pcommands[i+1]._fd_in);
            }
            execvp(pcommands[i]._commands[0], pcommands[i]._commands); //run the command
            perror("Error: \"execvp()\" failed");
            exit(1);
        }
        else
            printf("Child PID %d running\n", (int)pid);
    }

    //closing all the open pipe fd's
    for (int i = 0; i < commands_num; i++) {
        if (pcommands[i]._fd_in != STDIN) { //if there was another stdin that is not 0
            close(pcommands[i]._fd_in);
        }
        if (pcommands[i]._fd_out != STDOUT) { //if there was another stdout that is not 1
            close(pcommands[i]._fd_out);
        }
    }

    int status;
    pid_t corpse;
    while ((corpse = waitpid(-1, &status, 0)) > 0)
        printf("Child PID %d died with status 0x%.4X\n", (int)corpse, status);

    free(pcommands);
    return(0);
}
Just for my knowledge, how would you do it, so it won't get "indisputably messy"?
I'd probably keep the pipe information around so that the child didn't need to worry about the conditionals contained in the asserts (accessing the information for the child before or after it in the pipeline). If each child only needs to access information in its own data structure, it is cleaner. I'd reorganize the struct Command so it contained two pipes, plus indicators for which pipe contains information that needs closing. In many ways, not radically different from what you've got; just tidier in that child i only needs to look at pcommands[i] (a rough sketch of that layout is below).
You can see a partial answer in a different context at C Minishell adding pipelines.
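For illustration only (these field names are made up, not taken from the code above), the reorganized structure might look something like this, so that child i can redirect and close descriptors using nothing but pcommands[i]:

    typedef int Pipe[2];

    struct Command
    {
        char **argv;      /* command and its arguments, NULL-terminated        */
        Pipe   in_pipe;   /* pipe whose read end feeds this command's stdin    */
        Pipe   out_pipe;  /* pipe whose write end takes this command's stdout  */
        int    has_in;    /* 1 if in_pipe is live and both ends need closing   */
        int    has_out;   /* 1 if out_pipe is live and both ends need closing  */
    };

When the pipeline is built, the same pipe is stored twice: as out_pipe of command i and as in_pipe of command i+1. Each child then dup2()s from its own in_pipe and out_pipe and closes all four of those descriptors before calling execvp(), without ever touching pcommands[i-1] or pcommands[i+1].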

process termination doesn't affect waitpid()

I need to simulate the following bash commands using C under Linux (with fork, exec, kill, signal, wait, waitpid, dup2, open, sleep, pipe etc).
[0] echo 'tail -f $1' > /tmp/rtail
[1] /tmp/rtail ~/.bash_history >> /tmp/1.txt &
The PID of process [1] should be saved.
[2] Await termination of the command started in step [1]. After it terminates, print on the screen: "Program 1 terminated."
So far I have this code:
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <sys/wait.h>

int main(int argc, char *argv[]) {
    pid_t pID = fork();
    if (pID == 0) // child
    {
        int file = open("/tmp/rtail", O_CREAT | O_WRONLY);
        //Now we redirect standard output to the file using dup2
        dup2(file, 1);
        puts("tail -f $1");
        close(file);
        system("chmod 777 /tmp/rtail");
        exit(0);
    } else if (pID < 0) // failed to fork
    {
        printf("Failed to fork");
        exit(1);
        // Throw exception
    } else // parent
    {
        pid_t pID2 = fork();
        if (pID2 == 0) {
            char tmp1[20];
            sprintf(tmp1, "echo %i > /tmp/pidprog1", getpid());
            system(tmp1);
            int file = open("/tmp/1.txt", O_APPEND | O_WRONLY);
            //Now we redirect standard output to the file using dup2
            dup2(file, 1);
            FILE* proc = popen("sh /tmp/rtail ~/.bash_history", "r");
            char tmp[20];
            while (fgets(tmp, 40, proc) != NULL) {
                printf(tmp);
            }
            fclose(proc);
            exit(0);
        }
        else if (pID2 < 0) // failed to fork
        {
            printf("Failed to fork");
            exit(1);
            // Throw exception
        } else {
            FILE* fl = fopen("/tmp/pidprog1", "r");
            char buff[10];
            fgets(buff, 10, fl);
            int pidc = atoi(buff);
            fclose(fl);
            int status;
            waitpid(pidc, &status, 0);
            printf("Program 1 terminated\n");
        }
    }
    // Code executed by both parent and child.
    return 0;
}
The problem is that when I manually kill the process using the PID saved in /tmp/pidprog1, the parent process doesn't stop waiting and doesn't print the "Program 1 terminated" line.
The parent is very likely reading a garbage value into pidc. You are doing nothing to ensure that the grandchild has actually written the pid before the parent tries to read it. You need to use wait to ensure that valid pids are in the file. (Or just keep track of the pids from the return value of fork; see the sketch after this list.)
You are not doing enough error checking: what happens if any open fails? (e.g., when you try to open /tmp/1.txt for appending but it doesn't already exist?)
Why are you using fgets to read 40 characters into a buffer of size 20?
Why are you dup2()'ing and then using stdio (puts/printf) instead of just writing to the fd?
Why are you printing error messages to stdout instead of stderr (use perror)?
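As a sketch of the "keep track of the pids from the return value of fork" suggestion (everything inside the child left as it was), the parent can simply wait on pID2 instead of reading the pid back from /tmp/pidprog1:

    pid_t pID2 = fork();
    if (pID2 == 0) {
        /* ... set up the redirection and run the rtail command as before ... */
        exit(0);
    } else if (pID2 > 0) {
        int status;
        if (waitpid(pID2, &status, 0) == pID2)  /* the pid comes straight from fork() */
            printf("Program 1 terminated\n");
    }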

Implementing pipelining in a Linux shell

I'm trying to develop a shell in Linux as an Operating Systems project. One of the requirements is to support pipelining (where calling something like ls -l|less passes the output of the first command to the second). I'm trying to use the C pipe() and dup2() functions, but the redirection doesn't seem to be happening (less complains that it didn't receive a filename). Can you identify where I'm going wrong and how I might go about fixing it?
EDIT: I'm thinking that I need to use either freopen or fdopen somewhere since I'm not using read() or write()... is that correct?
(I've heard from others who've done this project that using freopen() is another way to solve this problem; if you think that would be better, tips for going that direction would also be appreciated.)
Here's my execute_external() function, which executes all commands not built-in to the shell. The various commands in the pipe (e.g. [ls -l] and [less]) are stored in the commands[] array.
void execute_external()
{
    int numCommands = 1;
    char **commands;
    commands = malloc(sizeof(char *));
    if (strstr(raw_command, "|") != NULL)
    {
        numCommands = separate_pipeline_commands(commands);
    }
    else
    {
        commands[0] = malloc(strlen(raw_command) * sizeof(char));
        commands[0] = raw_command;
    }
    int i;
    int pipefd[2];
    for (i = 0; i < numCommands; i++)
    {
        char **parameters_array = malloc(strlen(commands[i]) * sizeof(char *));
        int num_params;
        num_params = str_to_str_array(commands[i], parameters_array);
        if (numCommands > 1 && i > 0 && i != numCommands - 1)
        {
            if (pipe(pipefd) == -1)
            {
                printf("Could not open a pipe.");
            }
        }
        pid_t pid = fork();
        pmesg(2, "Process forked. ID = %i. \n", pid);
        int status;
        if (fork < 0)
        {
            fprintf(to_write_to, "Could not fork a process to complete the external command.\n");
            exit(EXIT_FAILURE);
        }
        if (pid == 0) // This is the child process
        {
            if (numCommands > 1) { close(pipefd[1]); } // close the unused write end of the pipe
            if (i == 0) // we may be pipelining and this is the first process
            {
                dup2(1, pipefd[1]); // set the source descriptor (for the next iteration of the loop) to this proc's stdout
            }
            if (i != 0 && (i != numCommands-1)) // we are pipelining and this is not the first or last process
            {
                dup2(pipefd[0], 0); // set the stdin of this process to the source of the previous process
            }
            if (execvp(parameters_array[0], parameters_array) < 0)
            {
                fprintf(to_write_to, "Could not execute the external command. errno: %i.\n", errno);
                exit(EXIT_FAILURE);
            }
            else { pmesg(2, "Executed the child process.\n"); }
        }
        else
        {
            if (numCommands > 1) { close(pipefd[0]); } // close the unused read end of the pipe
            if (backgrounding == 0) { while (wait(&status) != pid); } // Wait for the child to finish executing
        }
        free(parameters_array);
    }
    free(commands);
}
It looks like there are a couple of bugs going on in your code.
First, all your dup2's are only in the child. In order to connect a pipe you will need to dup2 the stdout of the parent to the write end pipefd[1] of the pipe. Then you would hook up the read end to stdin.
Also, it looks like one of your dup2's is backwards: with dup2, fildes is duplicated onto fildes2. So when you reassign stdin you want dup2(in, 0), and for stdout you want dup2(out, 1).
So a stripped-down piece of piping code is going to look like:
int pipefd[2];
pipe(pipefd);

pid_t pid = fork();
if (pid == 0) //The child
{
    dup2(pipefd[0], 0);
}
else
{
    dup2(pipefd[1], 1);
}
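Following the rule of thumb from the earlier answer, each process should also close both original pipe descriptors once dup2() has done its job, so a slightly fuller version of the same sketch would be:

    int pipefd[2];
    pipe(pipefd);

    pid_t pid = fork();
    if (pid == 0) //The child reads from the pipe
    {
        dup2(pipefd[0], 0);     /* stdin <- read end               */
        close(pipefd[0]);       /* close both originals after dup2 */
        close(pipefd[1]);
        /* exec the downstream command here */
    }
    else //The parent (or another child) writes to the pipe
    {
        dup2(pipefd[1], 1);     /* stdout -> write end             */
        close(pipefd[0]);
        close(pipefd[1]);
        /* run the upstream command here */
    }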
