Linux dropping root permissions - C script - c

I have the following C script running in linux (Ubuntu 12.0.4) as a set root UID script (chmod 4755)
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
/* Headers needed by this block (safe to repeat; system headers have guards). */
#include <unistd.h>
#include <grp.h>

/* Open a root-owned file, drop root privileges permanently, then fork and
 * demonstrate that an already-open fd remains writable after the drop. */
int main(void)
{
    int fd;

    /* /etc/zzz is assumed to be an important system file owned by root
     * with permission 0644; create it before running this program. */
    fd = open("/etc/zzz", O_RDWR | O_APPEND);
    if (fd == -1) {
        printf("Cannot open /etc/zzz\n");
        exit(1);
    }

    /* Simulate the privileged work performed by the program. */
    sleep(1);

    /* Root privileges are no longer needed: relinquish them permanently.
     * Order matters: supplementary groups first, then GID, then UID
     * (after the UID is dropped we may no longer be allowed to change
     * the others). Note setregid()/setreuid() take TWO arguments (real
     * and effective id); the original one-argument calls were wrong. */
    if (setgroups(0, NULL) != 0) {
        printf("Cannot drop supplementary groups\n");
        exit(1);
    }
    if (setregid(getgid(), getgid()) != 0) {
        printf("Still root GID!\n");
        exit(1);
    }
    if (setreuid(getuid(), getuid()) != 0) { /* getuid() returns the real uid */
        printf("Still root UID\n");
        exit(1);
    }
    /* Verify the drop is permanent: regaining root must now fail.
     * NOTE: when the program is run via sudo the REAL uid is already 0,
     * so getuid() returns 0 and the process stays root — run the setuid
     * binary as an ordinary user instead. */
    if (setuid(0) != -1) {
        printf("Still root UID\n");
        exit(1);
    }

    if (fork()) {   /* in the parent process */
        close(fd);
        exit(0);
    } else {        /* in the child process */
        /* Assume the child is compromised and attackers injected the
         * statements below. The write still succeeds because fd was
         * opened while the process was root and stays writable after
         * the privileges are dropped — close fd BEFORE dropping
         * privileges to prevent this. */
        write(fd, "Malicious Data\n", 15);
        close(fd);
    }
    return 0;
}
As far as I can see, it should be setting the permissions back to the real user (ID 1000) but I am getting the "Still root" errors.
I have tried inserting setuid(1000) and setuid(0) just above the setgroups call to remove any saved-UID issues; that lets the program get past the if statements, but the "Malicious Data" is still written.
I have also tried closing the file close(fd) before dropping permissions, as I was unsure if you'd be unable to edit permissions whilst a file opened as root was still open. But I was still having the same issue
Any ideas as to what I am doing wrong here? And why it isn't working?

I assume you run the program with sudo. In that case, getuid will return 0.
You'd have to explicitly set the uid to the desired value (e.g. 1000).
Also, "Malicious Data\n" will be written because the fd was already opened when the process had elevated permissions, and you can still write there even if your process lost permissions. The process now cannot open the file again.
Everything is according to spec: if you want to disallow the process from writing to the file, make sure to close it before dropping permissions.

Related

Linux ioctl for tty gives EPERM

I try to implement a getty program (the program, which acquires a specific tty, sets it to stdin, stdout and stderr and executes the login process normally; The typical implementation is agetty).
My problem is, it always gives me an EPERM error in the line with the ioctl call, which should change the controlling terminal.
The manpage about ioctl_tty(2) says:
TIOCSCTTY int arg
Make the given terminal the controlling terminal of the calling process. The calling process must be a session
leader and not have a controlling terminal already. For this case, arg should be specified as zero.
If this terminal is already the controlling terminal of a different session group, then the ioctl fails with EPERM,
unless the caller has the CAP_SYS_ADMIN capability and arg equals 1, in which case the terminal is stolen, and all
processes that had it as controlling terminal lose it.
Now, if I just for test run it with superuser privileges (that means GID and UID = 0) theoretically the kernel should just skip the capabilities checks, according to the manpage capabilities(7).
So what's wrong with the following code, when run as root:
#include <termios.h>
#include <stropts.h>
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
/* Extra header needed by this block (safe to repeat). */
#include <stdio.h>

/* Open the tty named by argv[1] (relative to /dev) and try to make it the
 * controlling terminal of this process, reporting any error. */
int main(int argc, char **argv)
{
    char path[64] = "/dev/";

    /* First argument is the tty device name. Bound the copy so an
     * over-long argument cannot overflow the buffer (strcat could). */
    if (argc > 1)
        snprintf(path + strlen(path), sizeof path - strlen(path), "%s", argv[1]);
    else
        return 1;

    int fd = open(path, O_RDWR, 0);
    if (fd == -1) {
        perror("open");
        return 1;
    }

    /* TIOCSCTTY requires the caller to be a session leader without a
     * controlling terminal; otherwise it fails with EPERM even for root.
     * Check and report the failure instead of ignoring the return value. */
    if (ioctl(fd, TIOCSCTTY, 1) == -1) {
        perror("ioctl(TIOCSCTTY)");
        return 1;
    }
    return 0;
}

error in executing system function with STDOUT_FILENO closed

I have an strange issue. I am not very good in C language. I am trying to create a daemon to execute my bash script based service in Linux. To make it easy I have made the code simple. Below is my code.
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <syslog.h>
/* Extra header needed by this block (safe to repeat). */
#include <sys/stat.h>   /* umask */

/* Minimal daemonization demo: fork, setsid, chdir to /, close the standard
 * fds, run a command via system(), and log its exit status to syslog. */
int main(int argc, char* argv[])
{
    pid_t process_id = 0;
    pid_t sid = 0;

    /* First fork: the parent exits so the child is not a process-group
     * leader and can call setsid(). */
    process_id = fork();
    if (process_id < 0)
    {
        printf("fork failed!\n");
        exit(1);
    }
    if (process_id > 0)
    {
        printf("daemon created with process id %d \n", process_id);
        exit(0);
    }

    umask(0);

    /* Start a new session; the daemon becomes session leader with no
     * controlling terminal. */
    sid = setsid();
    if (sid < 0)
    {
        fprintf(stderr, "Return error\n");
        exit(1);
    }

    if (chdir("/") < 0)   /* check the result instead of ignoring it */
    {
        exit(1);
    }

    close(STDIN_FILENO);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);

    /* With stdout closed, "ls" has nowhere to write and exits non-zero
     * (status 512 == exit code 2 << 8). Redirecting its output to
     * /dev/null gives the child a valid target, so it succeeds again. */
    int status = system("ls >/dev/null 2>&1");

    openlog("slog", LOG_PID|LOG_CONS, LOG_USER);
    syslog(LOG_INFO, "Returned status is %d", status);
    closelog();
    return (0);
}
As you can see, the above program will execute the system function to execute the ls command and output the exit code to system log.
After I compile the program and run, in the logs I can see the status code is 512. But If I comment out the following line,
close(STDOUT_FILENO);
then it works perfect and I can see in the log the status code is 0,
What I might be doing wrong?
UPDATE
My program is pretty big and I am not using ls in real environment. i made the program simple to reproduce the issue what I am facing. Also, to see the status of program, I am not looking at the output but the status code in the syslog.
In case it is not clear from other comments and answers, ls writes its output to stdout. If it can not write to stdout it terminates and (apparently) sets an status code of 512 (non-zero in any case).
The child process will inherit stdout from the parent, so if you do not close stdout in the parent, the child will have a stdout to write to and the command will succeed. If you close stdout, then the child has nowhere to write its output and the error occurs.
So, although ls is just an example for this question, your actual child process is writing to stdout which results in the same problem.
The simplest way around this, and assuming that you do not want the child's output, is to redirect the child's stdout on the command line:
int status = system("ls >/dev/null 2>&1");
This will redirect stdout and stderr to /dev/null, effectively throwing away the child's output while still giving it somewhere to write to.
Your daemon creation looks fine. But daemon processes by convention do not have a controlling terminal which you accomplish by closing all the standard file descriptors and call setsid() to create a new session to make the daemon a session leader. So, you can't make the daemon produce any output on the stdout stream. It obviously works if you don't close the stdout stream.
So, what you are doing is trying to write something to a tty from a terminal. So, you either don't need a daemon for this purpose or you need to write to a different file (instead of stdout).

How to open new terminal through C program in linux

I have written client-sever code where I have many connections, let say each node represents different process on same machine. And to do that I have obviously use fork().
But now problem is that all results get displayed on same terminal.
I want to know is there any way such that after each fork() or process creation new terminal gets opened and all results get displayed for that process on particular terminal.
P.S: I have tried system("gnome-terminal") but it just opens new terminal but all results get displayed again on same terminal only. All new terminals are just opens and remain blank without any result.
Also I have gone through this link How to invoke another terminal for output programmatically in C in Linux but I don't want to run my program with parameters or whatever. Its should be just like ./test
Here is my code:-
/* Fragment from the question: spawn node-1 child processes, each of which
 * opens a new terminal window and then runs its work. Relies on names
 * defined elsewhere (node, n_number, child_pid, file_scan, connection,
 * status). */
for(int i=0;i<node-1;i++)
{
n_number++;
usleep(5000);
child_pid[i]=fork();
if(!child_pid[i])
{
/* Child: system("gnome-terminal") launches a NEW terminal window, but
 * this process's stdin/stdout remain attached to the ORIGINAL terminal,
 * so the output of file_scan()/connection() still appears there —
 * presumably the behavior the question is about. TODO confirm those
 * helpers write to stdout. */
system("gnome-terminal");
file_scan();
connection();
exit(0);
}
if(child_pid[i]<0)
printf("Error Process %d cannot be created",i);
}
/* Parent: reap every child before continuing. */
for(int i=0;i<node-1;i++)
wait(&status);
So basically what I want is for each process there should be new terminal displaying only that process information or result.
What I exactly want:
After fork() I have some data related to say process 1 then I want its output to one terminal
Same goes with each process. So its like if I have 3 process then there must be 3 terminals and each must display process related data only.
I know it can be doable using IPC(Inter Process Communication) but is there any other way around? I mean just 2-3 commands or so? Because I do not want to invest too much in coding this part.
Thanks in advance!!!
Maybe you want something like that. This program is using the unix98 pseudoterminal (PTS), which is a bidirectional channel between master and slave. So, for each fork that you do, you will need to create a new PTS, by calling the triad posix_openpt, grantpt, unlockpt at master side and ptsname at slave side. Do not forget to correct the initial filedescriptors (stdin, stdout and sdterr) at each side.
Note that is just a program to prove the concept, so I am not doing any form of error check.
#define _XOPEN_SOURCE 600
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <libgen.h>
#include <string.h>
#include <fcntl.h>
/* Extra header needed by this block (safe to repeat). */
#include <sys/wait.h>   /* waitpid */

/* Proof-of-concept: create a Unix98 pseudoterminal, redirect the parent's
 * stdio to the master side, and have the child hand the slave side to an
 * xterm (-S mode) so the parent's output appears in the new window. */
int main(void) {
    pid_t pid;
    int fds, fdm, status;
    char slave[32], cmd[64];

    /* Master side of the PTY: open, grant, unlock. */
    fdm = posix_openpt(O_RDWR);
    if (fdm == -1 || grantpt(fdm) == -1 || unlockpt(fdm) == -1)
        return 1;

    /* Free fds 0-2 so the dup() calls below land on stdin/stdout/stderr. */
    close(0);
    close(1);
    close(2);

    pid = fork();
    if (pid != 0) { /* father: talk through the master side */
        dup(fdm);
        dup(fdm);
        dup(fdm);
        printf("Where do I pop up?\n");
        sleep(2);
        printf("Where do I pop up - 2?\n");
        waitpid(pid, &status, 0);
    } else {        /* child: open the slave side and hand it to xterm */
        fds = open(ptsname(fdm), O_RDWR);
        if (fds == -1)
            exit(1);
        dup(fds);
        dup(fds);
        dup(fds);
        /* ptsname() returns e.g. "/dev/pts/3" (11 bytes with the NUL),
         * which overflowed the original 10-byte buffer; basename() may
         * also modify its argument, so work on a private copy. The
         * original also formatted the name with %c instead of %s. */
        snprintf(slave, sizeof slave, "%s", ptsname(fdm));
        snprintf(cmd, sizeof cmd, "xterm -S%s/2", basename(slave));
        system(cmd);
        exit(0);
    }
    return 0;
}

Multiple processes write to the same file (C)

There is a program (Ubuntu 12.04 LTS, a single-core processor):
#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <fcntl.h>
#include <sys/types.h>
/* Extra headers needed by this block (safe to repeat). */
#include <stdlib.h>     /* exit */
#include <sys/stat.h>   /* S_IRUSR, S_IWUSR */
#include <sys/wait.h>   /* waitpid */

/* Open "res", fork, and have parent and child each write one byte through
 * the SHARED file offset inherited across fork(). */
int main(void){
    mode_t mode = S_IRUSR | S_IWUSR;
    int fd, pid;
    unsigned char pi1 = 0x33, pi2 = 0x34;   /* '3' and '4' */

    if((fd = open("res", O_WRONLY | O_CREAT | O_TRUNC, mode)) < 0){
        perror("open error");
        exit(1);
    }
    if((pid = fork()) < 0){
        perror("fork error");
        exit(1);
    }
    if(pid == 0) {
        if(write(fd, &pi2, 1) != 1){
            perror("write error");
            exit(1);
        }
    }else{
        if(write(fd, &pi1, 1) != 1){
            perror("write error");
            exit(1);
        }
        /* Wait for the child: if the parent (the terminal's controlling
         * process) exits first, the child can be killed by SIGHUP before
         * its write() runs — which is why a digit sometimes went missing. */
        waitpid(pid, NULL, 0);
    }
    close(fd);
    return 0;
}
The idea is to open the file for writing and then fork. The file position is shared by both processes. The strange thing is that the program's output to the file "res" is not constant: sometimes I get 34, sometimes just 4, sometimes just 3. The question is: why this output? (After all, if the position is shared, the output should always be either 34 or 43.)
In my suspicion, the process is interrupted in the function write, when he found a position in which to write.
When you spawn multiple processes with fork() there is no way to tell in which order they will be executed. It's up to the operating systems scheduler to decide.
So having multiple processes write to the same file is a recipe for disaster.
Regarding the question why sometimes one of the two numbers gets omitted: write first writes the data and then it increments the file pointer. I think it could be possible that the thread control changes in exactly that moment so that the second thread writes before the file position was updated. So it overwrites the data the other process just wrote.
Here's what I think is happening.
You open the file and you fork
The parent gets to run first (similar stuff can happen if the child runs first)
The parent writes 3 and exits
The parent was the controlling process for the terminal so the kernel sends a SIGHUP to all members of the foreground group
The default action of SIGHUP is to terminate the process so the child silently dies
A simple way to test this is to add sleep:
sleep(5); /* Somewhere in the child, right before you write. */
You'll see the child process dies instantaneously: the write is never performed.
Another way to test this is to ignore the SIGHUP, before you fork:
sigignore(SIGHUP); /* Define _XOPEN_SOURCE 500 before including signal.h. */
/* You can also use signal(SIGHUP, SIG_IGN); */
You'll see the process now writes both digits to the file.
The overwriting hypothesis is unlikely. After fork, both processes share a link to the same file descriptor in a system-wide table which also contains the file offset.
I ran your program several times, and the result was "34" or "43".
So I wrote a shell script:
#!/bin/bash
# Run the program 500 times and print every token found in its output file.
count=1
while [ "$count" -le 500 ]; do
    ./your_program
    for line in $(cat res); do
        echo "$line"
    done
    count=$((count + 1))
done
and ran your program 500 times. As we can see, it prints just '3' or '4' some of the time (about 20 times out of 500).
How we can explain this?
The answer is that:
when we fork() a child process,the child share the same file description and file state structure(which has the current file offset).
Normally, one process reads offset=0 first and writes the first byte, advancing the offset to 1; the other process then reads offset=1 and writes the second byte.
But some times, if the parent process get offset=0 from the file state structure ,and child get offset=0 at the same time,a process write the first byte,and the other overwrite the first byte. The result will be "3" or "4" (depends on whether the parent write first or child ). Because they both write the first byte of the file.
Fork and offset,see this
The standard says
Write requests of {PIPE_BUF} bytes or less shall not be interleaved with data from other processes doing writes on the same pipe.
Link - http://pubs.opengroup.org/onlinepubs/009696699/functions/write.html
The atomicity of write is only guaranteed in case of writing to a pipe for less than equal to PIPE_BUF and it is not specified for a regular file and we cannot assume that.
So in this case race condition is happening and that results in incorrect data for some runs.
(In my system also it is happening after say few thousand runs).
I think you should think of using mutex/semaphore/any other locking primitive to solve this.
Are you sure you need fork() exactly? fork() creates different process with different memory space (file descriptors and so on). Maybe pthreads would suit you? In case with pthreads you'll share the same fd for all processes. But anyways, you should really think about using mutexes in your project.

Detect when a fifo is opened from a program

I have a situation where I need to check if the other side of a fifo has opened it, however I can't use a open because otherwise the program will start doing stuff.
Why I have to do this: I have a program (a monitor) that launches a server program (both created by me). The monitor uses this fifo to communicate because the monitor can be closed/reopened while the server is already started.
The problem is when the monitor starts the server: in this case I have to wait in some way for fifos to be created and then open them.
Actually I'm using a while on the monitor that checks when the fifos are created, however in this way it opens the fifos before the server could do it (even if the instruction after the mkfifo is actually the open!!!).
You may say that I'm opening the fifos in wrong order on the monitor (I'm opening the Write fifo(WRONLY) before the read fifo), the problem is that I can't revert this order because it's required that the server will wait for clients on the open (RDONLY) fifo.
Any suggestion on how to avoid this race condition?
I'm actually using a sleep in the monitor after checking if fifos are created, this obviusly solves the problem but I think is definitely not correct.
Thanks to everyone
Edit 1:
This is how things are working at the moment
Server
mkfifo(fifo1)
mkfifo(fifo2)
open(fifo1 O_RDONLY)
open(fifo2 O_WRONLY)
Monitor
while (fifo1 doesn't exists && fifo2 doesn't exists);
open(fifo1 O_WRONLY)
open(fifo2 O_RDONLY)
I think the race condition is quite explicit now, it's important to notice that fifos are blocking (only RDONLY are blocking, WRONLY will not block even if there isn't anyone on the other side => this is a unix behaviour, not designed by me).
Edit 2:
The race condition happens at first fifo open level. I must open the first fifo on the server before the monitor does it.
You may want to use a named semaphore using sem_open() that is visible to each program at the file-system level in order to synchronize the two programs. Basically your monitor program will wait on the locked semaphore until the server increases it. At that point, all the fifo's will be initialized, and you can move foward with your monitor with the fifo's in a known-state.
Make sure to use the O_CREAT and O_EXCL flags on the initial call to sem_open() so that the creation of the semaphore is atomic. For example, both the monitor and the server program will attempt to create the semaphore at startup if it doesn't already exist ... if it exists, the call will fail, meaning the either the monitor or the server, but not both programs, gained the right to create the semaphore and initialize it. The monitor then waits on the semaphore while the server initializes the fifo's ... once the fifo's are initialized, the server releases the semaphore, and the monitor is then able to continue.
Here's the process as I'm envisioning it ... I believe it should effectively solve your race condition:
In the monitor:
Create the named semaphore and set it in a locked state
Launch the server
Wait for the FIFO's to be visible at the file-system level
Open fifo1 for writing (non-blocking)
Open fifo2 for reading (blocks until the server opens fifo2 for writing)
Wait on the semaphore (possibly with a timeout) until the server unlocks it, indicating that it has now opened both FIFO's successfully.
In the server:
Create the FIFO's
Open fifo1 (blocks until the monitor opens it for writing)
Open fifo2 (non-blocking)
Unlock the semaphore now that the server has opened both FIFO's
So basically your monitor cannot continue until there is a "known-state" where everything is properly initialized ... the server indicates that state to the monitor via the named semaphore.
At least what I found 'till now makes me understand that there isn't a way to detect if a fifo is opened, except if you open it too.
Edit 1:
As Jason stated, there are two ways (both not allowed in my homework however):
1) *You could do a search through /proc/PID/fd (replace PID with a numerical process ID) to see what client processes had already opened your FIFO*
2) Another option would be to call fuser on your FIFO
However, the first one requires something that teachers don't want: watching inside proc/PID/fd. The second one I heared requires root privileges, and this is again something I can't do. Hopefully this will help someone else in the future.
If you do the open()s in the right order, there is no race condition. (the only possible race would be a third process interfering with the same fifos) From the fine manual :
"However, it has to be open at both ends simultaneously before you can proceed to do any input or output operations on it. Opening a FIFO for reading normally blocks until some other process opens the same FIFO
for writing, and vice versa."
This means that the ordering
{ process 1: open (fifo1, RO); process2: open (fifo1, WO); }
...
{ process 1: open (fifo2, WO); process2: open (fifo2, RO); }
will always succeed (given no process starvation) The order of operations on each fifo is unimportant; for fifo1 either process1 or process2 can go first (and will be blocked until the other side succeeds).
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#define FIFO1 "fifo1"
#define FIFO2 "fifo2"
void do_master(void);
void do_slave(void);
void randsleep(unsigned max);
/************************************/
/* Extra header needed by this block (safe to repeat). */
#include <errno.h>   /* errno, EEXIST */

/* Master side: create both FIFOs, open the write end first and the read end
 * second (the slave opens them in the opposite order, so neither side
 * deadlocks), exchange one message, then clean up. */
void do_master(void)
{
    int in, out, rc;
    char buff[20];

    randsleep(5);
    /* A FIFO left over from a previous run (EEXIST) is fine. */
    if (mkfifo(FIFO1, 0644) == -1 && errno != EEXIST) {
        fprintf(stderr, "[Master]: failed creating fifo1\n");
        return;
    }
    randsleep(7);
    if (mkfifo(FIFO2, 0644) == -1 && errno != EEXIST) {
        fprintf(stderr, "[Master]: failed creating fifo2\n");
        return;
    }

    /* Blocks until the slave opens FIFO2 for reading. */
    out = open(FIFO2, O_WRONLY);
    if (out == -1) {
        fprintf(stderr, "[Master]: failed opening output\n" );
        return;
    }
    fprintf(stderr, "[Master]: opened output\n" );

    /* Blocks until the slave opens FIFO1 for writing. */
    in = open(FIFO1, O_RDONLY);
    if (in == -1) {
        fprintf(stderr, "[Master]: failed opening input\n" );
        close(out);
        return;
    }
    fprintf(stderr, "[Master]: opened input\n" );

    rc = write( out, "M2S\n\0" , 5);
    fprintf(stderr, "[Master]: wrote %d\n", rc );

    /* Read one byte less than the buffer and NUL-terminate, so the %s
     * print below is safe even if the peer's data carries no NUL. */
    rc = read( in, buff , sizeof buff - 1);
    if (rc < 0)
        rc = 0;
    buff[rc] = '\0';
    fprintf(stderr, "[Master]: read %d: %s\n", rc, buff );

    /* Close the descriptors (the original leaked them) and remove the FIFOs. */
    close(in);
    close(out);
    unlink(FIFO1);
    unlink(FIFO2);
}
/***********************************/
/* Slave side: open the read end of FIFO2 first and the write end of FIFO1
 * second — the mirror image of the master's order — retrying until the
 * master has created and opened the FIFOs, then exchange one message. */
void do_slave(void)
{
    int in, out, rc;
    unsigned attempt = 0;
    char buff[20];

    /* Retry until the master has created FIFO2 and opened it for writing. */
    while ((in = open(FIFO2, O_RDONLY)) == -1) {
        fprintf(stderr, "[Slave%u]: failed opening input\n", ++attempt );
        randsleep(2);
    }
    fprintf(stderr, "[Slave]: opened input\n" );

    /* Retry until the master has FIFO1 open for reading. */
    while ((out = open(FIFO1, O_WRONLY)) == -1) {
        fprintf(stderr, "[Slave%u]: failed opening output\n", ++attempt );
        randsleep(3);
    }
    fprintf(stderr, "[Slave]: opened output\n" );

    rc = write( out, "S2M\n\0" , 5);
    fprintf(stderr, "[Slave]: wrote %d\n", rc );
    rc = read( in, buff , sizeof buff);
    fprintf(stderr, "[Slave]: read %d:%s\n", rc, buff );
}
/*************************************/
/* Sleep for a pseudo-random number of seconds in [0, max).
 * rand() is never seeded in this program, so the sequence repeats on
 * every run — acceptable for this demo. */
void randsleep(unsigned max)
{
    /* max == 0 would make the modulo below undefined; treat it as "no sleep". */
    if (max == 0)
        return;
    sleep(rand() % max);
}
/*************************************/
/* Fork once: the child runs the slave side, the parent the master side. */
int main(void)
{
    pid_t pid = fork();

    if (pid == -1)
        exit(1);

    if (pid == 0)
        do_slave();
    else
        do_master();

    exit(0);
}
Consider using a Unix domain sockets of type SOCK_STREAM. The server will bind its socket to a name in the filesystem. Each client gets its own bidirectional connection to the server.

Resources