Screenshot using OpenGL and/or X11 - C

I am trying to get a screenshot of the screen or of a window. I tried using functions from X11
and it works fine. The problem is that getting the pixels from the XImage takes a lot of time.
Then I tried to look for answers on how to do it using OpenGL. Here's what I've got:
#include <stdlib.h>
#include <stdio.h>
#include <cstdio>
#include <GL/glut.h>
#include <GL/gl.h>
#include <GL/glx.h>
#include <X11/Xlib.h>

int main(int argc, char **argv)
{
    int width = 1200;
    int height = 800;
    //_____________________________----
    Display *dpy;
    Window root;
    GLint att[] = { GLX_RGBA, GLX_DEPTH_SIZE, 24, GLX_DOUBLEBUFFER, None };
    XVisualInfo *vi;
    GLXContext glc;

    dpy = XOpenDisplay(NULL);
    if (!dpy) {
        printf("\n\tcannot connect to X server\n\n");
        exit(0);
    }
    root = DefaultRootWindow(dpy);
    vi = glXChooseVisual(dpy, 0, att);
    if (!vi) {
        printf("\n\tno appropriate visual found\n\n");
        exit(0);
    }
    glXMakeCurrent(dpy, root, glc);
    glc = glXCreateContext(dpy, vi, NULL, GL_TRUE);
    printf("vendor: %s\n", (const char*)glGetString(GL_VENDOR));
    //____________________________________________
    glXMakeCurrent(dpy, root, glc);
    glEnable(GL_DEPTH_TEST);

    GLubyte* pixelBuffer = new GLubyte[sizeof(GLubyte)*width*height*3*3];
    glReadBuffer(GL_FRONT);
    GLint ReadBuffer;
    glGetIntegerv(GL_READ_BUFFER, &ReadBuffer);
    glPixelStorei(GL_READ_BUFFER, GL_RGB);
    GLint PackAlignment;
    glGetIntegerv(GL_PACK_ALIGNMENT, &PackAlignment);
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_INT, pixelBuffer);

    int i;
    for (i = 0; i < 100; i++) printf("%u\n", ((unsigned int *)pixelBuffer)[i]);
    return 0;
}
When I run the program it returns an error:
X Error of failed request: BadAccess (attempt to access private resource denied)
Major opcode of failed request: 199 ()
Minor opcode of failed request: 26
Serial number of failed request: 20
Current serial number in output stream: 20
If I comment out the glXMakeCurrent(dpy, root, glc); line that comes before glc = glXCreateContext(dpy, vi, NULL, GL_TRUE);, it returns no errors, but all the pixels are 0.
How should I go about this problem? I am new to OpenGL and maybe I am missing something important here. Is there perhaps another way of getting the pixels of the screen or of a specific window?

I don't think what you are trying to do is possible. You can't use OpenGL to read pixels from a window you don't own, one which probably doesn't even use OpenGL. You need to stick to X11.
If you have an XImage you can get the raw pixels from ximage->data. Just make sure you are reading them in the correct format.
http://tronche.com/gui/x/xlib/graphics/images.html
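For instance, here's a minimal sketch of that approach (assuming a common 32-bit ZPixmap with an 0x00RRGGBB layout; real code should derive the channel shifts from img->red_mask and friends instead of hardcoding them):
#include <stdio.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>

int main(void)
{
    Display *dpy = XOpenDisplay(NULL);
    if (!dpy) return 1;
    Window root = DefaultRootWindow(dpy);
    XWindowAttributes attr;
    XGetWindowAttributes(dpy, root, &attr);

    // Grab the whole root window; the pixels end up in img->data
    XImage *img = XGetImage(dpy, root, 0, 0, attr.width, attr.height,
                            AllPlanes, ZPixmap);
    if (!img) return 1;

    // XGetPixel decodes img->data according to the image format;
    // the shifts below assume the masks sit at bits 16/8/0
    unsigned long p = XGetPixel(img, 0, 0);
    printf("pixel(0,0): r=%lu g=%lu b=%lu\n",
           (p & img->red_mask) >> 16, (p & img->green_mask) >> 8,
           p & img->blue_mask);

    XDestroyImage(img);
    XCloseDisplay(dpy);
    return 0;
}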

You can use XShmGetImage, but you have to query the extensions of the X11 server first to make sure the MIT-SHM extension is available. You also need to know how to set up and use a shared memory segment for this.
Querying the Extension:
http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavdevice/x11grab.c#l224
Getting the image:
http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavdevice/x11grab.c#l537
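To give an idea of how the pieces fit together, here is a minimal sketch of the shared-memory path (assuming MIT-SHM is present, with error checking mostly omitted; compile with gcc shm.c -o shm -lX11 -lXext):
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/extensions/XShm.h>

int main(void)
{
    Display *dpy = XOpenDisplay(NULL);
    if (!dpy || !XShmQueryExtension(dpy)) {
        fprintf(stderr, "no display or no MIT-SHM; fall back to XGetImage\n");
        return 1;
    }
    int scr = DefaultScreen(dpy);
    int w = DisplayWidth(dpy, scr), h = DisplayHeight(dpy, scr);

    // Create an XImage backed by a shared memory segment
    XShmSegmentInfo shminfo;
    XImage *img = XShmCreateImage(dpy, DefaultVisual(dpy, scr),
                                  DefaultDepth(dpy, scr), ZPixmap,
                                  NULL, &shminfo, w, h);
    shminfo.shmid = shmget(IPC_PRIVATE, img->bytes_per_line * img->height,
                           IPC_CREAT | 0600);
    shminfo.shmaddr = img->data = shmat(shminfo.shmid, NULL, 0);
    shminfo.readOnly = False;
    XShmAttach(dpy, &shminfo);

    // Grab the whole root window; pixels land directly in img->data
    XShmGetImage(dpy, DefaultRootWindow(dpy), img, 0, 0, AllPlanes);
    printf("first pixel: 0x%08lx\n", XGetPixel(img, 0, 0));

    // Clean up: detach from the server and our process, then remove the segment
    XShmDetach(dpy, &shminfo);
    XDestroyImage(img);
    shmdt(shminfo.shmaddr);
    shmctl(shminfo.shmid, IPC_RMID, NULL);
    XCloseDisplay(dpy);
    return 0;
}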

The following runs at about 140 fps on my platform. xcb_image_get() (called with XCB_IMAGE_FORMAT_Z_PIXMAP) will store all pixels in ximg->data, pixel by pixel. On my platform each pixel is 32 bits, each channel is 8 bits, and there are 3 channels (so 8 bits per pixel are unused).
/*
gcc ss.c -o ss -lxcb -lxcb-image && ./ss
*/
#include <stdio.h>
#include <xcb/xcb_image.h>

xcb_screen_t* xcb_get_screen(xcb_connection_t* connection){
    const xcb_setup_t* setup = xcb_get_setup(connection); // I think we don't need to free/destroy this!
    return xcb_setup_roots_iterator(setup).data;
}

void xcb_image_print(xcb_image_t* ximg){
    printf("xcb_image_print() Printing a (%u,%u) `xcb_image_t` of %u bytes\n\n", ximg->height, ximg->width, ximg->size);
    for(int i=0; i < ximg->size; i += 4){
        printf(" ");
        printf("%02x", ximg->data[i+3]);
        printf("%02x", ximg->data[i+2]);
        printf("%02x", ximg->data[i+1]);
        printf("%02x", ximg->data[i+0]);
    }
    puts("\n");
}

int main(){
    // Connect to the X server
    xcb_connection_t* connection = xcb_connect(NULL, NULL);
    xcb_screen_t* screen = xcb_get_screen(connection);

    // Get pixels!
    xcb_image_t* ximg = xcb_image_get(connection, screen->root, 0, 0, screen->width_in_pixels, screen->height_in_pixels, 0xffffffff, XCB_IMAGE_FORMAT_Z_PIXMAP);

    // ... Now all pixels are in ximg->data!
    xcb_image_print(ximg);

    // Clean-up
    xcb_image_destroy(ximg);
    xcb_disconnect(connection);
}

Related

Committing first page of sparse texture crashes at glfwDestroyWindow

The following OpenGL/GLFW/GLAD code (MCVE version) crashes the process with the following access violation exception in nvoglv64.dll:
0x00007FFC731F586F (nvoglv64.dll) in ConsoleApplication2.exe: 0xC0000005: Access violation reading location 0x0000000000024AA8.
at the glfwDestroyWindow(window) call at the end of the code listing below, and I don't know why. This call does not crash when glTexPageCommitmentARB is either not called at all or called with a commit argument of GL_FALSE/0.
I've read the specification of ARB_sparse_texture three times now, checking the conditions on every argument of glTexPageCommitmentARB (x, y, z, width, height, depth being multiples of the page sizes of the internal format), and I am somewhat sure that I've followed all instructions on how to use that function correctly. I've also checked for OpenGL errors with a debug context and a debug message callback; there were no outputs.
Further information:
OS: Windows 10 x64 (20H2)
Driver: Nvidia 461.09 (DCH)
GLAD Configuration: OpenGL 4.3 Core + ARB_sparse_texture + ARB_sparse_texture2
GLFW: Fresh local MSVC x64 Release build from Git commit 0b9e48fa3df9c18
The fprintf right before the glTexPageCommitmentARB call in the code below prints the following output before the crash at the end of the program:
256 128 1 32768
#include <stdlib.h>
#include <stdio.h>
#include <glad/glad.h>
#include <GLFW/glfw3.h>

int main(int argc, char** argv) {
    glfwInit();
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_VISIBLE, GLFW_FALSE);
    GLFWwindow* window = glfwCreateWindow(800, 600, "", NULL, NULL);
    if (window == NULL) {
        fprintf(stderr, "%s\n", "GLFW window NULL");
        exit(1);
    }
    glfwMakeContextCurrent(window);
    if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) {
        fprintf(stderr, "%s\n", "gladLoadGLLoader failed");
        exit(2);
    }
    if (!GLAD_GL_ARB_sparse_texture || !GLAD_GL_ARB_sparse_texture2) {
        fprintf(stderr, "%s\n", "GL_ARB_sparse_texture or GL_ARB_sparse_texture2 unsupported");
        exit(3);
    }
    GLuint tex;
    glGenTextures(1, &tex);
    glBindTexture(GL_TEXTURE_2D, tex);
    // activate sparse allocation for this texture
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_SPARSE_ARB, GL_TRUE);
    // use default page size index 0 (as per ARB_sparse_texture2)
    glTexParameteri(GL_TEXTURE_2D, GL_VIRTUAL_PAGE_SIZE_INDEX_ARB, 0);
    GLint width, height, depth, maxSparseTexSize;
    // query page size for width, height and depth
    // R16UI is supported as sparse texture internal format as per
    // https://www.khronos.org/registry/OpenGL/extensions/ARB/ARB_sparse_texture.txt
    glGetInternalformativ(GL_TEXTURE_2D, GL_R16UI, GL_VIRTUAL_PAGE_SIZE_X_ARB, 1, &width);
    glGetInternalformativ(GL_TEXTURE_2D, GL_R16UI, GL_VIRTUAL_PAGE_SIZE_Y_ARB, 1, &height);
    glGetInternalformativ(GL_TEXTURE_2D, GL_R16UI, GL_VIRTUAL_PAGE_SIZE_Z_ARB, 1, &depth);
    glGetIntegerv(GL_MAX_SPARSE_TEXTURE_SIZE_ARB, &maxSparseTexSize);
    fprintf(stderr, "%d %d %d %d\n", width, height, depth, maxSparseTexSize);
    glTexStorage2D(
        /*target*/GL_TEXTURE_2D,
        /*levels*/1,
        /*internalFormat*/GL_R16UI,
        /*width*/maxSparseTexSize,
        /*height*/maxSparseTexSize);
    // commit one page at (0, 0, 0)
    glTexPageCommitmentARB(GL_TEXTURE_2D,
        /*level*/0,
        /*xoffset*/0,
        /*yoffset*/0,
        /*zoffset*/0,
        /*width*/width,
        /*height*/height,
        /*depth*/depth,
        /*commit*/GL_TRUE);
    glfwDestroyWindow(window);
    glfwTerminate();
}
Am I missing anything here or is it just a driver or GLFW bug?
EDIT: Some more information:
The crash happens in a driver thread (nvoglv64.dll) right after the main thread called wglDeleteContext(...) for the GL context. When the Visual Studio 2019 debugger halts due to the exception, I can see three driver threads (all from nvoglv64.dll) existing (one of which issued the access violation exception) while the main thread is halted right after wglDeleteContext(...).
EDIT2: Experimenting more with this, the crash always occurs when the texture has not been dereferenced (ref count down to 0) and disposed of by the driver before the GL context is destroyed. For example, in the code above, when I call glDeleteTextures(1, &tex); right before glfwDestroyWindow(window), the crash does not occur (reproducibly). In the actual app I distilled the above MCVE from, the texture was also attached as a color attachment of an FBO. When I did not delete the FBO but only decremented the ref count of the texture (by calling glDeleteTextures(...)), the crash would still occur. Only when I also dereferenced the FBO via glDeleteFramebuffers(...), and the texture along with it, would the crash not occur anymore.
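In other words, the workaround for the MCVE above is to delete the texture explicitly while the context is still alive:
// dispose of the sparse texture before the context is torn down
glDeleteTextures(1, &tex);
glfwDestroyWindow(window);
glfwTerminate();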

Make Xorg recognize libevdev virtual device

I've been trying to create a simple program using libevdev to make a virtual device that simply moves the mouse by 50 points on the X axis every second. The program runs just fine; however, Xorg won't recognize the newly created virtual device.
I suppose it's going to be something trivial, but I cannot figure out what.
Xorg's log says:
[ 5860.310] (II) config/udev: Adding input device test device Mouse (/dev/input/event18)
[ 5860.310] (II) No input driver specified, ignoring this device.
[ 5860.310] (II) This device may have been added with another device file.
The program:
#include <libevdev/libevdev.h>
#include <libevdev/libevdev-uinput.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static void check(int i) {
    if (i < 0) {
        printf("%s\n", strerror(-i));
        exit(1);
    }
}

int main() {
    struct libevdev* evdev = libevdev_new();
    libevdev_set_name(evdev, "test device Mouse");
    libevdev_set_id_vendor(evdev, 0x1);
    libevdev_set_id_product(evdev, 0x1);
    libevdev_set_id_version(evdev, 0x1);
    libevdev_set_id_bustype(evdev, BUS_USB);
    check(libevdev_enable_event_type(evdev, EV_REL));
    check(libevdev_enable_event_code(evdev, EV_REL, REL_X, NULL));
    check(libevdev_enable_event_code(evdev, EV_REL, REL_Y, NULL));
    check(libevdev_enable_event_code(evdev, EV_SYN, SYN_REPORT, NULL));
    struct libevdev_uinput* uinput;
    check(libevdev_uinput_create_from_device(evdev, LIBEVDEV_UINPUT_OPEN_MANAGED, &uinput));
    for (int i = 0; i < 1000; i++) {
        check(libevdev_uinput_write_event(uinput, EV_REL, REL_X, 50));
        check(libevdev_uinput_write_event(uinput, EV_SYN, SYN_REPORT, 0));
        sleep(1);
    }
}
What am I doing wrong?
The issue was that mouse buttons need to be enabled for mouse movements to work.
Adding these lines fixes the issue:
check(libevdev_enable_event_type(evdev, EV_KEY));
check(libevdev_enable_event_code(evdev, EV_KEY, BTN_LEFT, NULL));
check(libevdev_enable_event_code(evdev, EV_KEY, BTN_RIGHT, NULL));
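Note that these enable calls must come before libevdev_uinput_create_from_device(), since the uinput device's capabilities are fixed at the moment it is created.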

DDA Line Drawing Algorithm has errors

Why am I getting an error saying 'setPixel not defined' with this code?
#include <windows.h>
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <GL/glut.h>

inline int round(const float a)
{
    return int(a + 0.5);
}

void init(void)
{
    glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
    gluOrtho2D(0.0, 200.0, 0.0, 200.0);
    glMatrixMode(GL_PROJECTION);
}

void LineSegment(int xa, int ya, int xb, int yb)
{
    glClear(GL_COLOR_BUFFER_BIT);
    glColor3f(1.0f, 0.0f, 0.0f);
    printf("Enter the initial value");
    scanf("%d%d", &xa, &ya);
    printf("Enter the final value");
    scanf("%d%d", &xb, &yb);
    int dx = xb - xa;
    int dy = yb - ya;
    int steps, k;
    float xIncrement, yIncrement, x = xa, y = ya;
    if (fabs(dx) > fabs(dy))
        steps = fabs(dx);
    else
        steps = fabs(dy);
    xIncrement = dx / (float)steps;
    yIncrement = dy / (float)steps;
    setPixel(round(x), round(y));
    for (k = 0; k < steps; k++);
    {
        x += xIncrement;
        y += yIncrement;
        setPixel(round(x), round(y));
    }
    glFlush();
}

int main(int argc, char** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
    glutCreateWindow("DDA Line Algorithm");
    glutDisplayFunc(LineSegment);
    init();
    glutMainLoop();
    return 0;
}
Because there is no setPixel function in OpenGL or GLUT, and as far as I can see from your code, you do not define one either. OpenGL deals with rendering primitives like points, lines, triangles, etc., but not directly with setting single pixels on the screen. Since it is unclear what you want to achieve, some suggestions:
If you want to draw a line in OpenGL, use the appropriate methods like glBegin(GL_LINES), etc. (although they are deprecated and should not be used anymore) or glDrawArrays(GL_LINES, ...).
If the goal is to implement a DDA software rasterizer, then you might have to write the pixels to a texture and then display that texture.
Because you haven't defined setPixel anywhere. It's not an OpenGL call. You need to write it yourself: either set pixels in a buffer (if you're using double buffering) that you later pass to glDrawPixels(), or draw directly with a call like glVertex2i(x,y). You can see an example of both approaches here and here.
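For instance, a minimal setPixel using (deprecated) immediate mode, assuming the gluOrtho2D projection from your init function:
void setPixel(GLint x, GLint y)
{
    glBegin(GL_POINTS); // draw a single point at (x, y)
    glVertex2i(x, y);
    glEnd();
}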
Also, your LineSegment function is broken. glutDisplayFunc registers a function that is called whenever GLUT needs to redraw the display. However, in this function you call scanf() to prompt the user for data - this is broken. You should prompt them once at the start, and then pass that data into the function (which will then run as often as needed once glutMainLoop is called).
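A sketch of that restructuring (with hypothetical globals; LineSegment is assumed to be stripped of its scanf calls so that it only rasterizes):
// endpoints read once at startup
int g_xa, g_ya, g_xb, g_yb;

void display(void)
{
    LineSegment(g_xa, g_ya, g_xb, g_yb); // only draws, no prompting
}

int main(int argc, char** argv)
{
    printf("Enter the initial value: ");
    scanf("%d%d", &g_xa, &g_ya);
    printf("Enter the final value: ");
    scanf("%d%d", &g_xb, &g_yb);
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGBA);
    glutCreateWindow("DDA Line Algorithm");
    init();
    glutDisplayFunc(display);
    glutMainLoop();
    return 0;
}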

uEye Camera and OpenCV Memory Access

I am using an Edmund Optics camera (35MM EO Megapixel Fixed FL Lens), OpenCV 2.4.6, and Ubuntu 12.04 LTS as my development environment. I am developing in C, not C++. The camera has an API that I am following, and I believe I have set everything up correctly: I initialize the camera, set memory locations, and freeze the video. I then use OpenCV to get the image from memory, but my image is nothing like what it should be (see below). Is my image data being pulled from a junk memory location? How can I access the image saved by is_FreezeVideo for image processing with OpenCV? The image that is printed out can be seen here: http://i.imgur.com/kW6aqB3.png
The code I am using is below.
#include "../Include/Camera.h"
#include <wchar.h>
#include <locale.h>
#include <stdlib.h>
#include <stdio.h>
#include <cv.h>
#include <highgui.h>
//#include <opencv2/opencv.hpp>
#include <ueye.h>

// uEye variables
HIDS m_hCam;             // handle to camera
HWND m_hWndDisplay;      // handle to display window
int m_Ret;               // return value of uEye SDK functions
int m_nColorMode = 0;    // Y8/RGB16/RGB24/REG32
int m_nBitsPerPixel = 8; // number of bits needed to store one pixel
int m_nSizeX = 1280;     // width of video
int m_nSizeY = 1024;     // height of video
int m_lMemoryId;         // grabber memory - buffer ID
char* m_pcImageMemory;   // grabber memory - pointer to buffer
int m_nRenderMode = IS_RENDER_FIT_TO_WINDOW; // render mode

void getAzimuth(){
}

void getElevation(){
}

void initializeCamera(){
    if (m_hCam != 0) {
        // free old image memory
        is_FreeImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);
        is_ExitCamera(m_hCam);
    }
    // init camera
    m_hCam = (HIDS) 0; // open next camera
    m_Ret = is_InitCamera(&m_hCam, NULL); // init camera
    if (m_Ret == IS_SUCCESS) {
        // retrieve original image size
        SENSORINFO sInfo;
        is_GetSensorInfo(m_hCam, &sInfo);
        m_nSizeX = sInfo.nMaxWidth;
        m_nSizeY = sInfo.nMaxHeight;
        printf("Width: %d Height: %d\n", m_nSizeX, m_nSizeY);
        // set up the color depth to the current windows setting
        is_GetColorDepth(m_hCam, &m_nBitsPerPixel, &m_nColorMode);
        is_SetColorMode(m_hCam, m_nColorMode);
        //printf("m_nBitsPerPixel=%i m_nColorMode=%i \n", m_nBitsPerPixel, IS_CM_BAYER_RG8);
        // memory initialization
        is_AllocImageMem(m_hCam, m_nSizeX, m_nSizeY, m_nBitsPerPixel, &m_pcImageMemory, &m_lMemoryId);
        // set memory active
        is_SetImageMem(m_hCam, m_pcImageMemory, m_lMemoryId);
        // display initialization
        is_SetImageSize(m_hCam, m_nSizeX, m_nSizeY);
        is_SetImagePos(m_hCam, 0, 0);
        is_SetDisplayMode(m_hCam, IS_SET_DM_DIB);
    } else {
        printf("No Camera Initialized! %c", 10);
    }
    if (m_hCam != 0) {
        INT dummy;
        char *pMem, *pLast;
        double fps = 0.0;
        if (is_FreezeVideo(m_hCam, IS_WAIT) == IS_SUCCESS) {
            m_Ret = is_GetActiveImageMem(m_hCam, &pLast, &dummy);
            m_Ret = is_GetImageMem(m_hCam, (void**)&pLast);
        }
        IplImage* tmpImg = cvCreateImageHeader(cvSize(m_nSizeX, m_nSizeY), IPL_DEPTH_8U, 1);
        tmpImg->imageData = &m_pcImageMemory;
        cvNamedWindow("src", 1);
        cvShowImage("src", tmpImg);
        cvWaitKey(0);
    }
}
You need to use the is_ImageFile function to save the image to a file. You can see how from the SDK's is_ImageFile sample. You can save it in the format (bmp, png, jpeg) you need.
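A sketch based on the SDK's is_ImageFile documentation (field names can differ between SDK versions, so treat this as an outline rather than verified code; it would slot in after the is_FreezeVideo call and reuses your m_hCam/m_Ret globals):
IMAGE_FILE_PARAMS ifp;
memset(&ifp, 0, sizeof(ifp));
ifp.pwchFileName = L"snapshot.png"; // hypothetical output path
ifp.nFileType = IS_IMG_PNG;         // or IS_IMG_BMP / IS_IMG_JPG
ifp.ppcImageMem = NULL;             // NULL: save the active image memory
ifp.pnImageID = NULL;
m_Ret = is_ImageFile(m_hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ifp, sizeof(ifp));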
The problem was the camera properties. After setting brightness and other properties, we now get an actual image.

Simulate button click using GTK+ using gtk_event_put and a GdkEventButton structure

This is a follow-up to How to insert synthetic mouse events into X11 input queue
I'm trying to create a program that takes input from an external device and generates mouse clicks, so that GTK+ will receive and handle the events as if the mouse click had happened normally.
It seems I can use a GdkEventButton structure:
https://developer.gnome.org/gdk/stable/gdk-Event-Structures.html#GdkEventButton
But I'm not sure how to determine the values to enter for each field. I'm looking for a small snippet of sample code or advice from somebody who has used gtk_event_put() with a GdkEventButton structure.
EDIT: If anybody knows a different or better way than my answer, please let me know.
There is another way, but the one I'm going to present involves pure X11; given your question's tags, an answer that uses X11 should be acceptable too.
That said, let me start by introducing the function where it all happens. One thing: I'm going to be a bit verbose, for other people who may not understand how to use this function.
void
mouse_click(Display *display, int x, int y, int click_type, struct timeval *t);
display: structure previously returned by XOpenDisplay() that contains all the information about the X server.
x: the x coordinate of the pointer relative to the root of the screen.
y: the y coordinate of the pointer relative to the root of the screen.
click_type: the type of the click; for this answer, either MOUSE_RIGHT_CLICK or MOUSE_LEFT_CLICK.
t: a timeval structure specifying the time interval the click should remain pressed.
Implementation
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <sys/types.h>
#include <sys/select.h>
#include <X11/Xlib.h>
#include <X11/Xutil.h>

#define portable_usleep(t) select(0, NULL, NULL, NULL, t)

enum { MOUSE_RIGHT_CLICK, MOUSE_LEFT_CLICK };

void
mouse_click(Display *display, int x, int y, int click_type, struct timeval *t)
{
    Window root;
    XEvent event;

    root = DefaultRootWindow(display);
    XWarpPointer(display, None, root, 0, 0, 0, 0, x, y);

    memset(&event, 0, sizeof(event));
    event.xbutton.type = ButtonPress;
    event.xbutton.button = click_type;
    event.xbutton.same_screen = True;

    XQueryPointer(display, root, &event.xbutton.root, &event.xbutton.window,
                  &event.xbutton.x_root, &event.xbutton.y_root,
                  &event.xbutton.x, &event.xbutton.y, &event.xbutton.state);

    event.xbutton.subwindow = event.xbutton.window;
    while (event.xbutton.subwindow) {
        event.xbutton.window = event.xbutton.subwindow;
        XQueryPointer(display, event.xbutton.window, &event.xbutton.root,
                      &event.xbutton.subwindow, &event.xbutton.x_root,
                      &event.xbutton.y_root, &event.xbutton.x, &event.xbutton.y,
                      &event.xbutton.state);
    }

    if (XSendEvent(display, PointerWindow, True, 0xfff, &event) == 0)
        fprintf(stderr, "XSendEvent()\n");
    XFlush(display);

    portable_usleep(t); /* keeps the click pressed */

    event.type = ButtonRelease;
    event.xbutton.state = 0x100;
    if (XSendEvent(display, PointerWindow, True, 0xfff, &event) == 0)
        fprintf(stderr, "XSendEvent()\n");
    XFlush(display);
}
Well, this function is pretty straightforward. For example, to left-click at position (500,645) with the button pressed for half a second:
int
main(void)
{
    int x;
    int y;
    Display *display;
    struct timeval t;

    display = XOpenDisplay(NULL);
    if (!display) {
        fprintf(stderr, "Can't open display!\n");
        exit(EXIT_FAILURE);
    }

    x = 500;
    y = 645;
    t.tv_sec = 0;
    t.tv_usec = 500000; /* 0.5 secs */

    mouse_click(display, x, y, MOUSE_LEFT_CLICK, &t);
    XCloseDisplay(display);
    return 0;
}
Compile
$ gcc -o click click.c -lX11
After some research I learned that GTK+/GDK 2.14 and newer have functions for creating automated test suites for GTK+ applications.
I was able to use gdk_test_simulate_button to simulate mouse clicks. This may not be the optimal solution, but right now it appears to work well.
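A minimal sketch of how that can look (assuming a realized widget; button 1 is the left mouse button):
#include <gtk/gtk.h>

// Simulate a left-button click at (x, y) inside a widget's GDK window
static void simulate_click(GtkWidget *widget, gint x, gint y)
{
    GdkWindow *win = gtk_widget_get_window(widget);
    gdk_test_simulate_button(win, x, y, 1, 0, GDK_BUTTON_PRESS);
    gdk_test_simulate_button(win, x, y, 1, 0, GDK_BUTTON_RELEASE);
}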
