SDL2 rendering does not go beyond 30 FPS on Rockchip-based board

I am trying to implement animations on a Rock64 ARM board, which has a Rockchip RK3328 with a Mali GPU. I am using SDL2 and I am experiencing a low framerate. So I wrote some testing code:
#include <SDL2/SDL.h>
#include <stdbool.h>
#include <stdio.h>   /* for printf */

// generates a texture with the given color and the output's size
SDL_Texture *colorTexture(SDL_Renderer *renderer,
                          unsigned char r, unsigned char g, unsigned char b) {
    int w, h, pitch;
    SDL_GetRendererOutputSize(renderer, &w, &h);
    SDL_Texture *ret = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888,
                                         SDL_TEXTUREACCESS_STREAMING, w, h);
    void *pixels;
    SDL_LockTexture(ret, NULL, &pixels, &pitch);
    for (int y = 0; y < h; ++y) {
        unsigned char *cur = (unsigned char *)pixels + y * pitch;
        for (int x = 0; x < w; ++x) {
            *cur++ = 255; *cur++ = b; *cur++ = g; *cur++ = r;
        }
    }
    SDL_UnlockTexture(ret);
    return ret;
}

int main(int argc, char *argv[]) {
    SDL_Init(SDL_INIT_VIDEO | SDL_INIT_EVENTS);
    SDL_Window *window;
    SDL_Renderer *renderer;
    SDL_CreateWindowAndRenderer(640, 480, SDL_WINDOW_OPENGL, &window, &renderer);

    SDL_Texture *blue = colorTexture(renderer, 0, 0, 255);
    SDL_Texture *red = colorTexture(renderer, 255, 0, 0);

    int w, h;
    SDL_GetWindowSize(window, &w, &h);

    bool running = true;
    SDL_Texture *first = blue, *second = red;
    uint32_t start = SDL_GetTicks();
    int frameCount = 0;
    while (running) {
        SDL_Event evt;
        for (int x = 0; x < w; ++x) {
            while (SDL_PollEvent(&evt)) {
                if (evt.type == SDL_QUIT) {
                    running = false;
                    goto after_animation;
                }
            }
            SDL_Rect firstRect = {.x = 0, .y = 0, .w = x, .h = h},
                     secondRect = {.x = x, .y = 0, .w = w - x, .h = h};
            SDL_RenderCopy(renderer, first, &firstRect, &firstRect);
            SDL_RenderCopy(renderer, second, &secondRect, &secondRect);
            SDL_RenderPresent(renderer);
            frameCount++;
            uint32_t cur = SDL_GetTicks();
            if (cur - start >= 1000) {
                printf("%d FPS\n", frameCount);
                frameCount = 0;
                start = cur;
            }
        }
    after_animation:;
        SDL_Texture *tmp = first; first = second; second = tmp;
    }
    SDL_Quit();
}
According to the post here, the board is able to reach framerates above 60 FPS even for 4K output (my monitor is a 4K TV). However, my test code reports only 30 FPS. It only renders two textures that are exactly the screen's size and scrolls horizontally, showing one texture on the left and the other on the right. I believe I should be able to get beyond 30 FPS. How can I speed up the rendering?
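One thing worth checking first (a minimal diagnostic sketch, not part of the original question) is which render driver SDL actually picked and whether vsync is active: on a 60 Hz display, a frame that takes just over 16 ms gets snapped down to 30 FPS by double-buffered vsync. Using the renderer from the test program above:

    SDL_RendererInfo info;
    if (SDL_GetRendererInfo(renderer, &info) == 0) {
        printf("renderer: %s\n", info.name);   /* e.g. "opengles2" vs "software" */
        printf("accelerated: %s\n",
               (info.flags & SDL_RENDERER_ACCELERATED) ? "yes" : "no");
        printf("vsync: %s\n",
               (info.flags & SDL_RENDERER_PRESENTVSYNC) ? "yes" : "no");
    }

If this reports a software renderer, creating the renderer explicitly with SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED) or setting the SDL_HINT_RENDER_DRIVER hint may help.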

Related

How do I draw a Pixmap with GTK?

Using GTK3, I have been trying to draw a Pixmap from a memory buffer. I created a memory buffer and filled it with alternating rows of colours in 32-bit RGBA format, and I have been trying the following function:
gdk_pixbuf_new_from_data(const guchar *data, GdkColorspace colorspace, gboolean has_alpha, int bits_per_sample, int width, int height, int rowstride, GdkPixbufDestroyNotify destroy_fn, gpointer destroy_fn_data)
Using this, I have been able to wrap the memory buffer into a GdkPixbuf*; however, when I attempt to draw the pixbuf to the screen with Cairo, the image appears distorted.
Here is my test program that I have been experimenting with:
#include <gtk/gtk.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>

const int WIDTH = 1080;
const int HEIGHT = 720;

GtkWidget* mainWindow;
int currentCol = 0;
uint32_t* framebuffer = NULL;
GdkPixbuf* pixbuf = NULL;

typedef struct _rgbColor {
    uint8_t red;
    uint8_t green;
    uint8_t blue;
    uint8_t alpha;
} rgbColor;

void onWindowDestroy (GtkWidget* object, gpointer user_data) {
    gtk_main_quit();
}

gboolean onTimerTick(gpointer user_data) {
    rgbColor c = {0, 0, 0, 255};
    if (currentCol == 0) {
        c.red = 255;
    }
    if (currentCol == 1) {
        c.green = 255;
    }
    if (currentCol == 2) {
        c.blue = 255;
        currentCol = -1;
    }
    currentCol++;
    fillWithColour(framebuffer, c);
    rgbColor c1 = {0, 0, 255, 255};
    fillEveryInterval(framebuffer, c1, 20);
    gtk_widget_queue_draw(mainWindow);
    return 1;
}

gboolean onDraw(GtkWidget* widget, cairo_t *cr, gpointer user_data) {
    gdk_cairo_set_source_pixbuf(cr, pixbuf, 0, 0);
    cairo_paint(cr);
    return 0;
}

void fillWithColour(uint32_t* fb, rgbColor c) {
    for (int y = 0; y < HEIGHT; y++) {
        for (int x = 0; x < WIDTH; x++) {
            encodePixel(fb, c, x, y);
        }
    }
}

void fillEveryInterval(uint32_t* fb, rgbColor c, int interval) {
    for (int y = 1; y < HEIGHT; y += interval) {
        for (int x = 0; x < WIDTH; x++) {
            encodePixel(fb, c, x, y);
        }
    }
}

void encodePixel(uint32_t* fb, rgbColor c, int x, int y) {
    uint32_t r, g, b, a;
    r = c.red;
    g = c.green << 8;
    b = c.blue << 16;
    a = c.alpha << 24;
    *(fb + (sizeof(uint32_t)*y+x)) = b | g | r | a;
}

int main() {
    framebuffer = malloc(sizeof(uint32_t)*WIDTH*HEIGHT);
    rgbColor c = {255, 0, 0, 255};
    fillWithColour(framebuffer, c);
    gtk_init(NULL, NULL);
    mainWindow = gtk_window_new(GTK_WINDOW_TOPLEVEL);
    gtk_window_set_default_size(GTK_WINDOW(mainWindow), WIDTH, HEIGHT);
    gtk_window_set_title(GTK_WINDOW(mainWindow), "Framebuffer test");
    GtkWidget* drawingArea = gtk_drawing_area_new();
    gtk_container_add(GTK_CONTAINER(mainWindow), drawingArea);
    g_signal_connect(GTK_WINDOW(mainWindow), "destroy", (GCallback)onWindowDestroy, NULL);
    g_signal_connect(GTK_DRAWING_AREA(drawingArea), "draw", (GCallback)onDraw, NULL);
    g_timeout_add(500, onTimerTick, NULL);
    gtk_widget_show_all(GTK_WINDOW(mainWindow));
    pixbuf = gdk_pixbuf_new_from_data(framebuffer, GDK_COLORSPACE_RGB, true, 8, WIDTH, HEIGHT, WIDTH*4, NULL, NULL);
    gtk_main();
}
I can't seem to figure out what is causing the issue. I have experimented with eliminating the alpha channel and packing the RGB values into 24 bits, but I was not successful with that method either. I think it may have something to do with the rowstride; however, I have not been able to find a value which corrects the issue. Am I on the right track here, or is there a better way to draw an RGB buffer to the screen using GTK?
You're just storing the pixels in the wrong location. In encodePixel, change this line:
    *(fb + (sizeof(uint32_t)*y+x)) = b | g | r | a;
to this:
    fb[WIDTH*y+x] = b | g | r | a;
And as a side note: you should do something about the many warnings from the compiler.
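To spell out why this fixes the distortion (my gloss, not part of the original answer): fb is a uint32_t*, so pointer arithmetic already advances in 4-byte steps; multiplying y by sizeof(uint32_t) instead of WIDTH both double-scales the offset and uses the wrong row length. The corrected function would look roughly like this:

    void encodePixel(uint32_t* fb, rgbColor c, int x, int y) {
        uint32_t r = c.red;
        uint32_t g = c.green << 8;
        uint32_t b = c.blue << 16;
        uint32_t a = c.alpha << 24;
        /* one uint32_t per pixel, WIDTH pixels per row */
        fb[WIDTH * y + x] = b | g | r | a;
    }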

SDL_UpdateTexture ARGB much faster than RGBA

I was trying to use SDL_UpdateTexture to create a texture from an allocated buffer of pixels, and I was surprised that it wasn't even rendering at 60fps (with the main culprit being SDL_UpdateTexture):
#include <SDL2/SDL.h>
#include <sys/time.h>
#include <stdio.h>   /* for printf */
#include <stdlib.h>  /* for malloc/free */

static double time_in_ms(void) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000.0 + tv.tv_usec / 1000.0;
}

int main(void) {
    SDL_Init(SDL_INIT_VIDEO);
    SDL_Window *window = SDL_CreateWindow("Pixel formats", 0, 0, 1920, 1080, 0);
    SDL_Renderer *renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED);
    double last_frame = time_in_ms();
    while (1) {
        int width, height;
        SDL_Event e;
        while (SDL_PollEvent(&e)) {
            if (e.type == SDL_QUIT) {
                return 0;
            }
        }
        SDL_GetWindowSize(window, &width, &height);
        SDL_Texture *texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STREAMING, width, height);
        Uint32 *pixels = malloc(width * height * sizeof(*pixels));
        /* fill buffer with blue pixels */
        for (int y = 0; y < height; y++) {
            Uint32 *row = pixels + y * width;
            for (int x = 0; x < width; x++) {
                row[x] = 0x0000FFFF;
            }
        }
        double update_begin = time_in_ms();
        SDL_UpdateTexture(texture, NULL, pixels, width * sizeof(*pixels));
        double update_end = time_in_ms();
        SDL_SetTextureBlendMode(texture, SDL_BLENDMODE_NONE);
        SDL_RenderCopy(renderer, texture, NULL, NULL);
        SDL_RenderPresent(renderer);
        SDL_DestroyTexture(texture);
        free(pixels);
        double this_frame = time_in_ms();
        printf("frame took %fms\n", this_frame - last_frame);
        printf(" - update texture: %fms\n", update_end - update_begin);
        last_frame = this_frame;
    }
}
But, if I just change SDL_PIXELFORMAT_RGBA8888 to SDL_PIXELFORMAT_ARGB8888 (and update 0x0000FFFF to 0xFF0000FF), all of a sudden, SDL_UpdateTexture goes down from taking ~15ms per frame to ~1ms. This happens regardless of whether renderer is an accelerated or software renderer. This seems very strange to me, since even if SDL_Textures are internally ARGB, it takes far less than 15ms to convert from RGBA to ARGB:
for (int y = 0; y < height; y++) {
    Uint32 *row = pixels + y * width;
    for (int x = 0; x < width; x++) {
        Uint32 a = row[x] & 0xFF;
        row[x] >>= 8;
        row[x] |= a << 24;
    }
}
Why might it be that SDL_UpdateTexture is so much faster with ARGB than RGBA (and does this vary across platforms)?
(SDL_PIXELFORMAT_RGB888 is also quite fast (~2ms), but not as fast as ARGB)
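A likely explanation (my reading, not confirmed in the original thread) is that a renderer only handles a few pixel formats natively; when the texture's format matches one of them, SDL_UpdateTexture can copy rows straight through, but otherwise it converts every pixel on the CPU first. A sketch of creating the texture in a format the renderer itself reports as native, using width and height as in the loop above:

    SDL_RendererInfo info;
    SDL_GetRendererInfo(renderer, &info);
    /* the first entry is typically the renderer's preferred format (often ARGB8888) */
    Uint32 native_format = info.texture_formats[0];
    printf("native format: %s\n", SDL_GetPixelFormatName(native_format));
    SDL_Texture *texture = SDL_CreateTexture(renderer, native_format,
                                             SDL_TEXTUREACCESS_STREAMING, width, height);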

Mandelbrot set in OpenGL - C

I'm new to OpenGL and I am trying to get a mandelbrot set computed with OpenGL and GLFW.
I found the code here, but freeglut is broken on my system and for some reason complains about no callback being set even though one clearly is. It does, however, flash one frame and then crash; in that frame I can see the Mandelbrot set, so I know the math is correct.
I figured this would be a good opportunity to learn more about OpenGL and GLFW, so I set to work making this happen.
After double checking everything, I can see that it definitely calculates the values then switches the buffers properly.
However, I think I'm missing two things:
1. A vertex which the texture can actually be applied to
2. I'm not sure what's happening with the calculation: it looks like it binds a texture named texture, but then calculates the values into a struct array which doesn't seem to be associated with it in any way. I bind the texture with tex (texture) and then send the struct array to glTexImage2D.
EDIT: (from learnopengl.com) "Once glTexImage2D is called, the currently bound texture object now has the texture image attached to it.", so it can't be #2.
If someone could just point me in the right direction or confirm my suspicions, that would be awesome.
My code is here:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>  /* for uint8_t */
#include <math.h>
#define GLEW_STATIC
#include <GL/glew.h>
#include <pthread.h>
#include <GLFW/glfw3.h>
#include <GL/gl.h>

#define VAL 255

typedef struct {
    uint8_t r;
    uint8_t g;
    uint8_t b;
} rgb_t;

rgb_t **tex_array = 0;
rgb_t *image;
int gwin;
int width = 640;
int height = 480;
int tex_w, tex_h;
double scale = 1./256;
double cx = -.6, cy = 0;
int color_rotate = 0;
int saturation = 1;
int invert = 0;
int max_iter = 256;
int dump = 1;
GLFWwindow* window;
int global_iterator = 0;
int conversion_iterator_x = 0;
int conversion_iterator_y = 0;

GLFWwindow* init_glfw();
void set_texture(GLuint tex);
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void render(GLuint tex);
void screen_dump();
void keypress(unsigned char key, int x, int y);
void hsv_to_rgb(int hue, int min, int max, rgb_t *p);
void calc_mandel(rgb_t* px);
void alloc_texture();
void mouseclick(int button, int state, int x, int y);
void resize(int w, int h);

int main(int c, char **v)
{
    GLFWwindow* win = init_glfw();
    glfwSetWindowPos(win, 1000, 500);
    GLuint texture;
    glGenTextures(1, &texture);
    set_texture(texture);
    /* Loop until the user closes the window */
    while (!glfwWindowShouldClose(win))
    {
        render(texture);
        /* Swap front and back buffers */
        glfwSwapBuffers(win);
        /* Poll for and process events */
        glfwPollEvents();
        if (glfwGetKey(win, GLFW_KEY_ESCAPE) == GLFW_PRESS) {
            glfwSetWindowShouldClose(win, GL_TRUE);
        }
    }
    return 0;
}
void set_texture(GLuint tex)
{
    printf("Allocating space\n");
    alloc_texture();
    printf("Calculating mandel... %d\n", global_iterator);
    ++global_iterator;
    calc_mandel(image);
    printf("mandel calculation complete\n");
    glEnable(GL_TEXTURE_2D);
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, tex_w, tex_h,
                 0, GL_RGB, GL_UNSIGNED_BYTE, tex_array[0]);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    printf("Rendering to screen...\n");
    render(tex);
}

void alloc_texture()
{
    int i;
    int ow = tex_w;
    int oh = tex_h;
    for (tex_w = 1; tex_w < width; tex_w <<= 1);
    for (tex_h = 1; tex_h < height; tex_h <<= 1);
    if (tex_h != oh || tex_w != ow) {
        tex_array = realloc(tex_array, tex_h * tex_w * 3 + tex_h * sizeof(rgb_t*));
    }
    for (tex_array[0] = (rgb_t *)(tex_array + tex_h), i = 1; i < tex_h; i++) {
        tex_array[i] = tex_array[i - 1] + tex_w;
    }
}

void render(GLuint tex)
{
    double x = (double)width /tex_w,
           y = (double)height/tex_h;
    glClear(GL_COLOR_BUFFER_BIT);
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
    glBindTexture(GL_TEXTURE_2D, tex);
    glBegin(GL_QUADS);
    glTexCoord2f(0, 0); glVertex2i(0, 0);
    glTexCoord2f(x, 0); glVertex2i(width, 0);
    glTexCoord2f(x, y); glVertex2i(width, height);
    glTexCoord2f(0, y); glVertex2i(0, height);
    glEnd();
    glFlush();
    glFinish();
}
GLFWwindow* init_glfw()
{
    /* Initialize the library */
    if (!glfwInit()) {
        return NULL;
    }
    /*
     * Configure window options here if you so desire
     *
     * i.e.
     */
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
    glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
    //glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
    //glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);

    //the fourth parameter of glfwCreateWindow should be NULL for windowed mode and
    //glfwGetPrimaryMonitor() for full screen mode

    /* Create a windowed mode window and its OpenGL context */
    window = glfwCreateWindow(width, height, "Mandelbrot", NULL, NULL);
    if (!window)
    {
        glfwTerminate();
        return NULL;
    }
    /* Make the window's context current */
    glfwMakeContextCurrent(window);
    glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
    /*
     * Initialize glew here
     */
    glewExperimental = GL_TRUE;
    glewInit();
    return window;
}

void calc_mandel(rgb_t* px)
{
    int i, j, iter, min, max;
    double x, y, zx, zy, zx2, zy2;
    min = max_iter;
    max = 0;
    for (i = 0; i < height; i++) {
        px = tex_array[i];
        y = (i - height/2) * scale + cy;
        for (j = 0; j < width; j++, px++) {
            x = (j - width/2) * scale + cx;
            iter = 0;
            zx = hypot(x - .25, y);
            if (x < zx - 2 * zx * zx + .25) {
                iter = max_iter;
            }
            if ((x + 1)*(x + 1) + y * y < 1/16) {
                iter = max_iter;
            }
            zx = zy = zx2 = zy2 = 0;
            for (; iter < max_iter && zx2 + zy2 < 4; iter++) {
                zy = 2 * zx * zy + y;
                zx = zx2 - zy2 + x;
                zx2 = zx * zx;
                zy2 = zy * zy;
            }
            if (iter < min) {
                min = iter;
            }
            if (iter > max) {
                max = iter;
            }
            *(unsigned short *)px = iter;
        }
    }
    for (i = 0; i < height; i++) {
        for (j = 0, px = tex_array[i]; j < width; j++, px++) {
            hsv_to_rgb(*(unsigned short*)px, min, max, px);
        }
    }
}
void hsv_to_rgb(int hue, int min, int max, rgb_t *p)
{
    printf("Converting hsv to rgb... \n");
    if (min == max) {
        max = min + 1;
    }
    if (invert) {
        hue = max - (hue - min);
    }
    if (!saturation) {
        p->r = p->g = p->b = 255 * (max - hue) / (max - min);
        printf("done! (!saturation)\n");
        return;
    }
    double h = fmod(color_rotate + 1e-4 + 4.0 * (hue - min) / (max - min), 6);
    double c = VAL * saturation;
    double X = c * (1 - fabs(fmod(h, 2) - 1));
    p->r = p->g = p->b = 0;
    switch ((int)h) {
        case 0: p->r = c; p->g = X; break;
        case 1: p->r = X; p->g = c; break;
        case 2: p->g = c; p->b = X; break;
        case 3: p->g = X; p->b = c; break;
        case 4: p->r = X; p->b = c; break;
        default: p->r = c; p->b = X; break;
    }
    printf("done! (saturation)\n");
}

void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
    // make sure the viewport matches the new window dimensions; note that width and
    // height will be significantly larger than specified on retina displays.
    glViewport(0, 0, width, height);
    glOrtho(0, width, 0, height, -1, 1);
    //set_texture();
}
[1]: https://rosettacode.org/wiki/Mandelbrot_set#PPM_non_interactive
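One observation worth adding (my note, not from the original post): init_glfw requests a 3.3 core-profile context, but render() and framebuffer_size_callback use fixed-function calls (glBegin/glEnd, glTexEnvi, glOrtho, glEnable(GL_TEXTURE_2D)) that core profiles removed, so on drivers that honour the hint none of that legacy drawing will appear. A minimal sketch of keeping the legacy path is to request an older, compatibility-style context instead:

    /* sketch: replace the three window hints in init_glfw with a 2.1 request,  */
    /* which still exposes the fixed-function pipeline used by render()         */
    glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 2);
    glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 1);
    /* or keep 3.x and ask for GLFW_OPENGL_COMPAT_PROFILE instead of core */

Also note that glOrtho is only ever called from the resize callback, so at startup the projection is still the identity matrix while the quad is drawn in pixel coordinates, which places most of it outside clip space.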

Mandelbrot - Multi instead of Single color

I have the following code from a class:
#include <stdio.h>
#include <math.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <gdk/gdk.h>
#include <gtk/gtk.h>

void destroy(void) {
    gtk_main_quit();
}

static void
put_pixel (GdkPixbuf *pixbuf, int x, int y, guchar red, guchar green, guchar blue, guchar alpha);

typedef
struct complex {
    double re;
    double im;
} Complex;
int main(int argc, char** argv) {
    gtk_init (&argc, &argv);
    gint width = 256;
    gint height = 256;
    GdkColorspace cs = GDK_COLORSPACE_RGB;
    GdkPixbuf* pixbuf = gdk_pixbuf_new( cs, 1, 8, width, height);
    if (pixbuf == NULL) {
        fprintf( stderr, "Not able to create gdkimage object\n");
        return -1;
    }
    /*
     * Lower left and upper right corners of complex plane
     */
    const Complex ll = {-2.5, -1.0};
    const Complex ur = { 1.0, 1.0};
    /*
     * Resolution: Nx by Ny pixels in complex plane
     */
    const int Nx = 256;
    const int Ny = 256;
    const double dx = (ur.re - ll.re)/(double)(Nx-1);
    const double dy = (ur.im - ll.im)/(double)(Ny-1);
    Complex z[Nx][Ny];
    Complex c[Nx][Ny];
    unsigned int pixel[Nx][Ny];
    /*
     * Number of iterations
     */
    const int number_of_iterations = 1024;
    /*
     * Once the real or imaginary parts go beyond the max
     * values, they have "escaped"
     */
    const double Max = 4.0;
    /*
     * Initialize z and c
     */
    printf("Initializing ... \n");
    int i=0;
    for (i=0; i<Nx; i++) {
        int j=0;
        for (j=0; j<Ny; j++) {
            z[i][j].re = z[i][j].im = 0.;
            c[i][j].re = ll.re + i * dx;
            c[i][j].im = ll.im + j * dy;
            pixel[i][j] = 1; // close to black
        }
    }
    /*
     * Iterate for number of iterations
     */
    printf("Starting iterations ... \n");
    int k=0;
    unsigned int color;
    for (k=0; k<number_of_iterations; k++) {
        /*
         * Figure out the color for this iteration
         */
        color = (unsigned int) ((k * 255 * 256 * 256 ) / number_of_iterations);
        for (i=0; i<Nx; i++) {
            int j=0;
            for (j=0; j<Ny; j++) {
                /*
                 * if pixel value is 0, no need to compute anything.
                 */
                Complex t = z[i][j];
                /*
                 * check magnitude
                 */
                if (t.re * t.re + t.im*t.im < Max) {
                    /*
                     * Compute z_n+1 = z_n * z_n + c
                     */
                    Complex znp1;
                    znp1.re = t.re * t.re - t.im * t.im + c[i][j].re;
                    znp1.im = 2.0 * t.re * t.im + c[i][j].im;
                    z[i][j] = znp1;
                    pixel[i][j] = color;
                } else {
                    pixel[i][j] = 0; // black
                }
                //put draw part here
                guchar red = pixel[i][j] & 0xff;
                guchar green = pixel[i][j] & 0xff00;
                guchar blue = pixel[i][j] & 0xff0000;
                guchar alpha = 255; //opacity
                put_pixel( pixbuf, i, j, red, green, blue, alpha);
            }
        }
    }
    printf("Done with iterations \n");
    GtkWidget* window = gtk_window_new(GTK_WINDOW_TOPLEVEL);
    GtkWidget* image = gtk_image_new();
    gtk_image_set_from_pixbuf((GtkImage*) image, pixbuf);
    /*
     * The next 7 lines of code are NOW used to end the program if x button
     */
    GdkScreen* screen = gtk_widget_get_screen (GTK_WIDGET (window));
    GdkVisual* visual = gdk_screen_get_rgba_visual (screen);
    if (visual == NULL)
        visual = gdk_screen_get_system_visual (screen);
    g_signal_connect(G_OBJECT (window), "destroy",
                     G_CALLBACK (destroy), NULL);
    gtk_container_add(GTK_CONTAINER (window), image);
    gtk_widget_show_all(window);
    gtk_widget_queue_draw(image);
    gtk_main();
    return 0;
}//end of main
/*
 * put_pixel - from: https://developer.gnome.org/gdk-pixbuf/stable/gdk-pixbuf-The-GdkPixbuf-Structure.html
 */
static void
put_pixel (GdkPixbuf *pixbuf,
           int x, int y,
           guchar red, guchar green,
           guchar blue, guchar alpha) {
    int n_channels = gdk_pixbuf_get_n_channels (pixbuf);

    g_assert (gdk_pixbuf_get_colorspace (pixbuf) == GDK_COLORSPACE_RGB);
    g_assert (gdk_pixbuf_get_bits_per_sample (pixbuf) == 8);
    g_assert (gdk_pixbuf_get_has_alpha (pixbuf));
    g_assert (n_channels == 4);

    int width = gdk_pixbuf_get_width (pixbuf);
    int height = gdk_pixbuf_get_height (pixbuf);

    g_assert (x >= 0 && x < width);
    g_assert (y >= 0 && y < height);

    int rowstride = gdk_pixbuf_get_rowstride (pixbuf);
    guchar* pixels = gdk_pixbuf_get_pixels (pixbuf);
    guchar* p = pixels + y * rowstride + x * n_channels;
    p[0] = red;
    p[1] = green;
    p[2] = blue;
    p[3] = alpha;
}
I am trying to make the output multi-colored; currently it only comes out in one color. I think it may have something to do with the iterations and the color not changing from iteration to iteration. Any help would be great.
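A sketch of one way to get multiple colors (my reading of the code above, not part of the original post): inside the k loop, every point that is still bounded is overwritten with the single color computed for that k, and escaped points are reset to 0, so by the last iteration the whole image holds only two values. Recording the iteration at which each point first escapes and mapping that count to a color after the loop gives the familiar banded rendering. Note also that green and blue are masked with 0xff00/0xff0000 but never shifted down, so the guchar assignments truncate them. Assuming an extra int escape[Nx][Ny] array initialised to 0:

    /* in the k loop, in the else branch where pixel[i][j] is set to 0: */
    if (escape[i][j] == 0) {
        escape[i][j] = k;                /* remember when this point escaped */
    }

    /* after the k loop, color each pixel from its escape count */
    for (i = 0; i < Nx; i++) {
        for (int j = 0; j < Ny; j++) {
            int e = escape[i][j];
            guchar red   = (e * 255) / number_of_iterations;  /* simple ramp */
            guchar green = (e * 9) % 256;
            guchar blue  = (e * 23) % 256;
            if (e == 0) {                                     /* never escaped: interior, black */
                red = green = blue = 0;
            }
            put_pixel(pixbuf, i, j, red, green, blue, 255);
        }
    }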

Why can't dots be drawn "smoothly" with XDrawPoint (X Window)?

Here is the code snippet (Xlib):
void update_screen()
{
    int X, Y;
    for (X = 0; X < Wnd_X; X++)
        for (Y = 0; Y < Wnd_Y; Y++)
        {
            XDrawPoint(dsp, win, gc_2, X, Y);
            usleep(100);
        }
    return;
}
But the window does not fill in point by point; the output appears in sudden bursts (the two screenshots from the original post are not reproduced here). Why can't the dots be drawn smoothly with XDrawPoint? And how can the window be filled with green fluently?
Full code here:
#include <X11/Xlib.h> // must precede most other headers!
#include <stdlib.h>
#include <unistd.h>   // for usleep

int Wnd_X = 500;
int Wnd_Y = 500;

void update_screen();

GC gc_2;
Window win;
Display *dsp;

int main()
{
    dsp = XOpenDisplay(NULL);
    int screen = DefaultScreen(dsp);
    win = XCreateWindow(dsp, DefaultRootWindow(dsp), 0, 0, Wnd_X, Wnd_Y, 0, 0, 0, 0, 0, 0);
    //XCreateSimpleWindow(dsp, DefaultRootWindow(dsp), 0, 0, Wnd_X, Wnd_Y, 0, 0x000000, 0xFFFFFF);
    Atom wmDelete = XInternAtom(dsp, "WM_DELETE_WINDOW", True);
    XSetWMProtocols(dsp, win, &wmDelete, 1);

    XGCValues gcvalues_2;
    gcvalues_2.function = GXcopy;
    gcvalues_2.plane_mask = AllPlanes;
    gcvalues_2.foreground = 0x00FF00;
    gcvalues_2.background = 0xFFFFFF;
    gc_2 = XCreateGC(dsp, win, GCFunction|GCPlaneMask|GCForeground|GCBackground, &gcvalues_2);

    XEvent evt;
    long eventMask = StructureNotifyMask;
    eventMask |= ButtonPressMask|ButtonReleaseMask|KeyPressMask|KeyReleaseMask;
    XSelectInput(dsp, win, eventMask);

    XMapWindow(dsp, win);
    // wait until window appears
    do { XNextEvent(dsp, &evt); } while (evt.type != MapNotify);

    update_screen();

    XDestroyWindow(dsp, win);
    XCloseDisplay(dsp);
    return 0;
}

void update_screen()
{
    int X, Y;
    for (X = 0; X < Wnd_X; X++)
        for (Y = 0; Y < Wnd_Y; Y++)
        {
            XDrawPoint(dsp, win, gc_2, X, Y);
            usleep(100);
        }
    return;
}
You need to flush the updates to the X server. Try this:
XDrawPoint(dsp, win, gc_2, X, Y);
XFlush(dsp);
usleep(100);
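A follow-up note (mine, not from the original answer): flushing and sleeping after every single point makes the fill extremely slow. Flushing once per column is usually enough to make the progress visible while keeping it reasonably fast, roughly like this:

    for (X = 0; X < Wnd_X; X++) {
        for (Y = 0; Y < Wnd_Y; Y++)
            XDrawPoint(dsp, win, gc_2, X, Y);
        XFlush(dsp);      /* push the whole column of requests to the server */
        usleep(100);      /* brief pause so the sweep stays visible */
    }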
