I'm trying to add FreeType text rendering to my running program. The code below was adapted from EdoardoLuciani, but no text is shown.
I checked that the .ttf file is loaded correctly and that valid texture IDs are generated; both are fine.
The rest of the program runs without problems, but no text is drawn.
Could you help me find what's wrong, please?
Environment:
DELL XPS
GPU 1050
OpenGL 4.6
Win32
Language C
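For reference, here is a minimal sketch of how the FreeType and texture checks mentioned above can be made explicit (FreeType calls return 0 on success, and glGetError reports problems with the texture calls):

#include <stdio.h>
#include <ft2build.h>
#include FT_FREETYPE_H

/* Minimal sketch: verify that FreeType initializes, that the TTF file loads,
   and that no GL error is pending after the texture setup. */
FT_Library ft;
if (FT_Init_FreeType(&ft) != 0)
    printf("FT_Init_FreeType failed\n");

FT_Face face;
if (FT_New_Face(ft, "arial.ttf", 0, &face) != 0)
    printf("FT_New_Face failed: arial.ttf not found or not readable\n");

GLenum err = glGetError();
if (err != GL_NO_ERROR)
    printf("OpenGL error after texture creation: 0x%x\n", (unsigned)err);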
// Main program
...
initText();
...
// Drawing loop
...
{
drawText(10, 300, 1.0f, "Hello World!");
}
void initText()
{
// Extract glyph textures
//
glUseProgram(textShader);
FT_Library ft;
FT_Init_FreeType(&ft);
FT_Face face;
FT_New_Face(ft, "arial.ttf", 0, &face);
FT_Set_Pixel_Sizes(face, 0, 48);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
for(int c = 0; c < 128; c++)
{
FT_Load_Char(face, c, FT_LOAD_RENDER);
//
// Extract glyph
//
GLuint texture;
glCreateTextures(GL_TEXTURE_2D, 1, &texture);
glTextureStorage2D(texture, 1, GL_R8, face->glyph->bitmap.width, face->glyph->bitmap.rows);
glTextureSubImage2D(texture, 0, 0, 0, face->glyph->bitmap.width, face->glyph->bitmap.rows, GL_RED, GL_UNSIGNED_BYTE, face->glyph->bitmap.buffer);
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
//
// Save glyph to lookup table
//
Character ch;
ch.texture = texture;
ch.bearing[0] = face->glyph->bitmap.width;
ch.bearing[1] = face->glyph->bitmap.rows;
ch.size[0] = face->glyph->bitmap_left;
ch.size[1] = face->glyph->bitmap_top;
ch.advance = face->glyph->advance.x;
characters[c] = ch;
}
FT_Done_Face(face);
FT_Done_FreeType(ft);
//
// Create VAO
//
glCreateVertexArrays(1, &textVAO);
glBindVertexArray(textVAO);
//
// Create the unique VBO
//
glCreateBuffers(1, &VBO);
glNamedBufferStorage(VBO, sizeof(GLfloat) * 6 * 4, NULL, GL_DYNAMIC_STORAGE_BIT);
glVertexArrayVertexBuffer(textVAO, 0, VBO, 0, sizeof(GLfloat) * 4);
glVertexArrayAttribFormat(textVAO, 0, 4, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(textVAO, 0, 0);
glEnableVertexArrayAttrib(textVAO, 0);
//
// Send color to fragment shader
//
glUniform3f(6, 0.88f, 0.59f, 0.07f);
}
void drawText(float x, float y, float scale, char *text)
{
glEnable(GL_CULL_FACE);
glUseProgram(textShader);
glBindVertexArray(textVAO);
//
// Draw each character
//
char *c = text;
while(*c != 0)
{
Character ch = characters[(int)*c];
if(ch.bearing[0] != 0 && ch.bearing[1] != 0)
{
GLfloat xpos = x + ch.bearing[0] * scale;
GLfloat ypos = y - (ch.size[1] - ch.bearing[1]) * scale;
//
GLfloat w = ch.size[0] * scale;
GLfloat h = ch.size[1] * scale;
//
// Update VBO for each character
//
GLfloat vertices[6*4] =
{
xpos, ypos + h, 0.0f, 0.0f ,
xpos, ypos, 0.0f, 1.0f ,
xpos + w, ypos, 1.0f, 1.0f ,
xpos, ypos + h, 0.0f, 0.0f ,
xpos + w, ypos, 1.0f, 1.0f ,
xpos + w, ypos + h, 1.0f, 0.0f
};
glNamedBufferSubData(VBO, 0, sizeof(GLfloat)*6*4, vertices);
glBindTexture(GL_TEXTURE_2D, ch.texture);
glDrawArrays(GL_TRIANGLES, 0, 6);
}
x += (ch.advance >> 6) * scale;
c++;
}
glDisable(GL_CULL_FACE);
}
The vertex shader:
#version 460 core
layout (location = 0) in vec4 vertex; // <vec2 pos, vec2 tex>
layout (location = 1) uniform mat4 projection;
out vec2 TexCoords;
void main()
{
gl_Position = projection * vec4(vertex.xy, 0.0, 1.0);
TexCoords = vertex.zw;
}
The fragment shader:
#version 460 core
in vec2 TexCoords;
out vec4 color;
layout (binding = 0) uniform sampler2D text;
layout (location = 6) uniform vec3 textColor;
void main()
{
color = vec4(textColor, 1.0) * texture(text, TexCoords).r;
}
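The vertex shader expects a projection matrix in the uniform at location 1. For context, a minimal sketch of uploading a screen-space orthographic matrix to it (winWidth/winHeight are hypothetical placeholders for the actual framebuffer size) looks like this:

/* Minimal sketch: column-major orthographic matrix mapping
   (0..winWidth, 0..winHeight) to clip space, uploaded to location 1. */
GLfloat ortho[16] = {
    2.0f / winWidth, 0.0f,             0.0f, 0.0f,
    0.0f,            2.0f / winHeight, 0.0f, 0.0f,
    0.0f,            0.0f,            -1.0f, 0.0f,
   -1.0f,           -1.0f,             0.0f, 1.0f
};
glUseProgram(textShader);
glUniformMatrix4fv(1, 1, GL_FALSE, ortho);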
I am following the tutorials on learnopengl.com and have come across an issue. I am writing my program in C rather than C++, using the linmath library for my matrix transformations, and I have the files shader.c and texture.c. I believe the issue is with my matrix transformations, but I can't figure out where I've gone wrong. The program compiles and runs, the window is created, and the background colour is correct; however, the object does not show.
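For comparison, a minimal sketch of how the linmath.h matrices are typically chained (assuming the stock linmath.h signatures, where mat4x4_rotate(R, M, x, y, z, angle) reads its input from M, so M has to be initialized first):

mat4x4 model, m, view, projection;
mat4x4_identity(m);                                    /* start from identity             */
mat4x4_rotate(model, m, 1.0f, 0.0f, 0.0f, -0.95993f);  /* model = m * rotation about X    */
mat4x4_translate(view, 0.0f, 0.0f, -3.0f);             /* builds the translation directly */
mat4x4_perspective(projection, 0.785f,
                   (float)SCR_WIDTH / (float)SCR_HEIGHT, 0.1f, 100.0f);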
Here is the source:
#include <stdio.h>
#include <stdlib.h>
#include <glad/glad.h>
#include <GLFW/glfw3.h>
#include "linmath.h"
#include "shader.h"
#include "texture.h"
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void processInput(GLFWwindow *window);
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
int main(){
// glfw: initialize and configure
// ------------------------------
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // uncomment this statement to fix compilation on OS X
// glfw window creation
// --------------------
GLFWwindow* window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "LearnOpenGL", NULL, NULL);
if (window == NULL){
printf("Failed to create GLFW window");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebuffer_size_callback);
if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress))
{
printf("Failed to initialize GLAD");
return -1;
}
Shader ourShader;
ourShader.ID = loadShader("3.3.shader.vs", "3.3.shader.fs");
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
float vertices[] = {
// positions // colors // texture coords
0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right
0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left
-0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
unsigned int indices[] = {
0, 1, 3, // first triangle
1, 2, 3 // second triangle
};
unsigned int VBO, VAO, EBO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
// color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
// texture coord attribute
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glEnableVertexAttribArray(2);
Texture tex1 = loadTexture("container.jpg");
Texture tex2 = loadTexture("awesomeface.png");
// tell opengl for each sampler to which texture unit it belongs to (only has to be done once)
// -------------------------------------------------------------------------------------------
useShader(ourShader.ID);
setIntShader("texture1", 0, ourShader.ID);
setIntShader("texture2", 1, ourShader.ID);
// render loop
// -----------
while (!glfwWindowShouldClose(window))
{
// input
// -----
processInput(window);
// render
// ------
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// bind textures on corresponding texture units
bindTexture(tex1, GL_TEXTURE0);
bindTexture(tex2, GL_TEXTURE1);
// activate shader
useShader(ourShader.ID);
// create transformations
mat4x4 model;
mat4x4 m;
mat4x4 view;
mat4x4 projection;
mat4x4_rotate(model, m, 1.0f, 0.0f, 0.0f, -0.95993f);
mat4x4_translate(view, 0.0f, 0.0f, -3.0f);
mat4x4_perspective(projection, 0.785f, (float)SCR_WIDTH / (float)SCR_HEIGHT, 0.1f, 100.0f);
// pass them to the shaders
setMat4Shader("model", model, ourShader.ID);
setMat4Shader("view", view, ourShader.ID);
setMat4Shader("projection", projection, ourShader.ID);
// render container
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
// glfw: swap buffers and poll IO events (keys pressed/released, mouse moved etc.)
// -------------------------------------------------------------------------------
glfwSwapBuffers(window);
glfwPollEvents();
}
// optional: de-allocate all resources once they've outlived their purpose:
// ------------------------------------------------------------------------
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
// glfw: terminate, clearing all previously allocated GLFW resources.
// ------------------------------------------------------------------
glfwTerminate();
return 0;
}
void framebuffer_size_callback(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void processInput(GLFWwindow *window)
{
if(glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
shader.c
#include "shader.h"
GLuint loadShader(const char * vertex_file_path,const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
char* VertexShaderCode;
FILE * vertexFile = fopen(vertex_file_path, "r");
if( vertexFile == NULL ){
printf("Impossible to open the file !\n");
return LOAD_FAILURE;
}
long vertex_file_size;
fseek(vertexFile, 0, SEEK_END);
vertex_file_size = ftell(vertexFile);
rewind(vertexFile);
VertexShaderCode = malloc((vertex_file_size + 1) * (sizeof(char)));
fread(VertexShaderCode, sizeof(char), vertex_file_size, vertexFile);
fclose(vertexFile);
VertexShaderCode[vertex_file_size] = 0;
// Read the Fragment Shader code from the file
char* FragmentShaderCode;
FILE * fragmentFile = fopen(fragment_file_path, "r");
if( fragmentFile == NULL ){
printf("Impossible to open the file !\n");
return LOAD_FAILURE;
}
long fragment_file_size;
fseek(fragmentFile, 0, SEEK_END);
fragment_file_size = ftell(fragmentFile);
rewind(fragmentFile);
FragmentShaderCode = malloc((fragment_file_size + 1) * (sizeof(char)));
fread(FragmentShaderCode, sizeof(char), fragment_file_size, fragmentFile);
fclose(fragmentFile);
FragmentShaderCode[fragment_file_size] = 0;
const char* vsCode = VertexShaderCode;
const char* fsCode = FragmentShaderCode;
// 2. compile shaders
unsigned int vertex, fragment;
int success;
char infoLog[512];
// vertex shader
vertex = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex, 1, &vsCode, NULL);
glCompileShader(vertex);
checkCompileErrors(vertex, "VERTEX");
// fragment Shader
fragment = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment, 1, &fsCode, NULL);
glCompileShader(fragment);
checkCompileErrors(fragment, "FRAGMENT");
// shader Program
GLuint ID = glCreateProgram();
glAttachShader(ID, vertex);
glAttachShader(ID, fragment);
glLinkProgram(ID);
checkCompileErrors(ID, "PROGRAM");
// delete the shaders as they're linked into our program now and no longer necessery
glDeleteShader(vertex);
glDeleteShader(fragment);
return ID;
}
void useShader(unsigned int ID){
glUseProgram(ID);
}
void setBoolShader(const char * name, int value, unsigned int ID)
{
glUniform1i(glGetUniformLocation(ID, name), value);
}
void setIntShader(const char * name, int value, unsigned int ID)
{
glUniform1i(glGetUniformLocation(ID, name), value);
}
void setFloatShader(const char * name, float value, unsigned int ID)
{
glUniform1f(glGetUniformLocation(ID, name), value);
}
void setMat4Shader(const char * name, mat4x4 mat, unsigned int ID)
{
glUniformMatrix4fv(glGetUniformLocation(ID, name), 1, GL_FALSE, *mat);
}
void checkCompileErrors(GLuint shader, char type[])
{
GLint success;
GLchar infoLog[1024];
if(strncmp(type, "PROGRAM", 7) != 0)
{
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
if(!success)
{
glGetShaderInfoLog(shader, 1024, NULL, infoLog);
printf("ERROR::SHADER_COMPILATION_ERROR of type: %s \n %s \n", type, infoLog);
}
}
else
{
glGetProgramiv(shader, GL_LINK_STATUS, &success);
if(!success)
{
glGetProgramInfoLog(shader, 1024, NULL, infoLog);
printf("ERROR::SHADER_COMPILATION_ERROR of type: %s \n %s \n", type, infoLog);
}
}
}
texture.c
#include "texture.h"
Texture loadTexture(const char* path){
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
// set the texture wrapping/filtering options (on the currently bound texture object)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
//load texture
int width, height, nrChannels;
unsigned char *data = SOIL_load_image(path, &width, &height, &nrChannels, SOIL_LOAD_RGB);
if(data){
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
glGenerateMipmap(GL_TEXTURE_2D);
} else{
printf("Failed to load Image");
}
SOIL_free_image_data(data);
Texture t;
t.ID = texture;
t.path = path;
return t;
}
void bindTexture(Texture texture, GLenum unit){
// bind textures on corresponding texture units
glActiveTexture(unit);
glBindTexture(GL_TEXTURE_2D, texture.ID);
}
Vertex shader
#version 330 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTexCoord;
out vec2 TexCoord;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
gl_Position = projection * view * model * vec4(aPos, 1.0);
TexCoord = vec2(aTexCoord.x, aTexCoord.y);
}
Fragment shader
#version 330 core
out vec4 FragColor;
in vec2 TexCoord;
// texture samplers
uniform sampler2D texture1;
uniform sampler2D texture2;
void main()
{
// linearly interpolate between both textures (80% container, 20% awesomeface)
FragColor = mix(texture(texture1, TexCoord), texture(texture2, TexCoord), 0.2);
}
I am rendering H264 video frames from an IP camera, decoded into the BGRA32 pixel format, using DirectX 11 and SharpDX, and displayed in WPF via a D3DImage control.
After a lot of research and looking at various samples and examples, I have finally managed to get it to render H264 frames with DirectX 11.
My current setup involves setting up the vertex shader and pixel shader as below:
var device = this.Device;
var context = device.ImmediateContext;
// Compile Vertex and Pixel shaders
vertexShaderByteCode = ShaderBytecode.CompileFromFile("TriangleShader.fx", "VSMain", "vs_5_0", ShaderFlags.Debug, EffectFlags.None);
vertexShader = new VertexShader(device, vertexShaderByteCode);
pixelShaderByteCode = ShaderBytecode.CompileFromFile("TriangleShader.fx", "PSMain", "ps_5_0", ShaderFlags.Debug, EffectFlags.None);
pixelShader = new PixelShader(device, pixelShaderByteCode);
// Layout from VertexShader input signature
// An input layout describes the location and properties of each vertex in the vertex data; it is a format you can modify and set as needed.
layout = new InputLayout(device, ShaderSignature.GetInputSignature(vertexShaderByteCode), new[] {
new InputElement("SV_Position", 0, Format.R32G32B32A32_Float, 0, 0),
new InputElement("COLOR", 0, Format.R32G32_Float, 16, 0),
new InputElement("TEXCOORD", 0, Format.R32G32_Float, 32, 0),
});
// Write vertex data to a datastream
var stream = new DataStream(Utilities.SizeOf<Vertex>() * 6, true, true);
int iWidth = (int)this.ActualWidth;
int iHeight = (int)this.ActualHeight;
float top = iWidth / 2;
float bottom = iHeight / 2;
stream.WriteRange(new[]
{
new Vertex(
new Vector4(-top, bottom, 0.5f, 0.0f), // position top-center
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color top-center (r,g,b,alpha)
new Vector2(0f,0f)),
new Vertex(
new Vector4(top, bottom, 0.5f, 0.0f), // position top-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color top-right (r,g,b,alpha)
new Vector2(iWidth,iHeight)),
new Vertex(
new Vector4(-top, -bottom, 0.5f, 0.0f), // position bottom-left
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color bottom-left (r,g,b,alpha)
new Vector2(iWidth,iHeight)),
new Vertex(
new Vector4(-top, -bottom, 0.5f, 0.0f), // position bottom-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color bottom-left (r,g,b,alpha)
new Vector2(iWidth,0f)),
new Vertex(
new Vector4(top, -bottom, 0.5f, 0.0f), // position bottom-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color bottom-right (r,g,b,alpha)
new Vector2(iWidth,iHeight)),
new Vertex(
new Vector4(top, bottom, 0.5f, 0.0f), // position top-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color top-right (r,g,b,alpha)
new Vector2(0f, iHeight)),
});
stream.Position = 0;
// Instantiate vertex buffer from vertex data
//
vertices = new SharpDX.Direct3D11.Buffer(device, stream, new BufferDescription()
{
BindFlags = BindFlags.VertexBuffer,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None,
SizeInBytes = Utilities.SizeOf<Vertex>() * 6,
Usage = ResourceUsage.Default,
StructureByteStride = 0
});
stream.Dispose();
// Prepare All the stages
context.InputAssembler.InputLayout = (layout);
context.InputAssembler.PrimitiveTopology = (PrimitiveTopology.TriangleStrip);
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertices, Utilities.SizeOf<Vertex>(), 0));
context.VertexShader.Set(vertexShader);
context.PixelShader.Set(pixelShader);
context.OutputMerger.SetTargets(m_RenderTargetView);
Whenever I receive a new frame, I update the render texture by mapping the resource as below:
device.ImmediateContext.ClearRenderTargetView(this.m_RenderTargetView, Color4.Black);
Texture2DDescription colordesc = new Texture2DDescription
{
BindFlags = BindFlags.ShaderResource,
Format = m_PixelFormat,
Width = iWidth,
Height = iHeight,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Dynamic,
OptionFlags = ResourceOptionFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
ArraySize = 1
};
Texture2D newFrameTexture = new Texture2D(this.Device, colordesc);
DataStream dtStream = null;
DataBox dBox = Device.ImmediateContext.MapSubresource(newFrameTexture, 0, MapMode.WriteDiscard, 0, out dtStream);
if (dtStream != null)
{
int iRowPitch = dBox.RowPitch;
for (int iHeightIndex = 0; iHeightIndex < iHeight; iHeightIndex++)
{
// Copy the image bytes to the texture row by row:
// each row holds width * bytes-per-pixel bytes (BGRA32 is 4 bytes per pixel),
// but rows in the mapped texture are RowPitch bytes apart.
dtStream.Position = iHeightIndex * iRowPitch;
Marshal.Copy(decodedData, iHeightIndex * iWidth * 4, new IntPtr(dtStream.DataPointer.ToInt64() + iHeightIndex * iRowPitch), iWidth * 4);
}
}
Device.ImmediateContext.UnmapSubresource(newFrameTexture, 0);
Texture2D srcTexture = m_RenderTargetView.ResourceAs<Texture2D>();
Device.ImmediateContext.CopySubresourceRegion(newFrameTexture, 0, null, this.RenderTarget, 0);
Device.ImmediateContext.Draw(6, 0);
Device.ImmediateContext.Flush();
this.D3DSurface.InvalidateD3DImage();
Disposer.SafeDispose(ref newFrameTexture);
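The per-row copy above accounts for the mapped texture's RowPitch, which can be larger than width * 4 bytes. In plain C terms the loop is doing roughly this (a hypothetical sketch; dest, src, width, height and rowPitch are placeholders, not names from my code):

#include <stdint.h>
#include <string.h>

/* Hypothetical sketch: copy a tightly packed BGRA frame (4 bytes per pixel)
   into a mapped texture whose rows are padded out to rowPitch bytes. */
static void copy_bgra_rows(uint8_t *dest, const uint8_t *src,
                           int width, int height, int rowPitch)
{
    for (int row = 0; row < height; row++)
        memcpy(dest + (size_t)row * rowPitch,   /* padded destination row    */
               src  + (size_t)row * width * 4,  /* tightly packed source row */
               (size_t)width * 4);              /* payload bytes per row     */
}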
My Effects/HLSL file:
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
cbuffer PerObject: register(b0)
{
float4x4 WorldViewProj;
};
struct VertexShaderInput
{
float4 Position : SV_Position;
float4 Color : COLOR;
float2 TextureUV : TEXCOORD0;
};
struct VertexShaderOutput
{
float4 Position : SV_Position;
float4 Color : COLOR;
float2 TextureUV : TEXCOORD0;
};
VertexShaderOutput VSMain(VertexShaderInput input)
{
VertexShaderOutput output = (VertexShaderOutput)0;
output.Position = mul(input.Position, WorldViewProj);
output.Color = mul(input.Color, WorldViewProj);
output.TextureUV = input.TextureUV;
return output;
}
float4 PSMain(VertexShaderOutput input) : SV_Target
{
return ShaderTexture.Sample(Sampler, input.TextureUV).rgb;
}
Images are rendered correctly, but they seem to be transparent whenever the background color of the parent control is anything other than black.
I can't figure out what exactly is going on here that is rendering my texture as transparent.
I have also tried rendering when the parent Grid control has its background set to a picture of a koala, as shown below:
Any help in this matter would be much appreciated.
I can't seem to understand how multiple copies of the same object can be drawn using an array of objects. Below you can see both my display and draw methods:
void display()
{
SolidSphere **spheres = createSpheres();
float const win_aspect = (float)win_width / (float)win_height;
glViewport(0, 0, win_width, win_height);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glColor3f(.6, 0, 0);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(45, win_aspect, 1, 10);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
for (int i = 0; i < n; i++)
{
spheres[i]->draw(posX,posY,posZ);
}
glutSwapBuffers();
}
void draw(GLfloat x, GLfloat y, GLfloat z)
{
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glTranslatef(x,y,z);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, &vertices[0]);
glNormalPointer(GL_FLOAT, 0, &normals[0]);
glTexCoordPointer(2, GL_FLOAT, 0, &texcoords[0]);
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_SHORT, &indices[0]);
glPopMatrix();
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
As a result, I only get one sphere instead of n.
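For reference, a minimal sketch of the loop with each sphere given its own offset (positions is a hypothetical per-sphere coordinate array; draw() already applies the translation internally):

/* Hypothetical sketch: every iteration above passes the same posX/posY/posZ,
   so all n spheres are translated to the same spot and overlap exactly.
   Giving each sphere its own coordinates separates them. */
for (int i = 0; i < n; i++)
{
    spheres[i]->draw(positions[i][0], positions[i][1], positions[i][2]);
}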
I'm trying to display a simple white cube in OpenGL with SDL. I've set up my VBO and IBO like this:
GLfloat vertexData[] =
{
-0.5f, -0.5f, -0.5f, // bot, left, back
-0.5f, 0.5f, -0.5f, // top, left, back
-0.5f, -0.5f, 0.5f, // bot, left, front
-0.5f, 0.5f, 0.5f, // top, left, front
0.5f, -0.5f, -0.5f, // bot, right, back
0.5f, -0.5f, 0.5f, // bot, right, front
0.5f, 0.5f, -0.5f, // top, right, back
0.5f, 0.5f, 0.5f // top, right, front
};
GLint indexData[] =
{
//back
0, 1, 6,
0, 5, 6,
//left
0, 2, 3,
0, 1, 3,
//right
4, 5, 7,
4, 6, 7,
//bot
0, 4, 5,
0, 2, 5,
//top
1, 3, 7,
1, 6, 7,
//front
2, 3, 7,
2, 5, 7
};
However, it looks like this:
What am I doing wrong?
Edit:
My init functions
bool App::OpenGLInit()
{
programID = glCreateProgram();
GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
std::string vertexString = getShaderFromFile("src/shader.vert");
const GLchar * vertexShaderSource = vertexString.c_str();
glShaderSource(vertexShader, 1, (const GLchar **)&vertexShaderSource, NULL);
glCompileShader(vertexShader);
glAttachShader(programID, vertexShader);
GLuint fragmentShader = glCreateShader( GL_FRAGMENT_SHADER );
//Get fragment source
std::string fragmentString = getShaderFromFile("src/shader.frag");
const GLchar* fragmentShaderSource = fragmentString.c_str();
//Set fragment source
glShaderSource( fragmentShader, 1, (const GLchar **)&fragmentShaderSource, NULL );
//Compile fragment source
glCompileShader( fragmentShader );
glAttachShader(programID, fragmentShader );
glLinkProgram(programID);
return true;
}
bool App::OnInit()
{
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 3 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 2 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE );
if(SDL_Init(SDL_INIT_EVERYTHING) < 0) {
return false;
}
if((screen = SDL_CreateWindow("Color Wars", 100, 100, 1024, 600, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN)) == NULL) {
return false;
}
if ((gl_context = SDL_GL_CreateContext(screen)) == NULL)
{
return false;
}
if((renderer = SDL_CreateRenderer(screen, -1, 0)) == NULL)
{
return false;
}
if (SDL_GL_SetSwapInterval(1) < 0)
return false;
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK)
{
return false;
}
if (!OpenGLInit())
return false;
game_screen = new GameScreen();
game_screen->Init(programID);
return true;
}
My render functions:
void App::OnRender()
{
glClear( GL_COLOR_BUFFER_BIT );
glUseProgram( programID );
game_screen->Render(renderer);
glUseProgram( NULL );
SDL_GL_SwapWindow(screen);
}
void GameScreen::Render(SDL_Renderer *renderer)
{
//Enable vertex position
glEnableVertexAttribArray(vertex2DPosition);
//Set vertex data
glBindBuffer( GL_ARRAY_BUFFER, VBO );
glVertexAttribPointer(vertex2DPosition, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), NULL );
//Set index data and render
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, IBO );
glDrawElements(GL_TRIANGLE_FAN, 12, GL_UNSIGNED_INT, NULL );
//Disable vertex position
glDisableVertexAttribArray( vertex2DPosition );
}
You have three things working against you in this example:
You are drawing a cube in Normalized Device Coordinates, which have the range XYZ: [-1,1].
You are drawing into a window that is not square, so the cube face is stretched because your coordinate space has nothing to do with the size of your window.
It appears to be a rectangle because you have not defined any viewing transformation and are viewing it face-on with no perspective.
You can correct all of this if you do this whenever your window is resized:
GLfloat aspect = (GLfloat)width / (GLfloat)height;
glMatrixMode (GL_PROJECTION);
glLoadIdentity ();
glOrtho ((-1.0f * aspect), (1.0f * aspect), -1.0f, 1.0f, -1.0f, 1.0f);
glViewport (0, 0, width, height);
glMatrixMode (GL_MODELVIEW);
glLoadIdentity ();
glRotatef (45.0f, 0.25f, 0.5f, 0.75f); // Let's rotate this sucker!
Where width and height are the width and height of your SDL window, respectively.
UPDATE:
See, now it is clear why this did not work: you never mentioned in your question that you were using shaders. Thank you for posting your updated code. For this to work with shaders, you need to pass a projection and modelview matrix to them yourself. You can build the same matrices I described with the old OpenGL matrix manipulation functions on the CPU side instead; glm would probably make this easiest.
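For example, a minimal sketch of the shader-based equivalent, building the same aspect-corrected orthographic matrix on the CPU and uploading it to a hypothetical projection uniform in your program, could look like this:

/* Hypothetical sketch: column-major equivalent of
   glOrtho(-aspect, aspect, -1, 1, -1, 1), uploaded to a "projection"
   uniform (the name is an assumption; match whatever your shader declares). */
GLfloat aspect = (GLfloat)width / (GLfloat)height;
GLfloat projection[16] = {
    1.0f / aspect, 0.0f,  0.0f, 0.0f,
    0.0f,          1.0f,  0.0f, 0.0f,
    0.0f,          0.0f, -1.0f, 0.0f,
    0.0f,          0.0f,  0.0f, 1.0f
};
glUseProgram(programID);
glUniformMatrix4fv(glGetUniformLocation(programID, "projection"), 1, GL_FALSE, projection);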