OpenGL: When applying my own lookAt function the screen is blank - c

I'm writing my own version of gluLookAt. I believe I have the correct implementation however, often the matrix that gets returned is full of nan's.
Here is my function:
mat4 look_at(vec4 eye, vec4 at, vec4 up)
{
    /* Build the camera basis (u, v, n) and return the view matrix,
     * matching gluLookAt(eye, at, up). */
    vec4 vpn = vec4_sub(eye, at);
    /* eye and at are points (w == 1), so vpn comes out with a nonzero
     * w unless both agree. A direction must have w == 0; otherwise
     * vec4_norm folds w into the length and the cross products below
     * degenerate, filling the matrix with NaNs. up must also be a
     * direction (w == 0), which the caller provides. */
    vpn.w = 0;
    vec4 n = vec4_norm(vpn);               /* view-plane normal (back) */
    vec4 u = vec4_norm(vec4_cross(up, n)); /* camera right             */
    vec4 v = vec4_norm(vec4_cross(n, u));  /* camera up                */
    vec4 p = { eye.x, eye.y, eye.z, 1 };   /* eye position, as a point */
    /* Rows are the basis vectors, last column is the eye position;
     * transpose + invert yields the world-to-camera transform. */
    mat4 m;
    m.x.x = u.x; m.y.x = u.y; m.z.x = u.z; m.w.x = u.w;
    m.x.y = v.x; m.y.y = v.y; m.z.y = v.z; m.w.y = v.w;
    m.x.z = n.x; m.y.z = n.y; m.z.z = n.z; m.w.z = n.w;
    m.x.w = p.x; m.y.w = p.y; m.z.w = p.z; m.w.w = p.w;
    mat4 m_t = mat4_trans(m);
    mat4 m_t_inv = mat4_inv(m_t);
    return m_t_inv;
}
I am currently trying to look at the top of a cube I made. The cube is 1x1x1 and is centered at the origin. I am setting the model_view like so:
vec4 e = {0, 1, 0, 1};
vec4 a = {0, 0, 0, 0};
vec4 u = {0, 0, 1, 0};
model_view = look_at(e, a, u);
I believe I have the parameters correct. I want to look down at the origin from y=1.
Does the issue appear to be in my function? or have I misunderstood the parameters?

The fourth component of the axis vectors u, v and p has to be zero. In your case the fourth component of vpn is not zero, because eye is {0, 1, 0, 1}.
I recommend doing the computation of u, v and p with vec3 rather than vec4. However, you can fix the issue with setting vpn[3] = 0:
vec4 vpn = vec4_sub(eye, at);
vpn[3] = 0;

Related

Vulkan Ray Tracing - Not every primitive Id from any hit shader

EDIT: I added the .cpp file of my project; it is quite similar to the example in the repo from Sascha Willems.
I'm new in Vulkan and I try to write a Ray/Triangle intersection. Sadly I did not find an Example. Maybe you know one?
I need these intersections to calculate the attenuation of rays.
So I did a raygen shader like this:
#version 460
#extension GL_NV_ray_tracing : require
// Set to 1 to dump per-ray diagnostics into the debugData buffer.
#define debug 0
layout(binding = 0, set = 0) uniform accelerationStructureNV topLevelAS;
layout(binding = 1, set = 0, rgba8) uniform image2D image;
layout(binding = 2, set = 0) uniform CameraProperties
{
    mat4 viewInverse;
    mat4 projInverse;
} cam;
// One xyz target point per launched ray, stored as three planes:
// all x values, then all y values, then all z values.
layout(binding = 3, set = 0) buffer detectorProperties
{
    double detectorValue[];
} detectors;
//layout(binding = 4, set = 0) buffer outputProperties
//{
// double outputValues[];
//} outputData;
layout(binding = 5, set = 0) buffer debugProperties
{
    double debugValues[];
} debugData;
// Per-ray payload: base slot into the output/debug buffers (18 doubles
// are reserved per ray) plus a hit counter the any-hit shader bumps.
struct RayPayload {
    uint outputId;
    uint hitCounter;
};
layout(location = 0) rayPayloadNV RayPayload rayPayload;
void main()
{
    rayPayload.outputId = gl_LaunchIDNV.x * 18 + gl_LaunchIDNV.y * gl_LaunchSizeNV.x * 18;
    rayPayload.hitCounter = 0;
    // The camera position is the translation column of the inverse view
    // matrix. The original read viewInverse[0].x, [1].y, [2].z — the
    // diagonal of the rotation part — which is not the eye position.
    vec3 origin = (cam.viewInverse * vec4(0.0, 0.0, 0.0, 1.0)).xyz;
    uint rayId = uint(gl_LaunchIDNV.x + gl_LaunchSizeNV.x * gl_LaunchIDNV.y);
    uint targetXId = rayId;
    uint targetYId = rayId + gl_LaunchSizeNV.x * gl_LaunchSizeNV.y;
    uint targetZId = rayId + gl_LaunchSizeNV.x * gl_LaunchSizeNV.y * 2;
    vec3 target = vec3(detectors.detectorValue[targetXId], detectors.detectorValue[targetYId], detectors.detectorValue[targetZId]);
    vec3 direction = target.xyz - origin.xyz;
// `#if`, not `#ifdef`: the macro is always *defined*, so `#ifdef`
// compiled this block in even with debug set to 0.
#if debug
    uint debugId = rayPayload.outputId;
    debugData.debugValues[debugId + 0] = gl_LaunchSizeNV.x;
    debugData.debugValues[debugId + 1] = gl_LaunchSizeNV.y;
    debugData.debugValues[debugId + 2] = gl_LaunchIDNV.x;
    debugData.debugValues[debugId + 3] = gl_LaunchIDNV.y;
    debugData.debugValues[debugId + 4] = targetXId;
    debugData.debugValues[debugId + 5] = targetYId;
    debugData.debugValues[debugId + 6] = targetZId;
    debugData.debugValues[debugId + 7] = target.x;
    debugData.debugValues[debugId + 8] = target.y;
    debugData.debugValues[debugId + 9] = target.z;
    debugData.debugValues[debugId + 10] = origin.x;
    debugData.debugValues[debugId + 11] = origin.y;
    debugData.debugValues[debugId + 12] = origin.z;
    debugData.debugValues[debugId + 13] = direction.x;
    debugData.debugValues[debugId + 14] = direction.y;
    debugData.debugValues[debugId + 15] = direction.z;
    debugData.debugValues[debugId + 16] = rayId;
    debugData.debugValues[debugId + 17] = rayPayload.outputId;
#endif
    uint rayFlags = gl_RayFlagsNoneNV;
    uint cullMask = 0xff;
    float tmin = 0.00001;
    float tmax = 10000.0;
    traceNV(topLevelAS, rayFlags, cullMask, 0, 0, 0, origin.xyz, tmin, direction.xyz, tmax, 0);
    // uint outputId = gl_LaunchIDNV.x * 18+ gl_LaunchIDNV.y * gl_LaunchSizeNV.x *18;
    // outputData.outputValues[outputId + hitCounter] = double(hitValue[hitCounter]);
    // Visualize the number of intersections along the ray.
    imageStore(image, ivec2(gl_LaunchIDNV.xy), vec4(rayPayload.hitCounter, 0, 0, 0.0));
}
And with the any-hit shader I just want to give back the primitive Id like that:
#version 460
#extension GL_NV_ray_tracing : require
#extension GL_EXT_nonuniform_qualifier : enable
layout(binding = 4, set = 0) buffer outputProperties
{
    float outputValues[];
} outputData;
struct RayPayload {
    uint outputId;
    uint hitCounter;
};
layout(location = 0) rayPayloadInNV RayPayload rayPayload;
hitAttributeNV vec3 attribs;
void main()
{
    // Record the primitive id of this intersection in the ray's output
    // slot, then bump the per-ray hit counter.
    uint outputIdCurrentTriangle = rayPayload.outputId + rayPayload.hitCounter++;
    outputData.outputValues[outputIdCurrentTriangle] = gl_PrimitiveID;
    // Decline the intersection so gl_RayTmaxNV / gl_HitKindNV are not
    // committed. Without this, the first accepted hit shortens the ray
    // and only the surface triangles nearest the origin are ever
    // reported (see the GLSL_NV_ray_tracing spec).
    ignoreIntersectionNV();
}
You can see the .cpp file here:
https://drive.google.com/file/d/1iTX3ATaP3pT7d4CEowo4IVnQxTOerxaD/view?usp=sharing
My problem is that I only find the surface triangles nearest to the source, even though the object is non-opaque (tested by setting the ray flag to cull opaque geometry).
Does anyone have the same problem, or an example that also returns the primitive ID of every hit?
I have only found one example for any-hit shaders. Are any-hit shaders really used this rarely?
Thank you for your help!
If you want to have your any hit shader to register intersections for all triangles in your scene you should call ignoreIntersectionNV in your any hit shader (see GLSL_NV_ray_tracing specs).
With this, your any hit shader will carry on without modifying gl_RayTmaxNV and gl_HitKindNV, triggering for all ray intersections.
With a simple any hit shader like this:
#version 460
#extension GL_NV_ray_tracing : require
#extension GL_GOOGLE_include_directive : enable
#include "raypayload.glsl"
layout(binding = 1, set = 0, rgba8) uniform image2D image;
layout(location = 0) rayPayloadInNV RayPayload rayPayload;
void main()
{
// Count this intersection in the payload...
rayPayload.hitcount++;
// ...then decline it, leaving gl_RayTmaxNV / gl_HitKindNV untouched so
// traversal keeps invoking this shader for every primitive on the ray.
ignoreIntersectionNV();
}
The hitCount will be increased by one for each intersection, visualizing this with color coding by hitcount will yield something like this:

handle large arrays/textures in fragment shader

I am trying to pass a large amount of information to my fragment shader but I always reach a limit (too many textures binded, texture too large, etc., array too large, etc.). I use a ThreeJS custom shader.
I have a 256*256*256 rgba volume that I want to pass to my shader.
In my shader, I want to map the fragments's world position to a voxel in this 256*256*256 volume.
Is there a good strategy to deal with this amount of information? Which would be the best practice? Is there any good workaround?
My current approach is to generate 4 different 2048x2048 rgba texture containing all the data I need.
To create each 2048x2048 texture, I just push every row of every slice sequentially to a big array and split this array into 2048x2048x4 chunks, which are my textures:
// Flatten the _dims[2] x _dims[1] x _dims[0] volume _imageN into one
// row-major RGBA byte array (gray value replicated into R, G and B,
// alpha forced to opaque).
var _imageRGBA = new Uint8Array(_dims[2] * _dims[1] * _dims[0] * 4);
for (_k = 0; _k < _dims[2]; _k++) {
  for (_j = 0; _j < _dims[1]; _j++) {
    for (_i = 0; _i < _dims[0]; _i++) {
      // Byte offset of voxel (_i, _j, _k): 4 bytes per voxel.
      var voxelOffset = 4 * (_i + _dims[0] * (_j + _dims[1] * _k));
      var gray = _imageN[_k][_j][_i];
      _imageRGBA[voxelOffset]     = gray; // R
      _imageRGBA[voxelOffset + 1] = gray; // G
      _imageRGBA[voxelOffset + 2] = gray; // B
      _imageRGBA[voxelOffset + 3] = 255;  // A: fully opaque
    }
  }
}
Each texture looks something like that:
On the shader side, I try to map a fragment's worldposition to an actual color from the texture:
Vertex shader:
// Declared for symmetry with the fragment shader; not referenced here.
uniform mat4 rastoijk;
varying vec4 vPos;
varying vec2 vUv;
void main() {
// World-space position, consumed by the fragment shader for the
// world -> voxel lookup.
vPos = modelMatrix * vec4(position, 1.0 );
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1.0 );
}
</script>
Fragment shader:
<script id="fragShader" type="shader">
// Fetch the value of voxel ijkCoordinates from the volume flattened
// row-major across four 2048x2048 atlas textures (tex0..tex3).
vec4 getIJKValue( sampler2D tex0, sampler2D tex1, sampler2D tex2, sampler2D tex3, vec3 ijkCoordinates, vec3 ijkDimensions) {
    // IJK coord to flat voxel index
    float textureSize = 2048.0;
    float index = ijkCoordinates[0] + ijkCoordinates[1]*ijkDimensions[0] + ijkCoordinates[2]*ijkDimensions[0]*ijkDimensions[1];
    // map index to the right 2048 x 2048 slice
    float sliceIndex = floor(index / (textureSize*textureSize));
    float inTextureIndex = mod(index, textureSize*textureSize);
    // row / column of the texel inside the slice
    float rowIndex = floor(inTextureIndex/textureSize);
    float colIndex = mod(inTextureIndex, textureSize);
    // Sample at the texel CENTER (+0.5): sampling exactly on a texel
    // edge lets nearest filtering round into the neighbouring texel for
    // some fragments, producing isolated wrong (black) dots.
    float u = (colIndex + 0.5)/textureSize;
    float v = 1.0 - (rowIndex + 0.5)/textureSize;
    vec2 uv = vec2(u,v);
    vec4 ijkValue = vec4(0, 0, 0, 0);
    if(sliceIndex == float(0)){
        ijkValue = texture2D(tex0, uv);
    }
    else if(sliceIndex == float(1)){
        ijkValue = texture2D(tex1, uv);
    }
    else if(sliceIndex == float(2)){
        ijkValue = texture2D(tex2, uv);
    }
    else if(sliceIndex == float(3)){
        ijkValue = texture2D(tex3, uv);
    }
    return ijkValue;
}
// World (RAS) to voxel (IJK) transform.
uniform mat4 rastoijk;
// Four 2048x2048 atlases holding the flattened volume.
uniform sampler2D ijk00;
uniform sampler2D ijk01;
uniform sampler2D ijk02;
uniform sampler2D ijk03;
uniform vec3 ijkDimensions;
varying vec4 vPos;
varying vec2 vUv;
void main(void) {
// get IJK coordinates of current element
vec4 ijkPos = rastoijk * vPos;
// show whole texture in the back...
vec3 color = texture2D(ijk00, vUv).rgb;
//convert IJK coordinates to texture coordinates
// Only sample inside the volume bounds.
// NOTE(review): the `> 0` tests also skip the boundary voxels at
// index 0 -- confirm that is intended (`>= 0` would include them).
if(int(floor(ijkPos[0])) > 0
&& int(floor(ijkPos[1])) > 0
&& int(floor(ijkPos[2])) > 0
&& int(floor(ijkPos[0])) < int(ijkDimensions[0])
&& int(floor(ijkPos[1])) < int(ijkDimensions[1])
&& int(floor(ijkPos[2])) < int(ijkDimensions[2])){
// try to map IJK to value...
vec3 ijkCoordinates = vec3(floor(ijkPos[0]), floor(ijkPos[1]), floor(ijkPos[2]));
vec4 ijkValue = getIJKValue(ijk00, ijk01, ijk02, ijk03, ijkCoordinates, ijkDimensions);
color = ijkValue.rgb;
}
gl_FragColor = vec4(color, 1.0);
// or discard if not in IJK bounding box...
}
</script>
That doesn't work well. I now get an image with weird artifacts (nyquist shannon effect?). As I zoom in, the image appears. (even though not perfect, some black dots)
Any help or advice would be greatly appreciated. I also plan to do some raycasting for volume rendering using this approach (much needed in the medical field).
Best,
The approach to handle large arrays using multiple textures was fine.
The issue was how I was generating the texture with THREE.js.
The texture was generated using the default linear interpolation: http://threejs.org/docs/#Reference/Textures/DataTexture
What I needed was nearest-neighbor interpolation. This way, the texture stays pixelated and we can access the real IJK value (not an interpolated value).
Found it there: http://www.html5gamedevs.com/topic/8109-threejs-custom-shader-creates-weird-artifacts-space-between-faces/
texture = new THREE.DataTexture( textureData, tSize, tSize, THREE.RGBAFormat, THREE.UnsignedByteType, THREE.UVMapping,
THREE.ClampToEdgeWrapping, THREE.ClampToEdgeWrapping, THREE.NearestFilter, THREE.NearestFilter );
Thanks

OpenGL finding `in vec3 vert` but not `in float val` in vertex shader

I have some code that draws squares by passing points through a geometry shader. I construct an array which is sequences of 3 floats, bind that to the in vec3 vert attribute of my vertex shader, and everything is fine.
However, I want to add another float which the fragment shader will use to calculate color. This is in the vertex shader (to pass through) as in float val. Despite being able to find vert, glGetAttribLocation can't find val (get_program_attrib(): Atrrib val not found (-1)).
Code:
/* Build a VAO/VBO with one vertex per occupied square, interleaved as
 * 4 floats: xyz position (attribute "vert") + scalar (attribute "val").
 * Updates the global squares_count. */
void load_model(GLuint* vao, GLuint* vbo) {
    glGenVertexArrays(1, vao);
    glBindVertexArray(*vao);
    glGenBuffers(1, vbo);
    glBindBuffer(GL_ARRAY_BUFFER, *vbo);
    float data[SQUARES_PER_AXIS_SQ * 4] = {0};
    squares_count = 0;
    for (int i = 0; i < SQUARES_PER_AXIS_SQ; i++) {
        int x_pos = i % SQUARES_PER_AXIS;
        int y_pos = i / SQUARES_PER_AXIS;
        if (fabs(squares[i]) > 0.0) {
            /* Map grid cell to normalized device coordinates [-1, 1). */
            data[squares_count * 4 + 0] = x_pos / ((float)SQUARES_PER_AXIS) * 2 - 1;
            data[squares_count * 4 + 1] = (SQUARES_PER_AXIS - y_pos) / ((float)SQUARES_PER_AXIS) * 2 - 1;
            data[squares_count * 4 + 2] = 0.5f;
            data[squares_count * 4 + 3] = (float)squares[i];
            squares_count++;
        }
    }
    DPRINT("Loaded %d squares\n", squares_count);
    glBufferData(GL_ARRAY_BUFFER, squares_count * 4 * sizeof(float), data, GL_STATIC_DRAW);
    glEnableVertexAttribArray(get_program_attrib(main_shader, "vert"));
    glEnableVertexAttribArray(get_program_attrib(main_shader, "val"));
    /* Stride is in BYTES: each vertex is 4 floats = 4*sizeof(float).
     * The original passed 4 (one float), so consecutive attributes
     * overlapped and read garbage. The final argument is a byte offset
     * into the buffer, conventionally cast through void*. */
    glVertexAttribPointer(get_program_attrib(main_shader, "vert"), 3, GL_FLOAT, GL_FALSE, 4 * sizeof(float), NULL);
    glVertexAttribPointer(get_program_attrib(main_shader, "val"), 1, GL_FLOAT, GL_FALSE, 4 * sizeof(float), (const void*)(3 * sizeof(float)));
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
}
/* Look up the location of vertex attribute `name` in `prog`.
 * Returns the location, or -1 when name is NULL or the attribute is
 * not active.
 * NOTE(review): the return type is GLuint while glGetAttribLocation
 * returns a signed GLint; -1 wraps to 0xFFFFFFFF when passed on to
 * glEnableVertexAttribArray -- confirm callers handle the failure
 * case before using the value. */
GLuint get_program_attrib(program_t* prog, GLchar* name) {
if (!name) {
DPRINT("ERROR: name == NULL\n");
return -1;
}
GLint attrib = glGetAttribLocation(prog->id, name);
/* Negative means the attribute is not active: either never declared,
 * or optimized out because it does not affect the program's output. */
if (attrib < 0)
DPRINT("Atrrib %s not found (%d)\n", name, attrib);
return attrib;
}
Vertex shader:
#version 150
in vec3 vert;
in float val;
// NOTE: with a geometry shader in the pipeline this output feeds the
// geometry stage, not the fragment shader. The geometry shader below
// never declares a matching input, so `val` does not contribute to the
// program's output and the linker deactivates it -- which is why
// glGetAttribLocation returns -1 for "val".
out float value;
void main() {
gl_Position = vec4(vert, 1);
value = val;
}
Fragment shader:
#version 150
// With a geometry shader present this input must be written by the
// geometry stage; no output named `value` exists there, so this is
// never fed.
in float value;
out vec4 color;
void main() {
color = vec4(value, 0, 0, 1);
}
Geometry shader:
// Expands each input point into an axis-aligned quad (triangle strip).
// NOTE: it neither declares the vertex stage's `value` output as an
// input nor re-emits it, so the fragment shader's `value` is unfed and
// the whole varying chain is deactivated by the linker.
#version 150
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
uniform float square_size;
void main() {
vec4 position = gl_in[0].gl_Position;
gl_Position = vec4(position.x, position.y, position.zw);
EmitVertex();
gl_Position = vec4(position.x, position.y + square_size, position.zw);
EmitVertex();
gl_Position = vec4(position.x + square_size, position.y, position.zw);
EmitVertex();
gl_Position = vec4(position.x + square_size, position.y + square_size, position.zw);
EmitVertex();
EndPrimitive();
}
Vertex shader outputs are not passed directly to the fragment shader when you have a geometry shader.
It is this that is causing all of your problems. For a vertex attribute to be active, it has to contribute to the final output of your program. Basically that means something calculated in the fragment shader has to be based off of it.
Unfortunately, that is not happening right now. You have a variable called value that is output from your vertex shader and a variable called value that is input by your fragment shader. Because the geometry shader sits inbetween the two of them, the fragment shader only looks for an output named value in the geometry shader -- no such output exists.
Naturally you might think that the solution would be to create a variable called value in the geometry shader that serves as the input and the output. However, that will not work, you would have to declare it inout value and that is invalid.
Here are the necessary corrections:
Vertex shader:
// Vertex stage: renamed output so the geometry shader can declare a
// matching input and forward the per-vertex value.
#version 150
in vec3 vert;
in float val;
out float value_vtx; // Output is fed to the Geometry Shader
void main() {
gl_Position = vec4(vert, 1);
value_vtx = val;
}
Fragment shader:
// Fragment stage: reads the value forwarded by the geometry shader.
#version 150
in float value_geo; // Takes its input from the Geometry Shader
out vec4 color;
void main() {
color = vec4(value_geo, 0, 0, 1);
}
Geometry shader:
// Geometry stage: expands each point into a quad and forwards the
// per-vertex value to the fragment shader. value_geo is reassigned
// before every EmitVertex() because outputs become undefined after
// each emit.
#version 150
layout (points) in;
layout (triangle_strip, max_vertices=4) out;
uniform float square_size;
in float value_vtx []; // This was output by the vertex shader
out float value_geo; // This will be the input to the fragment shader
void main() {
vec4 position = gl_in[0].gl_Position;
gl_Position = vec4(position.x, position.y, position.zw);
value_geo = value_vtx[0];
EmitVertex();
gl_Position = vec4(position.x, position.y + square_size, position.zw);
value_geo = value_vtx[0];
EmitVertex();
gl_Position = vec4(position.x + square_size, position.y, position.zw);
value_geo = value_vtx[0];
EmitVertex();
gl_Position = vec4(position.x + square_size, position.y + square_size, position.zw);
value_geo = value_vtx[0];
EmitVertex();
EndPrimitive();
}
You may be asking why I assigned value_geo 4 times when it is constant. That is because EmitVertex (...) causes all output variables to become undefined when it returns, so you have to set it every time.

How to cope with WebGL missing glBlendEquation(GL_MAX)

Here's my current C code that does what I'd like to do, but it relies on glBlendEquation(GL_MAX) which is unavailable in WebGL. What I want is to render a wiggly fuzzy line. I could use a Gaussian blur but it would have to have a VERY large radius (16 pixels) and I expect it would be REALLY slow.
Note I've removed some gl state management code and a couple other things fore clarity but the code should work as is.
Existing C code:
/* Point-sprite shaders for the fuzzy line.
 * GLSL 1.10 does not accept the `f' suffix on float literals (the
 * suffix was only introduced in GLSL 1.20), so strict drivers reject
 * the original sources; plain decimal literals are already floats in
 * GLSL, so the suffixes are simply dropped. */
static const char *pnt_vtx_shader =
"#version 110\n"
"varying vec2 uv;\n"
"void main() {\n"
"	uv = (gl_MultiTexCoord0.st - 1.0);\n"     /* center falloff at st == 1 */
"	gl_Position = gl_Vertex;\n"
"}";
static const char *pnt_shader_src =
"#version 110\n"
"varying vec2 uv;\n"
"void main() {\n"
"	gl_FragColor = vec4(exp(-4.5*0.5*log2(dot(uv,uv)+1.0)));\n"
"}";
GLuint shader_prog ;
int samp;
float pw, ph;
float sco_verts[128*8*4];
int sco_ind[128*3*6];
/* Precompute the per-segment vertex texture coordinates and the index
 * buffer for the fuzzy-line scope. Positions (floats 2..3 of each
 * vertex) are filled in per frame by render_scope(). */
void init(int width, int height, int num_samp)
{
/* Half line width in NDC: at least 1/24th of the viewport, but never
 * thinner than 8 pixels (8/width, 8/height). */
pw = 0.5f*fmaxf(1.0f/24, 8.0f/width), ph = 0.5f*fmaxf(1.0f/24, 8.0f/height);
samp = num_samp;
// helper function, compiles and links the shader, prints out any errors
shader_prog = compile_program(pnt_vtx_shader, pnt_shader_src);
/* 8 vertices per segment, 4 floats each: st texture coords first
 * (range 0..2 so the vertex shader's `st - 1.0` centers the falloff),
 * then xy position. */
for(int i=0; i<samp; i++) {
sco_verts[(i*8+0)*4+0] = 0; sco_verts[(i*8+0)*4+1] = 2;
sco_verts[(i*8+1)*4+0] = 0; sco_verts[(i*8+1)*4+1] = 0;
sco_verts[(i*8+2)*4+0] = 1; sco_verts[(i*8+2)*4+1] = 2;
sco_verts[(i*8+3)*4+0] = 1; sco_verts[(i*8+3)*4+1] = 0;
sco_verts[(i*8+4)*4+0] = 1; sco_verts[(i*8+4)*4+1] = 2;
sco_verts[(i*8+5)*4+0] = 1; sco_verts[(i*8+5)*4+1] = 0;
sco_verts[(i*8+6)*4+0] = 2; sco_verts[(i*8+6)*4+1] = 2;
sco_verts[(i*8+7)*4+0] = 2; sco_verts[(i*8+7)*4+1] = 0;
}
/* Six triangles (three quads) per segment: start cap, body, end cap. */
for(int i=0; i<samp; i++) {
sco_ind[(i*6+0)*3+0] = i*8+0; sco_ind[(i*6+0)*3+1] = i*8+1; sco_ind[(i*6+0)*3+2] = i*8+3;
sco_ind[(i*6+1)*3+0] = i*8+0; sco_ind[(i*6+1)*3+1] = i*8+3; sco_ind[(i*6+1)*3+2] = i*8+2;
sco_ind[(i*6+2)*3+0] = i*8+2; sco_ind[(i*6+2)*3+1] = i*8+4; sco_ind[(i*6+2)*3+2] = i*8+5;
sco_ind[(i*6+3)*3+0] = i*8+2; sco_ind[(i*6+3)*3+1] = i*8+5; sco_ind[(i*6+3)*3+2] = i*8+3;
sco_ind[(i*6+4)*3+0] = i*8+4; sco_ind[(i*6+4)*3+1] = i*8+6; sco_ind[(i*6+4)*3+2] = i*8+7;
sco_ind[(i*6+5)*3+0] = i*8+4; sco_ind[(i*6+5)*3+1] = i*8+7; sco_ind[(i*6+5)*3+2] = i*8+5;
}
}
// getsamp does some averaging over samples
static float getsamp(const float *data, int len, int i, int w) {
float sum = 0, err = 0;
int l = IMAX(i-w, 0);
int u = IMIN(i+w, len);
for(int i = l; i < u; i++)
sum+= data[i];
return sum / (2*w);
}
// R holds a rotation matrix... it's the transpose of what you would give GL though
// because of reasons :P (I wrote code that did all the stuff from this program in
// software first and the GL version shares a bunch of code with that one)
// data is audio samples, [-1, 1], the length of the array is in len
/* Draw the oscilloscope line: rotate/project each audio sample into
 * screen space and extrude a camera-facing quad strip around the
 * resulting polyline, then render with the fuzzy-point shader. */
void render_scope(float R[3][3], const float *data, int len)
{
// do the rotate/project ourselves because the GL matrix won't do the right
// thing if we just send it our verticies, we want wour tris to always be
// parrallel to the view plane, because we're actually drawing a fuzzy line
// not a 3D object
// also it makes it easier to match the software implementation
float px, py;
{
/* Project the first sample to seed the previous point (px, py). */
float s = getsamp(data, len, 0, len/96);
/* Log-shape the amplitude, preserving sign. */
s=copysignf(log2f(fabsf(s)*3+1)/2, s);
float xt = -0.5f, yt = 0.2f*s, zt = 0.0f;
float x = R[0][0]*xt + R[1][0]*yt + R[2][0]*zt;
float y = R[0][1]*xt + R[1][1]*yt + R[2][1]*zt;
float z = R[0][2]*xt + R[1][2]*yt + R[2][2]*zt;
/* Simple perspective divide with the view plane at z = -2. */
const float zvd = 1/(z+2);
px=x*zvd*4/3; py=y*zvd*4/3;
}
for(int i=0; i<samp; i++) {
float s = getsamp(data, len, (i+1)*len/(samp), len/96);
s=copysignf(log2f(fabsf(s)*3+1)/2, s);
float xt = (i+1 - (samp)/2.0f)*(1.0f/(samp)), yt = 0.2f*s, zt = 0.0f;
float x = R[0][0]*xt + R[1][0]*yt + R[2][0]*zt;
float y = R[0][1]*xt + R[1][1]*yt + R[2][1]*zt;
float z = R[0][2]*xt + R[1][2]*yt + R[2][2]*zt;
const float zvd = 1/(z+2);
x=x*zvd*4/3; y=y*zvd*4/3;
/* Unit tangent (tx, ty) and normal (nx, ny) of the segment, scaled
 * to the half line width.
 * NOTE(review): ty is scaled by pw while ny is scaled by ph --
 * presumably ty should use ph to match; confirm against the
 * software implementation before changing. */
const float dx=x-px, dy=y-py;
const float d = 1/hypotf(dx, dy);
const float tx=dx*d*pw, ty=dy*d*pw;
const float nx=-dy*d*pw, ny=dx*d*ph;
/* Quad strip around the segment: caps extend by +/- (tx, ty). */
sco_verts[(i*8+0)*4+2] = px-nx-tx; sco_verts[(i*8+0)*4+3] = py-ny-ty;
sco_verts[(i*8+1)*4+2] = px+nx-tx; sco_verts[(i*8+1)*4+3] = py+ny-ty;
sco_verts[(i*8+2)*4+2] = px-nx ; sco_verts[(i*8+2)*4+3] = py-ny;
sco_verts[(i*8+3)*4+2] = px+nx ; sco_verts[(i*8+3)*4+3] = py+ny;
sco_verts[(i*8+4)*4+2] = x-nx ; sco_verts[(i*8+4)*4+3] = y-ny;
sco_verts[(i*8+5)*4+2] = x+nx ; sco_verts[(i*8+5)*4+3] = y+ny;
sco_verts[(i*8+6)*4+2] = x-nx+tx; sco_verts[(i*8+6)*4+3] = y-ny+ty;
sco_verts[(i*8+7)*4+2] = x+nx+tx; sco_verts[(i*8+7)*4+3] = y+ny+ty;
px=x,py=y;
}
/* GL_MAX blending makes overlapping fuzzy quads take the brightest
 * contribution instead of accumulating. */
glEnable(GL_BLEND);
glBlendEquation(GL_MAX);
glUseProgram(shader_prog);
glColor4f(1.0f, 1.0f, 1.0f, 1.0f);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(2, GL_FLOAT, sizeof(float)*4, sco_verts);
glVertexPointer(2, GL_FLOAT, sizeof(float)*4, sco_verts + 2);
glDrawElements(GL_TRIANGLES, samp*3*6, GL_UNSIGNED_INT, sco_ind);
}
Here's a screenshot from a test app, I'm not sure the line width is right in this screen shot... but meh it gives the idea, also I'd be using way more points so the lines would be smoother.

Assign multiple values to array in C

Is there any way to do this in a condensed form?
GLfloat coordinates[8];
...
coordinates[0] = 1.0f;
coordinates[1] = 0.0f;
coordinates[2] = 1.0f;
coordinates[3] = 1.0f;
coordinates[4] = 0.0f;
coordinates[5] = 1.0f;
coordinates[6] = 0.0f;
coordinates[7] = 0.0f;
return coordinates;
Something like coordinates = {1.0f, ...};?
If you really want to assign values (as opposed to initialize), you can do it like this:
GLfloat coordinates[8];
static const GLfloat coordinates_defaults[8] = {1.0f, 0.0f, 1.0f ....};
...
memcpy(coordinates, coordinates_defaults, sizeof(coordinates_defaults));
return coordinates;
Although in your case, just plain initialization will do, there's a trick to wrap the array into a struct (which can be initialized after declaration).
For example:
struct foo {
GLfloat arr[10];
};
...
struct foo foo;
foo = (struct foo) { .arr = {1.0, ... } };
The old-school way:
GLfloat coordinates[8];
...
GLfloat *p = coordinates;
*p++ = 1.0f; *p++ = 0.0f; *p++ = 1.0f; *p++ = 1.0f;
*p++ = 0.0f; *p++ = 1.0f; *p++ = 0.0f; *p++ = 0.0f;
return coordinates;
You can use:
GLfloat coordinates[8] = {1.0f, ..., 0.0f};
but this is a compile-time initialisation - you can't use that method in the current standard to re-initialise (although I think there are ways to do it in the upcoming standard, which may not immediately help you).
The other two ways that spring to mind are to blat the contents if they're fixed:
GLfloat base_coordinates[8] = {1.0f, ..., 0.0f};
GLfloat coordinates[8];
:
memcpy (coordinates, base_coordinates, sizeof (coordinates));
or provide a function that looks like your initialisation code anyway:
void setCoords (float *p0, float p1, ..., float p8) {
p0[0] = p1; p0[1] = p2; p0[2] = p3; p0[3] = p4;
p0[4] = p5; p0[5] = p6; p0[6] = p7; p0[7] = p8;
}
:
setCoords (coordinates, 1.0f, ..., 0.0f);
keeping in mind those ellipses (...) are placeholders, not things to literally insert in the code.
I went with an array initialization method:
#include <stdarg.h>
/* Fill the first ct slots of a with the ct trailing int arguments. */
void int_array_init(int *a, const int ct, ...) {
    va_list ap;
    va_start(ap, ct);
    int idx = 0;
    while (idx < ct) {
        a[idx] = va_arg(ap, int);
        ++idx;
    }
    va_end(ap);
}
called like,
const int node_ct = 8;
int expected[node_ct];
int_array_init(expected, node_ct, 1, 3, 4, 2, 5, 6, 7, 8);
The C99 array initialization, like this:
const int node_ct = 8;
const int expected[node_ct] = { 1, 3, 4, 2, 5, 6, 7, 8 };
And in the configure.ac:
AC_PROG_CC_C99
had the compiler on my dev box perfectly happy. The compiler on the server complained with:
error: variable-sized object may not be initialized
const int expected[node_ct] = { 1, 3, 4, 2, 5, 6, 7, 8 };
and
warning: excess elements in array initializer
const int expected[node_ct] = { 1, 3, 4, 2, 5, 6, 7, 8 };
for each element
It doesn't complain at all about, for example:
int expected[] = { 1, 2, 3, 4, 5 };
I like the check on size, and that the varargs support is acting more robustly than the support for the array initializer.
Find PR with sample code at https://github.com/wbreeze/davenport/pull/15/files
Regarding https://stackoverflow.com/a/3535455/608359 from @paxdiablo: I liked it, but felt uneasy about keeping the number of times the initialization pointer advances synchronized with the number of elements allocated to the array. In the worst case, the initializing pointer moves beyond the allocated length. As such, the diff in the PR contains,
int expected[node_ct];
- int *p = expected;
- *p++ = 1; *p++ = 2; *p++ = 3; *p++ = 4;
+ int_array_init(expected, node_ct, 1, 2, 3, 4);
The int_array_init method will safely assign junk if the number of
arguments is fewer than the node_ct. The junk assignment ought to be easier
to catch and debug.
Exactly, you nearly got it:
GLfloat coordinates[8] = {1.0f, ..., 0.0f};
If you are doing these same assignments a lot in your program and want a shortcut, the most straightforward solution might be to just add a function
/* Store the eight values c0..c7 into coordinates[0..7]. */
static inline void set_coordinates(
    GLfloat coordinates[static 8],
    GLfloat c0, GLfloat c1, GLfloat c2, GLfloat c3,
    GLfloat c4, GLfloat c5, GLfloat c6, GLfloat c7)
{
    /* Gather the arguments into one array, then copy in order. */
    const GLfloat values[8] = { c0, c1, c2, c3, c4, c5, c6, c7 };
    for (int i = 0; i < 8; ++i)
        coordinates[i] = values[i];
}
and then simply call
GLfloat coordinates[8];
// ...
set_coordinates(coordinates, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 0.0f, 0.0f);
/* Wrapping the array in a struct makes it assignable as a whole, so it
 * can be (re)filled after declaration via a compound literal. */
typedef struct{
char array[4];
}my_array;
my_array array = { .array = {1,1,1,1} }; // initialisation
/* Copy the fields of a into the global `array`, element by element. */
void assign(my_array a)
{
array.array[0] = a.array[0];
array.array[1] = a.array[1];
array.array[2] = a.array[2];
array.array[3] = a.array[3];
}
char num = 5;
char ber = 6;
int main(void)
{
printf("%d\n", array.array[0]);
// ...
// this works even after initialisation
/* Compound literal built from run-time values, passed by value. */
assign((my_array){ .array = {num,ber,num,ber} });
printf("%d\n", array.array[0]);
// ....
return 0;
}

Resources