I have a textured quad that needs to be drawn in screen space, sized and aligned to the viewport, in a SceneKit scene at the far plane - think of it as a background fill.
I've manually created a 4-vertex quad in clip space and written a vertex/fragment shader to draw it. The vertices are set to z=1, which should place them at the far plane.
However, while the quad renders as expected in screen space (it is correctly aligned), it is drawn atop all scene contents - with z=1 for the clip-space vertices it should be behind everything.
I've been able to work around this by setting a low renderingOrder and disabling depth writes, but that's not a fix, that's a hack.
// build the plane vertices in clip space
let vertices: [SCNVector3] = [
    SCNVector3(-1, -1, +1),
    SCNVector3(+1, -1, +1),
    SCNVector3(+1, +1, +1),
    SCNVector3(-1, +1, +1),
]
let texCoords: [CGPoint] = [
    CGPoint(x: 0, y: 1),
    CGPoint(x: 1, y: 1),
    CGPoint(x: 1, y: 0),
    CGPoint(x: 0, y: 0),
]
let indices: [UInt16] = [
    0, 1, 2,
    0, 2, 3,
]
let vertexSource = SCNGeometrySource(vertices: vertices)
let texCoordSource = SCNGeometrySource(textureCoordinates: texCoords)
let elementSource = SCNGeometryElement(indices: indices, primitiveType: .triangles)
let planeGeometry = SCNGeometry(sources: [vertexSource, texCoordSource], elements: [elementSource])

let program = SCNProgram()
program.library = ShaderUtilities.metalLibrary
program.vertexFunctionName = "plane_vertex"
program.fragmentFunctionName = "plane_fragment"

if let material = planeGeometry.firstMaterial {
    material.program = program
    material.setValue(SCNMaterialProperty(contents: backdropImage as Any), forKey: "backgroundTexture")
}

let planeNode = SCNNode(geometry: planeGeometry)
meshCameraNode.addChildNode(planeNode)
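For completeness, the renderingOrder / depth-write workaround mentioned above looks roughly like this (a sketch only; the exact renderingOrder value is arbitrary, it just has to sort before the rest of the scene):
// Workaround, not a fix: draw the quad before everything else and skip depth writes,
// so the rest of the scene draws over it regardless of z.
planeNode.renderingOrder = -100
planeGeometry.firstMaterial?.writesToDepthBuffer = false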
The corresponding Metal shaders:
struct PlaneVertexIn {
    float3 position [[attribute(SCNVertexSemanticPosition)]];
    float2 uv [[attribute(SCNVertexSemanticTexcoord0)]];
};

struct PlaneVertexOut {
    float4 position [[position]];
    float2 uv;
};

vertex PlaneVertexOut plane_vertex(PlaneVertexIn in [[ stage_in ]],
                                   constant SCNSceneBuffer& scn_frame [[buffer(0)]],
                                   constant NodeBuffer& scn_node [[buffer(1)]]) {
    PlaneVertexOut out;
    // the vertices are already in clip space to form a screen-aligned quad, so no need to apply a transform.
    out.position = float4(in.position.x, in.position.y, in.position.z, 1.0);
    out.uv = in.uv;
    return out;
}

fragment float4 plane_fragment(PlaneVertexOut out [[ stage_in ]],
                               constant SCNSceneBuffer& scn_frame [[buffer(0)]],
                               texture2d<float, access::sample> backgroundTexture [[texture(0)]]) {
    constexpr sampler textureSampler(coord::normalized, filter::linear, address::repeat);
    return backgroundTexture.sample(textureSampler, out.uv);
}
Related
I'm making a simple game in Swift 5 using SpriteKit that contains a ball, a target, and a barrier. The goal is to drag the barrier so the ball bounces off of it and hits the target. The ball is the only object that's supposed to have gravity. Everything was working fine until I changed the code to use an array of barrier objects so I can add more barriers, but now when I run the code the barrier immediately falls, so it has gravity. Here is the part of the code that adds the barrier.
fileprivate func addBarrier(at position: Point, width: Double, height: Double, angle: Double) {
    // Add a barrier to the scene and make it immobile (it won't move when forces act on it)
    let barrierPoints = [Point(x: 0, y: 0), Point(x: 0, y: height), Point(x: width, y: height), Point(x: width, y: 0)]
    let barrier = PolygonShape(points: barrierPoints)
    barriers.append(barrier)
    barrier.position = position
    barrier.isImmobile = true
    barrier.hasPhysics = true
    barrier.fillColor = .brown
    barrier.angle = angle
    scene.add(barrier)
}
The whole code is available at: https://github.com/Capslockhuh/BouncyBall
I have a gradient, generated by a Metal fragment shader, that I've applied to an SCNNode defined by a plane geometry.
It looks like this:
When I use the same shader applied to an MTKView rendered in an Xcode playground, the colors are darker. What is causing the colors to be lighter in the SceneKit version?
Here is the Metal shader and the GameViewController.
Shader:
#include <metal_stdlib>
using namespace metal;
#include <SceneKit/scn_metal>

struct myPlaneNodeBuffer {
    float4x4 modelTransform;
    float4x4 modelViewTransform;
    float4x4 normalTransform;
    float4x4 modelViewProjectionTransform;
    float2x3 boundingBox;
};

typedef struct {
    float3 position [[ attribute(SCNVertexSemanticPosition) ]];
    float2 texCoords [[ attribute(SCNVertexSemanticTexcoord0) ]];
} VertexInput;

struct SimpleVertexWithUV
{
    float4 position [[position]];
    float2 uv;
};

vertex SimpleVertexWithUV gradientVertex(VertexInput in [[ stage_in ]],
                                         constant SCNSceneBuffer& scn_frame [[buffer(0)]],
                                         constant myPlaneNodeBuffer& scn_node [[buffer(1)]])
{
    SimpleVertexWithUV vert;
    vert.position = scn_node.modelViewProjectionTransform * float4(in.position, 1.0);

    int width = abs(scn_node.boundingBox[0].x) + abs(scn_node.boundingBox[1].x);
    int height = abs(scn_node.boundingBox[0].y) + abs(scn_node.boundingBox[1].y);
    float2 resolution = float2(width, height);

    vert.uv = vert.position.xy * 0.5 / resolution;
    vert.uv = 0.5 - vert.uv;

    return vert;
}

fragment float4 gradientFragment(SimpleVertexWithUV in [[stage_in]],
                                 constant myPlaneNodeBuffer& scn_node [[buffer(1)]])
{
    float4 fragColor;
    float3 color = mix(float3(1.0, 0.6, 0.1), float3(0.5, 0.8, 1.0), sqrt(1 - in.uv.y));
    fragColor = float4(color, 1);
    return(fragColor);
}
Game view controller:
import SceneKit
import QuartzCore

class GameViewController: NSViewController {

    @IBOutlet weak var gameView: GameView!

    override func awakeFromNib() {
        super.awakeFromNib()

        // create a new scene
        let scene = SCNScene()

        // create and add a camera to the scene
        let cameraNode = SCNNode()
        cameraNode.camera = SCNCamera()
        scene.rootNode.addChildNode(cameraNode)

        // place the camera
        cameraNode.position = SCNVector3(x: 0, y: 0, z: 15)

        // turn off default lighting
        self.gameView!.autoenablesDefaultLighting = false

        // set the scene to the view
        self.gameView!.scene = scene

        // allows the user to manipulate the camera
        self.gameView!.allowsCameraControl = true

        // show statistics such as fps and timing information
        self.gameView!.showsStatistics = true

        // configure the view
        self.gameView!.backgroundColor = NSColor.black

        var geometry: SCNGeometry
        geometry = SCNPlane(width: 10, height: 10)
        let geometryNode = SCNNode(geometry: geometry)

        let program = SCNProgram()
        program.fragmentFunctionName = "gradientFragment"
        program.vertexFunctionName = "gradientVertex"

        let gradientMaterial = SCNMaterial()
        gradientMaterial.program = program
        geometry.materials = [gradientMaterial]

        scene.rootNode.addChildNode(geometryNode)
    }
}
As explained in the Advances in SceneKit Rendering session from WWDC 2016, SceneKit now defaults to rendering in linear space, which is required to get accurate results from lighting equations.
The difference you see comes from the fact that in the MetalKit case you are providing color components (red, green and blue values) in the sRGB color space, while in the SceneKit case you are providing the exact same components in the linear sRGB color space.
It's up to you to decide which result is the one you want. Either you want a gradient in linear space (that's what you want if you are interpolating some data) or in gamma space (that's what drawing apps use).
If you want a gradient in gamma space, you'll need to convert the color components to be linear because that's what SceneKit works with. Taking the conversion formulas from the Metal Shading Language Specification, here's a solution:
static float srgbToLinear(float c) {
    if (c <= 0.04045)
        return c / 12.92;
    else
        return powr((c + 0.055) / 1.055, 2.4);
}

fragment float4 gradientFragment(SimpleVertexWithUV in [[stage_in]],
                                 constant myPlaneNodeBuffer& scn_node [[buffer(1)]])
{
    float3 color = mix(float3(1.0, 0.6, 0.1), float3(0.5, 0.8, 1.0), sqrt(1 - in.uv.y));
    color.r = srgbToLinear(color.r);
    color.g = srgbToLinear(color.g);
    color.b = srgbToLinear(color.b);
    float4 fragColor = float4(color, 1);
    return(fragColor);
}
After learning the root cause of this problem, I did a bit more research on the topic and found another solution. Gamma-space rendering can be forced application-wide by setting SCNDisableLinearSpaceRendering to TRUE in the application's Info.plist.
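For reference, the raw Info.plist entry looks like this when editing the plist as source code:
<key>SCNDisableLinearSpaceRendering</key>
<true/>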
I'm not sure, but it looks to me like your calculation of the size of the node is off, leading your .uv to be off, depending on the position of the node.
You have:
int width = abs(scn_node.boundingBox[0].x) + abs(scn_node.boundingBox[1].x);
int height = abs(scn_node.boundingBox[0].y) + abs(scn_node.boundingBox[1].y);
I would think that should be:
int width = abs(scn_node.boundingBox[0].x - scn_node.boundingBox[1].x);
int height = abs(scn_node.boundingBox[0].y - scn_node.boundingBox[1].y);
You want the absolute difference between the two extremes, not the sum. The sum gets larger as the node moves right and down, because it effectively includes the position.
All of that said, isn't the desired (u, v) already provided to you in in.texCoords?
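If it is, a sketch of the simplified vertex shader (same structs as in the question) would be:
vertex SimpleVertexWithUV gradientVertex(VertexInput in [[ stage_in ]],
                                         constant SCNSceneBuffer& scn_frame [[buffer(0)]],
                                         constant myPlaneNodeBuffer& scn_node [[buffer(1)]])
{
    SimpleVertexWithUV vert;
    vert.position = scn_node.modelViewProjectionTransform * float4(in.position, 1.0);
    vert.uv = in.texCoords; // SceneKit already supplies 0..1 texture coordinates across the plane
    return vert;
}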
I have written code to render my scene objects to a cubemap texture of format GL_DEPTH_COMPONENT and then use this texture in a shader to determine whether a fragment is being directly lit or not, for shadowing purposes. However, my cubemap appears to come out as black. I suppose I am not setting up my FBO or rendering context sufficiently, but fail to see what is missing.
Using GL 3.3 in compatibility profile.
This is my code for creating the FBO and cubemap texture:
glGenFramebuffers(1, &fboShadow);
glGenTextures(1, &texShadow);
glBindTexture(GL_TEXTURE_CUBE_MAP, texShadow);
for (int sideId = 0; sideId < 6; sideId++) {
    // Make sure GL knows what this is going to be.
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId, 0, GL_DEPTH_COMPONENT, 512, 512, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
}
// Don't interpolate depth value sampling. Between occluder and occludee there will
// be an instant jump in depth value, not a linear transition.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
My full rendering function then looks like so:
void render() {
    // --- MAKE DEPTH CUBEMAP ---
    // Set shader program for depth testing
    glUseProgram(progShadow);

    // Get the light for which we want to generate a depth cubemap
    PointLight p = pointLights.at(0);

    // Bind our framebuffer for drawing; clean it up
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fboShadow);
    glClear(GL_DEPTH_BUFFER_BIT);

    // Make 1:1-ratio, 90-degree view frustum for a 512x512 texture.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(90.0, 1, 16.0, 16384.0);
    glViewport(0, 0, 512, 512);
    glMatrixMode(GL_MODELVIEW);

    // Set modelview and projection matrix uniforms
    setShadowUniforms();

    // Need 6 renderpasses to complete each side of the cubemap
    for (int sideId = 0; sideId < 6; sideId++) {
        // Attach depth attachment of current framebuffer to level 0 of currently relevant target of texShadow cubemap texture.
        glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId, texShadow, 0);

        // All is fine.
        GLenum status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER);
        if (status != GL_FRAMEBUFFER_COMPLETE) {
            std::cout << "Shadow FBO is broken with code " << status << std::endl;
        }

        // Push modelview matrix stack because we need to rotate and move camera every time
        glPushMatrix();
        // This does a switch-case with glRotatefs
        rotateCameraForSide(GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId);
        // Render from light's position.
        glTranslatef(-p.getX(), -p.getY(), -p.getZ());

        // Render all objects.
        for (ObjectList::iterator it = objectList.begin(); it != objectList.end(); it++) {
            (*it)->render();
        }
        glPopMatrix();
    }

    // --- RENDER SCENE ---
    // Bind default framebuffer
    glBindFramebuffer(GL_FRAMEBUFFER, 0);

    // Setup proper projection matrix with 70 degree vertical FOV and ratio according to window frame dimensions.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluPerspective(70.0, ((float)vpWidth) / ((float)vpHeight), 16.0, 16384.0);
    glViewport(0, 0, vpWidth, vpHeight);

    glUseProgram(prog);

    glMatrixMode(GL_MODELVIEW);
    glPushMatrix();
    applyCameraPerspective();

    // My PointLight class has both a position (world space) and renderPosition (camera space) Vec3f variable;
    // The lights' renderPositions get transformed with the modelview matrix by this.
    updateLights();

    // And here, among other things, the lights' camera space coordinates go to the shader.
    setUniforms();

    // Render all objects
    for (ObjectList::iterator it = objectList.begin(); it != objectList.end(); it++) {
        // Object texture goes to texture unit 0
        GLuint usedTexture = glTextureList.find((*it)->getTextureName())->second;
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, usedTexture);
        glUniform1i(textureLoc, 0);

        // Cubemap goes to texture unit 1
        glActiveTexture(GL_TEXTURE1);
        glBindTexture(GL_TEXTURE_CUBE_MAP, texShadow);
        glUniform1i(shadowLoc, 1);

        (*it)->render();
    }
    glPopMatrix();

    frameCount++;
}
The shader program for rendering depth values ("progShadow") is simple.
Vertex shader:
#version 330
in vec3 position;
uniform mat4 modelViewMatrix, projectionMatrix;

void main() {
    gl_Position = projectionMatrix * modelViewMatrix * vec4(position, 1);
}
Fragment shader:
#version 330
void main() {
    // OpenGL sets the depth anyway. Nothing to do here.
}
The shader program for final rendering ("prog") has a fragment shader which looks something like this:
#version 330
#define MAX_LIGHTS 8

in vec3 fragPosition;
in vec3 fragNormal;
in vec2 fragTexCoordinates;

out vec4 fragColor;

uniform sampler2D colorTexture;
uniform samplerCubeShadow shadowCube;
uniform uint activeLightCount;

struct Light {
    vec3 position;
    vec3 diffuse;
    float cAtt;
    float lAtt;
    float qAtt;
};

// Index 0 to (activeLightCount - 1) need to be the active lights.
uniform Light lights[MAX_LIGHTS];

void main() {
    vec3 lightColor = vec3(0, 0, 0);
    vec3 normalFragmentToLight[MAX_LIGHTS];
    float distFragmentToLight[MAX_LIGHTS];
    float distEyeToFragment = length(fragPosition);

    // Accumulate all light in "lightColor" variable
    for (uint i = uint(0); i < activeLightCount; i++) {
        normalFragmentToLight[i] = normalize(lights[i].position - fragPosition);
        distFragmentToLight[i] = distance(fragPosition, lights[i].position);
        float attenuation = (lights[i].cAtt
                             + lights[i].lAtt * distFragmentToLight[i]
                             + lights[i].qAtt * pow(distFragmentToLight[i], 2.0));
        float dotProduct = dot(fragNormal, normalFragmentToLight[i]);
        lightColor += lights[i].diffuse * max(dotProduct, 0.0) / attenuation;
    }

    // Shadow mapping only for light at index 0 for now.
    float distOccluderToLight = texture(shadowCube, vec4(normalFragmentToLight[0], 1));
    // My geometries use inches as units, hence a large bias of 1
    bool isLit = (distOccluderToLight + 1) < distFragmentToLight[0];
    fragColor = texture2D(colorTexture, fragTexCoordinates) * vec4(lightColor, 1.0f) * int(isLit);
}
I have verified that all uniform location variables are set to a proper value (i.e. not -1).
It might be worth noting that I make no call to glBindFragDataLocation() for "progShadow" prior to linking it, because no color value should be written by that shader.
See anything obviously wrong here?
For shadow maps, depth buffer internal format is pretty important (too small and things look awful, too large and you eat memory bandwidth). You should use a sized format (e.g. GL_DEPTH_COMPONENT24) to guarantee a certain size, otherwise the implementation will pick whatever it wants. As for debugging a cubemap shadow map, the easiest thing to do is actually to draw the scene into each cube face and output color instead of depth. Then, where you currently try to use the cubemap to sample depth, write the sampled color to fragColor instead. You can rule out view issues immediately this way.
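For example, the allocation in your setup code would only need the internal format changed (a sketch):
// Request an explicitly sized depth format instead of letting the driver choose.
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + sideId, 0, GL_DEPTH_COMPONENT24,
             512, 512, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);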
There is another much more serious issue, however. You are using samplerCubeShadow, but you have not set GL_TEXTURE_COMPARE_MODE for your cube map. Attempting to sample from a depth texture with this sampler type and without GL_TEXTURE_COMPARE_MODE = GL_COMPARE_REF_TO_TEXTURE will produce undefined results. Even if you did have this mode set properly, the 4th component of the texture coordinates is used as the depth comparison reference -- a constant value of 1.0 is NOT what you want.
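Setting that up is two extra texture parameters at creation time (a sketch):
// Required for samplerCubeShadow: compare the reference depth against the stored depth.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);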
Likewise, the depth buffer does not store linear distance, so you cannot directly compare the distance you computed here:
distFragmentToLight[i] = distance(fragPosition, lights[i].position);
Instead, something like this will be necessary:
float VectorToDepth (vec3 Vec)
{
    vec3 AbsVec = abs(Vec);
    float LocalZcomp = max(AbsVec.x, max(AbsVec.y, AbsVec.z));

    // Replace f and n with the far and near plane values you used when
    // you drew your cube map.
    const float f = 2048.0;
    const float n = 1.0;

    float NormZComp = (f + n) / (f - n) - (2 * f * n) / (f - n) / LocalZcomp;
    return (NormZComp + 1.0) * 0.5;
}

float LightDepth = VectorToDepth(fragPosition - lights[i].position);
float depth_compare = texture(shadowCube, vec4(normalFragmentToLight[0], LightDepth));
* Code for float VectorToDepth(vec3 Vec) borrowed from Omnidirectional shadow mapping with depth cubemap
Now depth_compare will be a value between 0.0 (completely in shadow) and 1.0 (completely out of shadow). If you have linear texture filtering enabled, the hardware will sample the depth at 4 points and may give you a form of 2x2 PCF filtering. If you have nearest texture filtering, then it will either be 1.0 or 0.0.
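If you want that PCF behaviour, the filter parameters from your setup code would change to linear (a sketch):
// With a depth-comparison sampler, LINEAR filtering yields 2x2 PCF on most hardware.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);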
I have a program that generates a heightmap and then displays it as a mesh with OpenGL. When I try to add lighting, it ends up with weird square shapes covering the mesh. They are more noticeable in some areas than others, but are always there.
I was using a quad mesh, but nothing changed after switching to a triangle mesh. I've used at least three different methods to calculate the vertex normals, all with the same effect. I was doing the lighting manually with shaders, but nothing changes when using the built-in OpenGL lighting system.
My latest normal-generating code (faces is an array of indices into verts, the vertex array):
int i;
for (i = 0; i < NINDEX; i += 3) {
    vec v[3];
    v[0] = verts[faces[i + 0]];
    v[1] = verts[faces[i + 1]];
    v[2] = verts[faces[i + 2]];
    vec v1 = vec_sub(v[1], v[0]);
    vec v2 = vec_sub(v[2], v[0]);
    vec n = vec_norm(vec_cross(v2, v1));
    norms[faces[i + 0]] = vec_add(norms[faces[i + 0]], n);
    norms[faces[i + 1]] = vec_add(norms[faces[i + 1]], n);
    norms[faces[i + 2]] = vec_add(norms[faces[i + 2]], n);
}
for (i = 0; i < NVERTS; i++) {
    norms[i] = vec_norm(norms[i]);
}
That isn't the only code I've used, though, so I doubt it is the cause of the problem.
I draw the mesh with:
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, verts);
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, 0, norms);
glDrawElements(GL_TRIANGLES, NINDEX, GL_UNSIGNED_SHORT, faces);
And I'm not currently using any shaders.
What could be causing this?
EDIT: A more comprehensive set of screenshots:
Wireframe
Flat shading, OpenGL lighting
Smooth shading, OpenGL lighting
Lighting done in shader
For the last one, the shader code is
Vertex:
varying vec3 lightvec, normal;

void main() {
    vec3 lightpos = vec3(0, 0, 100);
    vec3 v = vec3(gl_ModelViewMatrix * gl_Vertex);
    normal = gl_NormalMatrix * gl_Normal;
    lightvec = normalize(lightpos - v);
    gl_Position = ftransform();
}
Fragment:
varying vec3 lightvec, normal;

void main(void) {
    float l = dot(lightvec, normal);
    gl_FragColor = vec4(l, l, l, 1);
}
You need to either normalize the normal in the fragment shader, like so:
varying vec3 lightvec, normal;

void main(void) {
    vec3 normalNormed = normalize(normal);
    float l = dot(lightvec, normalNormed);
    gl_FragColor = vec4(l, l, l, 1);
}
This can be expensive, though. What will also work in this case, with directional lights, is to use vertex lighting, so calculate the light value in the vertex shader:
varying float lightIntensity;

void main() {
    vec3 lightpos = vec3(0, 0, 100);
    vec3 v = vec3(gl_ModelViewMatrix * gl_Vertex);
    // locals now, since only the intensity is interpolated across the triangle
    vec3 normal = normalize(gl_NormalMatrix * gl_Normal);
    vec3 lightvec = normalize(lightpos - v);
    lightIntensity = dot(normal, lightvec);
    gl_Position = ftransform();
}
and use it in the fragment shader,
varying float lightIntensity;

void main(void) {
    float l = lightIntensity;
    gl_FragColor = vec4(l, l, l, 1);
}
I hope this fixes it; let me know if it doesn't.
EDIT: Here's a small diagram that explains what is most likely happening.
EDIT2:
If that doesn't help, add more triangles. Interpolate the values of your heightmap and add some vertices in between.
Alternatively, try changing your tessellation scheme. For example, a mesh of equilateral triangles like so could make the artifacts less prominent.
You'll have to do some interpolation on your heightmap.
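As a rough illustration, bilinear interpolation of a heightmap at a fractional grid position could look like this (a sketch; heights, MAP_W and MAP_H are hypothetical names, not from your code):
/* Sample a heightmap at fractional grid coordinates (fx, fz) by bilinear interpolation. */
float sample_height(const float *heights, int MAP_W, int MAP_H, float fx, float fz) {
    int x0 = (int)fx, z0 = (int)fz;
    int x1 = x0 + 1 < MAP_W ? x0 + 1 : x0;
    int z1 = z0 + 1 < MAP_H ? z0 + 1 : z0;
    float tx = fx - x0, tz = fz - z0;
    float h00 = heights[z0 * MAP_W + x0], h10 = heights[z0 * MAP_W + x1];
    float h01 = heights[z1 * MAP_W + x0], h11 = heights[z1 * MAP_W + x1];
    float a = h00 + (h10 - h00) * tx;   /* interpolate along x at row z0 */
    float b = h01 + (h11 - h01) * tx;   /* interpolate along x at row z1 */
    return a + (b - a) * tz;            /* interpolate along z */
}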
Otherwise I have no idea. Good luck!
I don't have a definitive answer for the non-shader versions, but I wanted to add that if you're doing per-pixel lighting in your fragment shader, you should probably be normalizing the normal and lightvec inside the fragment shader.
If you don't do this, they may not be unit length (a linear interpolation between two normalized vectors is not necessarily normalized). This could explain some of the artifacts you see in the shader version, as the magnitude of the dot product would vary as a function of the distance from the vertices, which kind of looks like what you're seeing.
EDIT: Another thought, are you doing any non-uniform scaling (different x,y,z) of the mesh when rendering the non-shader version? If you scale it, then you need to either modify the normals by the inverse scale factor, or set glEnable(GL_NORMALIZE). See here for more:
http://www.lighthouse3d.com/tutorials/glsl-tutorial/normalization-issues/
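A minimal sketch of the second option in the fixed-function path (the non-uniform scale here is hypothetical):
glEnable(GL_NORMALIZE);      /* let GL renormalize normals after the modelview transform */
glScalef(1.0f, 1.0f, 2.0f);  /* example non-uniform scale */
/* ... draw the mesh with glDrawElements as before ... */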
I've searched SO but I just can't figure this out. The other questions didn't help or I didn't understand them.
The problem is, I have a bunch of points in a 3D image. The points belong to a rectangle, which doesn't look like a rectangle from the 3D camera's view because of perspective. The task is to map the points from that rectangle to the screen. I've seen approaches that some call "quad to quad transformations", but most of them map a 2D quadrilateral to another one. Since I have the X, Y and Z coordinates of the rectangle in the real world, I'm looking for an easier way. Does anyone know a practical algorithm or method for doing this?
If it helps, my 3d camera is actually a Kinect device with OpenNI and NITE middlewares, and I'm using WPF.
Thanks in advance.
Edit:
I also found the 3D projection page on Wikipedia, which uses angles and cosines, but that seems like a difficult route (finding angles in the 3D image) and I'm not sure if it's the right solution.
You might want to check out projection matrices.
That's how any 3D rasterizer "flattens" 3D volumes onto a 2D screen.
See this code to get the projection matrix for a given WPF camera:
private static Matrix3D GetProjectionMatrix(OrthographicCamera camera, double aspectRatio)
{
    // This math is identical to what you find documented for
    // D3DXMatrixOrthoRH with the exception that in WPF only
    // the camera's width is specified. Height is calculated
    // from width and the aspect ratio.
    double w = camera.Width;
    double h = w / aspectRatio;
    double zn = camera.NearPlaneDistance;
    double zf = camera.FarPlaneDistance;
    double m33 = 1 / (zn - zf);
    double m43 = zn * m33;

    return new Matrix3D(
        2 / w, 0,     0,   0,
        0,     2 / h, 0,   0,
        0,     0,     m33, 0,
        0,     0,     m43, 1);
}

private static Matrix3D GetProjectionMatrix(PerspectiveCamera camera, double aspectRatio)
{
    // This math is identical to what you find documented for
    // D3DXMatrixPerspectiveFovRH with the exception that in
    // WPF the camera's horizontal rather the vertical
    // field-of-view is specified.
    double hFoV = MathUtils.DegreesToRadians(camera.FieldOfView);
    double zn = camera.NearPlaneDistance;
    double zf = camera.FarPlaneDistance;

    double xScale = 1 / Math.Tan(hFoV / 2);
    double yScale = aspectRatio * xScale;
    double m33 = (zf == double.PositiveInfinity) ? -1 : (zf / (zn - zf));
    double m43 = zn * m33;

    return new Matrix3D(
        xScale, 0,      0,   0,
        0,      yScale, 0,   0,
        0,      0,      m33, -1,
        0,      0,      m43, 0);
}

/// <summary>
/// Computes the effective projection matrix for the given
/// camera.
/// </summary>
public static Matrix3D GetProjectionMatrix(Camera camera, double aspectRatio)
{
    if (camera == null)
    {
        throw new ArgumentNullException("camera");
    }

    PerspectiveCamera perspectiveCamera = camera as PerspectiveCamera;
    if (perspectiveCamera != null)
    {
        return GetProjectionMatrix(perspectiveCamera, aspectRatio);
    }

    OrthographicCamera orthographicCamera = camera as OrthographicCamera;
    if (orthographicCamera != null)
    {
        return GetProjectionMatrix(orthographicCamera, aspectRatio);
    }

    MatrixCamera matrixCamera = camera as MatrixCamera;
    if (matrixCamera != null)
    {
        return matrixCamera.ProjectionMatrix;
    }

    throw new ArgumentException(String.Format("Unsupported camera type '{0}'.", camera.GetType().FullName), "camera");
}
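To actually map a 3D point to the screen with these, you also need the camera's view (world-to-camera) matrix. Here is a sketch, not verified, where viewMatrix, worldPoint, viewportWidth and viewportHeight are assumptions:
// Project a world-space point to pixel coordinates (WPF matrices are row-major,
// so the point is transformed by the view matrix first, then the projection matrix).
Matrix3D viewProjection = viewMatrix * GetProjectionMatrix(camera, viewportWidth / viewportHeight);
Point4D clip = viewProjection.Transform(new Point4D(worldPoint.X, worldPoint.Y, worldPoint.Z, 1.0));
double ndcX = clip.X / clip.W;                         // normalized device coordinates, -1..1
double ndcY = clip.Y / clip.W;
double screenX = (ndcX + 1.0) * 0.5 * viewportWidth;
double screenY = (1.0 - ndcY) * 0.5 * viewportHeight;  // flip Y: screen origin is top-left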
You could do a basic orthographic projection (I'm thinking in terms of raytracing, so this might not apply to what you're doing):
The code is quite intuitive:
for y in image.height:
    for x in image.width:
        ray = new Ray(x, 0, y, Vector(0, 1, 0))  # Origin varies over the image plane, pointing forward
        intersection = prism.intersection(ray)   # Since you aren't shading, you can check only for intersections.
        image.setPixel(x, y, intersection)       # Returns black and white image of prism mapped to plane
You just shoot vectors with a direction of (0, 1, 0) directly out into space and record which ones hit.
I found this. It uses straightforward mathematics instead of matrices.
This is called perspective projection to convert from a 3D vertex to a 2D screen vertex. I used this to help me with my 3D program I have made.
HorizontalFactor = ScreenWidth / Tan(PI / 4)
VerticalFactor = ScreenHeight / Tan(PI / 4)
ScreenX = ((X * HorizontalFactor) / Y) + HalfWidth
ScreenY = ((Z * VerticalFactor) / Y) + HalfHeight
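A minimal code version of those formulas (assuming the camera sits at the origin looking along +Y, with Y as the depth axis; the method name is made up):
// Perspective-project a camera-space point (x, y, z) onto the screen.
static System.Windows.Point ProjectPoint(double x, double y, double z,
                                         double screenWidth, double screenHeight)
{
    double horizontalFactor = screenWidth / Math.Tan(Math.PI / 4);
    double verticalFactor = screenHeight / Math.Tan(Math.PI / 4);
    double screenX = (x * horizontalFactor) / y + screenWidth / 2;
    double screenY = (z * verticalFactor) / y + screenHeight / 2;
    return new System.Windows.Point(screenX, screenY);
}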
Hope this helps. I think it's what you were looking for. Sorry about the formatting (I'm new here).
Mapping points in a 3D world to a 2D screen is part of the job of frameworks like OpenGL and Direct3D. It's called rasterisation, as Heandel said. Perhaps you could use Direct3D?