Related
I am making a solar system simulator in Processing. I have initialised an array in the Planet class which holds the textures for the PShape planetary objects.
I have added this array to the constructor. Then I tried to call it in the main class where I have initialised the planets, but I get the following errors:
Main class: Syntax Error - Error on parameter or method declaration near 'void'?
Clearly I am missing something or done something wrong. Thank you for the reply.
// Solar_system_enginee main class
//Solar system and its gravitational simulation.
// The central sun applies gravitation to other celestial bodies
// Sketch-level assets and camera/zoom state.
PImage background;
PImage sun_Texture;
float cameraX, cameraY;
// zoom and pan
float scale = 0.06;
float xPan = 600;
float yPan = 398;
boolean zoomIn = false;
boolean zoomOut = false;
//includes
Sun sun;
Planet mercury;
Planet venus;
Planet earth;
Planet mars;
Planet jupiter;
Planet saturn;
Planet uranus;
Planet neptune;
Planet pluto;
// NOTE(review): this declaration is missing its trailing ';' (the compile
// error the question asks about), and the type looks wrong — setup() indexes
// it like a PImage[] texture array, not a Planet.
Planet _planetTextures
//screen setup
// Processing entry point; runs once. Creates the window, loads the sun
// texture, and spawns the celestial bodies.
void setup() {
//Screen size
size(1200, 799, P3D);
sun_Texture = loadImage("sun.jpg");
// Celestial object properties
// 1; Planet's size
// 2; Planet's color / texture
// 3; Movement speed
// 4; Planet's path color
// 5; Planet's path size
// 6; Distance from Sun
// 7; Planet's number of moons // need to check it
//Spawn celestial objects
sun = new Sun(696, color(255, 50, 0));
// NOTE(review): _planetTextures is declared as a Planet and is never
// allocated or filled before being indexed here — this cannot work as-is.
mercury = new Planet(5, 0.2, 2, color(255, 200, 100), 1448, _planetTextures[0]);
venus = new Planet(12, 0.2, 2, color(150, 10, 100), 1499, _planetTextures[1]);
earth = new Planet(13, 0.2, 2, color(255, 255, 100, 100), 1540, _planetTextures[2]);
//mars = new Planet(7, 0.5, 2, color(167, 45, 160), 1618);
//jupiter = new Planet(143, 0.5, 2, color(167, 45, 160), 2169);
//saturn = new Planet(121, 0.5, 2, color(167, 45, 160), 2825);
//uranus = new Planet(51, 0.5, 2, color(167, 45, 160), 4263);
//neptune = new Planet(50,0.5, 2, color(167, 45, 160), 5886);
//pluto = new Planet(2, 0.5, 2, color(167, 45, 160), 6486);
}
// sceene setup
// Draws the star backdrop; called from draw() every frame.
void sceene() {
// Load background image
// NOTE(review): loadImage() here runs once per frame — load the image once
// in setup() instead (the answer below makes this change).
background = loadImage("stars.jpg");
background(background);
// Set it in the middle of the screen
translate(width/2, height/2, 0);
}
// draw setup
// Main render loop: applies zoom/pan transforms, draws the background, and
// then displays and advances each enabled celestial body.
void draw() {
// Zoom towards the center of the screen, rather the edge of the screen
translate(width/2, height/2, 0);
scale(scale);
// Translate Y and X pan
translate( -xPan, -yPan);
// Draw background
sceene();
// Zoom In
// Note: scale(scale) was already applied above, so a change made here
// takes effect on the next frame.
if (zoomIn) {
scale *= 1.01;
}
// Zoom Out
if (zoomOut) {
scale /= 1.01;
}
// Display sun and rotate Y axis camera around sun
sun.display();
sun.move(mouseY);
//mercury
mercury.display();
mercury.move();
//venus
venus.display();
venus.move();
//earth
earth.display();
earth.move();
////mars
//mars.display();
//mars.move();
////jupiter
//jupiter.display();
//jupiter.move();
////saturn
//saturn.display();
//saturn.move();
////uranus
//uranus.display();
//uranus.move();
////neptune
//neptune.display();
//neptune.move();
////pluto
//pluto.display();
//pluto.move();
}
// Key press record function, responsible for zoom in and out of scene
// Key press record function, responsible for zoom in and out of scene.
// UP starts a continuous zoom-in, DOWN a continuous zoom-out; starting one
// direction cancels the other.
void keyPressed() {
if (keyCode == UP) {
zoomIn = true;
zoomOut = false;
}
if (keyCode == DOWN) {
zoomIn = false;
zoomOut = true;
}
}
// Releasing an arrow key stops the corresponding zoom direction.
void keyReleased() {
if (keyCode == UP) {
zoomIn = false;
}
if (keyCode == DOWN) {
zoomOut = false;
}
}
// Planets class
// A planet orbiting the sun: renders its orbit path and textured sphere,
// and advances its orbital angle each frame.
class Planet {
// Accumulated orbital angle (despite the name, this is an angle in
// rotation units, not a radius).
float radius = 0;
float moveSpeed;
float distance;
float size;
color planetColor;
int pathSize;
color pathColor;
PShape planet;
PImage[] planetTextures = new PImage [3];
Moon moon;
Planet(int _size, float _speed, int _path_size, color _path_color, float _dist, PImage[] _planetTextures) { //color _color
size = _size;
//planetColor = _color;
moveSpeed = _speed;
pathColor = _path_color;
pathSize = _path_size;
distance = _dist;
planetTextures = _planetTextures;
moon = new Moon(10, color(200, 200, 100));
// Set planet's texture
noStroke();
noFill();
planet = createShape(SPHERE, size);
planet.setTexture(_planetTextures[0]);
// Load textures into an array
// NOTE(review): these loads happen AFTER setTexture() above, and run again
// for every Planet instance — the answer below moves asset loading into the
// main sketch's setup().
planetTextures[0] = loadImage ("mercury.jpg");
planetTextures[1] = loadImage ("venus.jpg");
planetTextures[2] = loadImage ("earth.jpg");
}
// Draws the orbit ellipse and the textured planet body (plus its moon).
void display() {
pushMatrix();
//path
stroke(pathColor);
strokeWeight(2);
noFill();
rotateY(radius * moveSpeed);
rotateX(PI/2);
ellipse(0, 0, 2*distance, 2*distance);
//body
noStroke();
fill(planetColor);
translate(distance, 0, 0);
shape(planet);
//sphere(size);
moon.display();
moon.move();
popMatrix();
}
// Advances the orbital angle by the per-frame speed.
void move() {
radius += moveSpeed;
}
}
The Syntax Error - Missing name or ; near ‘void’? error occurs between Planet _planetTextures and void setup().
Sometimes errors can be a bit cryptic; in this case it's as straightforward as it can be: you forgot to add ; at the end of Planet _planetTextures.
That being said, I've spotted a bunch of other areas in your code you might want to revise:
_planetTextures is later used as if it's a PImage[] in setup() when you instantiate planets
the Planet constructor takes in a PImage[] _planetTextures argument. If a single planet uses a single texture, maybe you meant PImage _planetTexture instead ?
The Planet constructor loads 3 images (mercury, venus, earth): shouldn't it be the task of the main sketch to load all assets, then pass each single PImage to its respective planet (instead of reloading the same 3 images for every single Planet instance)?
since the planet texture is only used once, in the constructor, there's no need for the PImage[] property
the background PImage was loaded continuously, multiple times per frame, in sceene() (called from draw()): you might want to load the image once in setup.
Here is your code with the above tweaks applied:
//Solar system and its gravitational simulation.
// The central sun applies gravitation to other celestial bodies
// Sketch-level assets and camera/zoom state.
PImage background;
PImage sun_Texture;
float cameraX, cameraY;
// zoom and pan
float scale = 0.06;
float xPan = 600;
float yPan = 398;
boolean zoomIn = false;
boolean zoomOut = false;
//includes
Sun sun;
Planet mercury;
Planet venus;
Planet earth;
Planet mars;
Planet jupiter;
Planet saturn;
Planet uranus;
Planet neptune;
Planet pluto;
// Allocate the texture array up front: a bare "PImage[] planetTextures;" is
// null, so "planetTextures[0] = loadImage(...)" in setup() would throw a
// NullPointerException before the first texture even loads.
PImage[] planetTextures = new PImage[3];
//screen setup
// Processing entry point; runs once. Creates the window, loads all image
// assets, and spawns the celestial bodies.
void setup() {
//Screen size
size(1200, 799, P3D);
sun_Texture = loadImage("sun.jpg");
// Celestial object properties
// 1; Planet's size
// 2; Planet's color / texture
// 3; Movement speed
// 4; Planet's path color
// 5; Planet's path size
// 6; Distance from Sun
// 7; Planet's number of moons // need to check it
// Load background image
background = loadImage("stars.jpg");
// Load textures into an array
// NOTE(review): make sure planetTextures has been allocated
// (e.g. new PImage[3]) before these assignments, otherwise this line
// throws a NullPointerException.
planetTextures[0] = loadImage ("mercury.jpg");
planetTextures[1] = loadImage ("venus.jpg");
planetTextures[2] = loadImage ("earth.jpg");
//Spawn celestial objects
sun = new Sun(696, color(255, 50, 0));
mercury = new Planet(5, 0.2, 2, color(255, 200, 100), 1448, planetTextures[0]);
venus = new Planet(12, 0.2, 2, color(150, 10, 100), 1499, planetTextures[1]);
earth = new Planet(13, 0.2, 2, color(255, 255, 100, 100), 1540, planetTextures[2]);
//mars = new Planet(7, 0.5, 2, color(167, 45, 160), 1618);
//jupiter = new Planet(143, 0.5, 2, color(167, 45, 160), 2169);
//saturn = new Planet(121, 0.5, 2, color(167, 45, 160), 2825);
//uranus = new Planet(51, 0.5, 2, color(167, 45, 160), 4263);
//neptune = new Planet(50,0.5, 2, color(167, 45, 160), 5886);
//pluto = new Planet(2, 0.5, 2, color(167, 45, 160), 6486);
}
// sceene setup
// Draws the (preloaded) star backdrop; called from draw() every frame.
void scene() {
background(background);
// Set it in the middle of the screen
translate(width/2, height/2, 0);
}
// draw setup
// Main render loop: applies zoom/pan transforms, draws the background, and
// then displays and advances each enabled celestial body.
void draw() {
// Zoom towards the center of the screen, rather the edge of the screen
translate(width/2, height/2, 0);
scale(scale);
// Translate Y and X pan
translate( -xPan, -yPan);
// Draw background
scene();
// Zoom In
// Note: scale(scale) was already applied above, so a change made here
// takes effect on the next frame.
if (zoomIn) {
scale *= 1.01;
}
// Zoom Out
if (zoomOut) {
scale /= 1.01;
}
// Display sun and rotate Y axis camera around sun
sun.display();
sun.move(mouseY);
//mercury
mercury.display();
mercury.move();
//venus
venus.display();
venus.move();
//earth
earth.display();
earth.move();
////mars
//mars.display();
//mars.move();
////jupiter
//jupiter.display();
//jupiter.move();
////saturn
//saturn.display();
//saturn.move();
////uranus
//uranus.display();
//uranus.move();
////neptune
//neptune.display();
//neptune.move();
////pluto
//pluto.display();
//pluto.move();
}
// Key press record function, responsible for zoom in and out of scene
// Arrow-key handler for zooming: UP begins a continuous zoom-in, DOWN a
// continuous zoom-out. Starting one direction cancels the other.
void keyPressed() {
  if (keyCode == UP) {
    zoomIn = true;
    zoomOut = false;
  } else if (keyCode == DOWN) {
    zoomOut = true;
    zoomIn = false;
  }
}
// Releasing an arrow key halts the matching zoom direction.
void keyReleased() {
  if (keyCode == UP) {
    zoomIn = false;
  } else if (keyCode == DOWN) {
    zoomOut = false;
  }
}
// A planet orbiting the sun: renders its orbit path and textured sphere,
// and advances its orbital angle each frame. Takes a single preloaded
// PImage texture from the main sketch.
class Planet {
// Accumulated orbital angle (despite the name, this is an angle in
// rotation units, not a radius).
float radius = 0;
float moveSpeed;
float distance;
float size;
color planetColor;
int pathSize;
color pathColor;
PShape planet;
Moon moon;
Planet(int _size, float _speed, int _path_size, float _path_color, float _dist, PImage _planetTexture) { //color _color
size = _size;
//planetColor = _color;
moveSpeed = _speed;
pathColor = _path_color;
pathSize = _path_size;
distance = _dist;
moon = new Moon(10, color(200, 200, 100));
// Set planet's texture
noStroke();
noFill();
planet = createShape(SPHERE, size);
planet.setTexture(_planetTexture);
}
// Draws the orbit ellipse and the textured planet body (plus its moon).
void display() {
pushMatrix();
//path
stroke(pathColor);
strokeWeight(2);
noFill();
rotateY(radius * moveSpeed);
rotateX(PI/2);
ellipse(0, 0, 2*distance, 2*distance);
//body
noStroke();
fill(planetColor);
translate(distance, 0, 0);
shape(planet);
//sphere(size);
moon.display();
moon.move();
popMatrix();
}
// Advances the orbital angle by the per-frame speed.
void move() {
radius += moveSpeed;
}
}
It would've made it easier for others to test if you had also posted the Sun/Moon classes and the textures used: something to remember for future posts. (The easier it is for others to replicate your issue, the more likely you are to get (good) answers.)
Not bad progress and you're getting the main points about classes.
There might still be confusion over how arguments get passed from the sketch (global) scope to instances, but that may also be an artefact of perhaps trying to write a lot of code in one go?
If that's the case, I recommend slowing down, writing one bit of functionality at a time (e.g. one function or class method at a time), and running the sketch and testing to ensure it works as expected first. Once a bit of code works as expected you can move on to the next, and ideally test combinations of the newly added functionalities to ensure that there are no weird interactions between classes. Constantly testing how the code behaves may appear slower, but being assured the code works as you go along is faster in the long run, and it definitely makes debugging much easier (the problem is likely to be in the most recently added functionality, which is also freshest in your memory). Have fun coding!
Within a MacOS app, I have a pattern defined by a SCNProgram that is mapped to an SCNPlane.
It looks like this:
The shader is supposed to make the rows of triangles shift , like in this video. The video is a screen grab of the same shader running inside of an MTKview.
animated texture
In the SceneKit version of this shader, the shader only animates when I click on the plane within the view.
How do I make the SceneKit view (or Scene?) continually animate the shader all the time? Again this app is on MacOS. I have tried setting
self.gameView!.isPlaying = true
but that doesn't seem to fix the problem
Here is the Metal shader:
#include <metal_stdlib>
using namespace metal;
#include <SceneKit/scn_metal>
// Per-node uniforms that SceneKit binds for SCNProgram shaders; the field
// order mirrors SceneKit's node-buffer layout (see SCNProgram docs).
struct myPlaneNodeBuffer {
float4x4 modelTransform;
float4x4 modelViewTransform;
float4x4 normalTransform;
float4x4 modelViewProjectionTransform;
float2x3 boundingBox;
};
// Vertex attributes fetched via SceneKit's vertex semantics.
typedef struct {
float3 position [[ attribute(SCNVertexSemanticPosition) ]];
float2 texCoords [[ attribute(SCNVertexSemanticTexcoord0) ]];
} VertexInput;
// Pseudo-random hash in [0, 1) derived from a 2-D coordinate — the classic
// sin/dot/fract shader one-liner.
static float rand(float2 uv)
{
    float seed = dot(uv, float2(12.9898, 78.233));
    return fract(sin(seed) * 43758.5453);
}
// Maps a 2-D coordinate onto a triangular tiling: x is sheared by half of y,
// and `offs` appears to pick the upper/lower triangle within each sheared
// cell, so that floor() of the result identifies a unique triangle.
static float2 uv2tri(float2 uv)
{
float sx = uv.x - uv.y / 2;
float sxf = fract(sx);
float offs = step(fract(1 - uv.y), sxf);
return float2(floor(sx) * 2 + sxf + offs, uv.y);
}
// Rasterizer payload: clip-space position plus interpolated texture coords.
struct SimpleVertexWithUV
{
float4 position [[position]];
float2 uv;
};
// Vertex stage: transforms the plane's vertices to clip space with
// SceneKit's per-node MVP matrix and passes the UVs through.
vertex SimpleVertexWithUV trianglequiltVertex(VertexInput in [[ stage_in ]],
constant SCNSceneBuffer& scn_frame [[buffer(0)]],
constant myPlaneNodeBuffer& scn_node [[buffer(1)]])
{
SimpleVertexWithUV vert;
vert.position = scn_node.modelViewProjectionTransform * float4(in.position, 1.0);
vert.uv = in.texCoords;
return vert;
}
// Fragment stage: animated triangle-quilt pattern driven by scn_frame.time.
// Each row of triangles scrolls vertically and is shifted horizontally by a
// per-row random offset that is smoothly interpolated between time steps;
// each triangle's brightness pulses with a random phase.
fragment float4 trianglequiltFragment(SimpleVertexWithUV in [[stage_in]],
constant SCNSceneBuffer& scn_frame [[buffer(0)]],
constant myPlaneNodeBuffer& scn_node [[buffer(1)]])
{
float4 fragColor;
// Tile the pattern 10x across the plane.
float2 uv = in.uv*10;
// scn_frame.time only advances while SceneKit is actually rendering frames
// — hence the need for continuous rendering (see the answer below).
float timer = scn_frame.time;
uv.y += timer;
float t = timer * 0.8;
float tc = floor(t);
float tp = smoothstep(0, 0.8, fract(t));
// Interpolate the per-row random offset between consecutive time steps.
float2 r1 = float2(floor(uv.y), tc);
float2 r2 = float2(floor(uv.y), tc + 1);
float offs = mix(rand(r1), rand(r2), tp);
uv.x += offs * 8;
float2 p = uv2tri(uv);
// Random phase per triangle, pulsed over time.
float ph = rand(floor(p)) * 6.3 + p.y * 0.2;
float c = abs(sin(ph + timer));
fragColor = float4(c, c, c, 1);
return(fragColor);
}
here is the view controller:
import SceneKit
import QuartzCore
// Hosts the SceneKit view and sets up a plane whose material is driven by the
// custom "trianglequilt" Metal SCNProgram.
class GameViewController: NSViewController {

    // NOTE(review): this was written "#IBOutlet" in the original — a forum
    // markup mangling of '@'; Swift attribute syntax requires '@IBOutlet'.
    @IBOutlet weak var gameView: GameView!

    override func awakeFromNib() {
        super.awakeFromNib()

        // Create a new, empty scene.
        let scene = SCNScene()
        Swift.print(gameView.colorPixelFormat.rawValue)

        // Create and add a camera to the scene.
        let cameraNode = SCNNode()
        cameraNode.camera = SCNCamera()
        scene.rootNode.addChildNode(cameraNode)

        // Place the camera.
        cameraNode.position = SCNVector3(x: 0, y: 0, z: 15)

        // Turn off default lighting.
        gameView.autoenablesDefaultLighting = false

        // Set the scene to the view.
        gameView.scene = scene

        // Allows the user to manipulate the camera.
        gameView.allowsCameraControl = true

        // Show statistics such as fps and timing information.
        gameView.showsStatistics = true

        // Configure the view.
        gameView.backgroundColor = NSColor.black

        // Play it always? (isPlaying alone does not keep the shader animating
        // — see the accepted fix below: rendersContinuously = true.)
        gameView.isPlaying = true

        // A plane to host the custom Metal program.
        var geometry: SCNGeometry
        geometry = SCNPlane(width: 10, height: 10)
        let geometryNode = SCNNode(geometry: geometry)

        // Wire the Metal vertex/fragment functions to the plane's material.
        let program = SCNProgram()
        program.fragmentFunctionName = "trianglequiltFragment"
        program.vertexFunctionName = "trianglequiltVertex"
        let gradientMaterial = SCNMaterial()
        gradientMaterial.program = program
        gradientMaterial.specular.contents = NSColor.black
        gradientMaterial.locksAmbientWithDiffuse = true
        geometry.materials = [gradientMaterial]
        geometryNode.geometry?.firstMaterial?.lightingModel = .constant
        scene.rootNode.addChildNode(geometryNode)
    }
}
Try setting:
gameView?.rendersContinuously = true
(You don’t need all those extra ‘self.’s either.)
I am rendering H264 video frames from an IP camera which are decoded into BGRA32 pixel format via DirectX 11 and SharpDx in WPF via D3DImage control.
After a lot of research and looking at various samples and examples, I have finally managed to get it to render H264 frames with DirectX 11.
My current setup involves setting up the vertex shader and pixel shader as below:
var device = this.Device;
var context = device.ImmediateContext;
// Compile Vertex and Pixel shaders
vertexShaderByteCode = ShaderBytecode.CompileFromFile("TriangleShader.fx", "VSMain", "vs_5_0", ShaderFlags.Debug, EffectFlags.None);
vertexShader = new VertexShader(device, vertexShaderByteCode);
pixelShaderByteCode = ShaderBytecode.CompileFromFile("TriangleShader.fx", "PSMain", "ps_5_0", ShaderFlags.Debug, EffectFlags.None);
pixelShader = new PixelShader(device, pixelShaderByteCode);
// Layout from VertexShader input signature
// An input layout is the layout of the data containing the location and properties of a vertex. It would be a format of data that you can modify and set according
layout = new InputLayout(device, ShaderSignature.GetInputSignature(vertexShaderByteCode), new[] {
new InputElement("SV_Position", 0, Format.R32G32B32A32_Float, 0, 0),
new InputElement("COLOR", 0, Format.R32G32_Float, 16, 0),
new InputElement("TEXCOORD", 0, Format.R32G32_Float, 32, 0),
});
// Write vertex data to a datastream
var stream = new DataStream(Utilities.SizeOf<Vertex>() * 6, true, true);
int iWidth = (int)this.ActualWidth;
int iHeight = (int)this.ActualHeight;
float top = iWidth / 2;
float bottom = iHeight / 2;
stream.WriteRange(new[]
{
new Vertex(
new Vector4(-top, bottom, 0.5f, 0.0f), // position top-center
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color top-center (r,g,b,alpha)
new Vector2(0f,0f)),
new Vertex(
new Vector4(top, bottom, 0.5f, 0.0f), // position top-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color top-right (r,g,b,alpha)
new Vector2(iWidth,iHeight)),
new Vertex(
new Vector4(-top, -bottom, 0.5f, 0.0f), // position bottom-left
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color bottom-left (r,g,b,alpha)
new Vector2(iWidth,iHeight)),
new Vertex(
new Vector4(-top, -bottom, 0.5f, 0.0f), // position bottom-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color bottom-left (r,g,b,alpha)
new Vector2(iWidth,0f)),
new Vertex(
new Vector4(top, -bottom, 0.5f, 0.0f), // position bottom-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color bottom-right (r,g,b,alpha)
new Vector2(iWidth,iHeight)),
new Vertex(
new Vector4(top, bottom, 0.5f, 0.0f), // position top-right
new Color4(0.0f, 0.0f, 0.0f, 0.0f), // color top-right (r,g,b,alpha)
new Vector2(0f, iHeight)),
});
stream.Position = 0;
// Instantiate Vertex buiffer from vertex data
//
vertices = new SharpDX.Direct3D11.Buffer(device, stream, new BufferDescription()
{
BindFlags = BindFlags.VertexBuffer,
CpuAccessFlags = CpuAccessFlags.None,
OptionFlags = ResourceOptionFlags.None,
SizeInBytes = Utilities.SizeOf<Vertex>() * 6,
Usage = ResourceUsage.Default,
StructureByteStride = 0
});
stream.Dispose();
// Prepare All the stages
context.InputAssembler.InputLayout = (layout);
context.InputAssembler.PrimitiveTopology = (PrimitiveTopology.TriangleStrip);
context.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(vertices, Utilities.SizeOf<Vertex>(), 0));
context.VertexShader.Set(vertexShader);
context.PixelShader.Set(pixelShader);
context.OutputMerger.SetTargets(m_RenderTargetView);
Whenever I receive a new frame, I update the RenderTexture by mapping the resource as below:
device.ImmediateContext.ClearRenderTargetView(this.m_RenderTargetView, Color4.Black);
Texture2DDescription colordesc = new Texture2DDescription
{
BindFlags = BindFlags.ShaderResource,
Format = m_PixelFormat,
Width = iWidth,
Height = iHeight,
MipLevels = 1,
SampleDescription = new SampleDescription(1, 0),
Usage = ResourceUsage.Dynamic,
OptionFlags = ResourceOptionFlags.None,
CpuAccessFlags = CpuAccessFlags.Write,
ArraySize = 1
};
Texture2D newFrameTexture = new Texture2D(this.Device, colordesc);
DataStream dtStream = null;
DataBox dBox = Device.ImmediateContext.MapSubresource(newFrameTexture, 0, MapMode.WriteDiscard, 0, out dtStream);
if (dtStream != null)
{
int iRowPitch = dBox.RowPitch;
for (int iHeightIndex = 0; iHeightIndex < iHeight; iHeightIndex++)
{
//Copy the image bytes to Texture
// we write row strides multiplies by bytes per pixel
// as our case is bgra32 which is 4 bytes
dtStream.Position = iHeightIndex * iRowPitch;
Marshal.Copy(decodedData, iHeightIndex * iWidth * 4, new IntPtr(dtStream.DataPointer.ToInt64() + iHeightIndex * iRowPitch), iWidth * 4);
}
}
Device.ImmediateContext.UnmapSubresource(newFrameTexture, 0);
Texture2D srcTexture = m_RenderTargetView.ResourceAs<Texture2D>();
Device.ImmediateContext.CopySubresourceRegion(newFrameTexture, 0, null, this.RenderTarget, 0);
Device.ImmediateContext.Draw(6, 0);
Device.ImmediateContext.Flush();
this.D3DSurface.InvalidateD3DImage();
Disposer.SafeDispose(ref newFrameTexture);
My Effects/HLSL file:
// Decoded BGRA32 video frame, bound by the application to slot t0.
Texture2D ShaderTexture : register(t0);
SamplerState Sampler : register(s0);
// Per-object constants (world-view-projection matrix).
cbuffer PerObject: register(b0)
{
float4x4 WorldViewProj;
};
// Vertex layout produced by the application's InputLayout:
// position, vertex color, and texture coordinates.
struct VertexShaderInput
{
float4 Position : SV_Position;
float4 Color : COLOR;
float2 TextureUV : TEXCOORD0;
};
// Rasterizer payload passed from VSMain to PSMain.
struct VertexShaderOutput
{
float4 Position : SV_Position;
float4 Color : COLOR;
float2 TextureUV : TEXCOORD0;
};
// Vertex stage: transforms position by the world-view-projection matrix and
// passes UVs through.
VertexShaderOutput VSMain(VertexShaderInput input)
{
VertexShaderOutput output = (VertexShaderOutput)0;
output.Position = mul(input.Position, WorldViewProj);
// NOTE(review): multiplying a COLOR by the WVP matrix is almost certainly
// unintended — colors are not positions; verify whether Color should just
// be passed through.
output.Color = mul(input.Color, WorldViewProj);
output.TextureUV = input.TextureUV;
return output;
}
// Pixel stage: samples the decoded video frame and pins alpha to 1 so the
// quad renders opaque. The original returned `.rgb` (a float3) from a
// float4-returning function, which leaves the output alpha channel
// unspecified/zero — exactly the "texture renders transparent over non-black
// backgrounds" symptom described below.
float4 PSMain(VertexShaderOutput input) : SV_Target
{
    return float4(ShaderTexture.Sample(Sampler, input.TextureUV).rgb, 1.0f);
}
Images are rendered correctly, but it seems they become transparent if the background color of the parent control is anything other than black.
Now I can't seem to figure out what exactly is going on here which is rendering my texture as transparent.
I have also tried to render when the parent grid control has its background as a picture of a koala as shown below:
Any help in this matter would be much appreciated.
I downloaded the project from https://github.com/antonholmquist/rend-ios.
When I run this project, the teapot rotates a full 360 degrees, but the rotation never stops and touches do not rotate the teapot. I worked on stopping the rotation and that worked properly; then, with the rotation stopped, to move the teapot via touches I tried the code below:
// Rotates the model by the finger movement since the last recorded position.
// (Note the stray ';' after the signature — tolerated by clang but unneeded.)
- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event;
{
CGPoint currentMovementPosition = [[touches anyObject] locationInView:glView_];
// NOTE(review): the X screen delta is fed to the X-axis rotation and Y to
// the Y-axis; trackball-style rotation usually swaps these — verify.
[self renderByRotatingAroundX:(lastMovementPosition.x - currentMovementPosition.x)
rotatingAroundY: (lastMovementPosition.y - currentMovementPosition.y) scaling:1.0f translationInX:0.0f translationInY:0.0f];
lastMovementPosition = currentMovementPosition;
}
// Rebuilds the model-view matrix from a trackball-style rotation and redraws
// the model.
// NOTE(review): the matrix is reset to identity on every call, so rotations
// are not cumulative, and scaleF/xTranslation/yTranslation are accepted but
// never used — likely part of why dragging shows only a black screen.
- (void)renderByRotatingAroundX:(float)xRotation rotatingAroundY:(float)yRotation scaling:(float)scaleF translationInX:(float)xTranslation translationInY:(float)yTranslation{
    currentCalculatedMatrix = CATransform3DIdentity;
    currentCalculatedMatrix = CATransform3DTranslate(currentCalculatedMatrix, 0.0, -0.2, 0.0);
    currentCalculatedMatrix = CATransform3DScale(currentCalculatedMatrix, 4.5, 4.5 * (320.0/480.0), 4.5);

    glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    GLfloat currentModelViewMatrix[16];

    // Perform incremental rotation based on current angles in X and Y.
    if ((xRotation != 0.0) || (yRotation != 0.0))
    {
        GLfloat totalRotation = sqrt(xRotation*xRotation + yRotation*yRotation);
        CATransform3D temporaryMatrix = CATransform3DRotate(currentCalculatedMatrix, totalRotation * M_PI / 180.0,
            ((xRotation/totalRotation) * currentCalculatedMatrix.m12 + (yRotation/totalRotation) * currentCalculatedMatrix.m11),
            ((xRotation/totalRotation) * currentCalculatedMatrix.m22 + (yRotation/totalRotation) * currentCalculatedMatrix.m21),
            ((xRotation/totalRotation) * currentCalculatedMatrix.m32 + (yRotation/totalRotation) * currentCalculatedMatrix.m31));
        // Reject degenerate matrices.
        if ((temporaryMatrix.m11 >= -100.0) && (temporaryMatrix.m11 <= 100.0))
            currentCalculatedMatrix = temporaryMatrix;
    }

    // Draw the teapot model.
    // NOTE(review): the original read "¤tCalculatedMatrix" — an HTML-entity
    // mangling ("&curren;" rendered as '¤') of "&currentCalculatedMatrix".
    [self convert3DTransform:&currentCalculatedMatrix toMatrix:currentModelViewMatrix];
    [plainDisplayProgram use];
    glUniformMatrix4fv(plainDisplayModelViewMatrix, 1, 0, currentModelViewMatrix);
    glVertexAttribPointer(plainDisplayPositionAttribute, 3, GL_FLOAT, 0, 0, cube_vertices);
    glEnableVertexAttribArray(plainDisplayPositionAttribute);
    // NOTE(review): the original read NSLog(#"...") — '#' is a mangled '@'.
    NSLog(@"posit:%d,matrix=%d", plainDisplayPositionAttribute, plainDisplayModelViewMatrix);

    //Draw teapot. The new_teapot_indicies array is an RLE (run-length encoded) version of the teapot_indices array in teapot.h
    for (int i = 0; i < num_cube_indices; i += cube_indices[i] + 1)
    {
        NSLog(@"count:%d,i=%d", num_cube_indices, i);
        glDrawElements(GL_TRIANGLES, cube_indices[i], GL_UNSIGNED_SHORT, &cube_indices[i + 1]);
    }

    [glView_ presentFramebuffer];
}
// Presents the current renderbuffer to the screen, first resolving the
// multisample framebuffer when MSAA is compiled in. Returns whether the
// presentation succeeded (NO when there is no current EAGL context).
- (BOOL)presentFramebuffer
{
BOOL success = FALSE;
if ([EAGLContext currentContext])
{
#ifdef MSAA
glBindFramebuffer(GL_READ_FRAMEBUFFER_APPLE, multisampleFramebuffer);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER_APPLE,framebuffer);
glResolveMultisampleFramebufferAPPLE();
#endif
glBindRenderbuffer(GL_RENDERBUFFER, colorRenderbuffer);
success = [[EAGLContext currentContext] presentRenderbuffer:GL_RENDERBUFFER];
#ifdef MSAA
// Re-bind the multisample framebuffer for the next frame's drawing.
glBindFramebuffer(GL_FRAMEBUFFER, multisampleFramebuffer);
#endif
}
return success;
}
When I try to move the teapot it does not move; only a black screen is shown.
How to show the teapot model for touches moved?
Using touches moved, it will rotate clearly in all directions. try this.
xangle and yangle are globally declared and initialised as 0.0.
// Rotates the teapot by the drag delta since the previous touch location.
// xAngle/yAngle are accumulated across calls (declared globally, init 0.0).
- (void)touchesMoved:(NSSet *)touches withEvent:(UIEvent *)event
{
    UITouch * touch = [touches anyObject];
    // NOTE(review): the original read NSLog(#"Touch Returns : %#", ...) —
    // both '#' characters are forum manglings of '@'.
    NSLog(@"Touch Returns : %@", touches);

    CGPoint location = [touch locationInView:glView_];
    CGPoint lastLoc = [touch previousLocationInView:glView_];
    CGPoint diff = CGPointMake(lastLoc.x - location.x, lastLoc.y - location.y);

    // Vertical drag -> rotation about X, horizontal drag -> rotation about Y.
    float rotX = 1 * DegreesToRadians(diff.y / 0.2);
    float rotY = 1 * DegreesToRadians(diff.x / 0.2);

    xAngle += rotX;
    yAngle += rotY;
    teapotNode_.rotation = CC3VectorMake(xAngle, yAngle, 0);
    director_.running = YES;
}
I found the answer myself: in this rend-ios project (https://github.com/antonholmquist/rend-ios), every button touch moves the object model using the code below:
// Button handler intended to nudge the model's rotation.
-(void)rightButtonPressed
{
float angle;
angle = teapotNode_.rotationAngle;
angle +=0.4;
// NOTE(review): `angle` is computed but never written back to
// teapotNode_.rotationAngle, so this handler only toggles the director's
// running state — confirm the assignment was not lost in the paste.
director_.running = YES;//Redirector object
if (director_.frameInterval == 2)
{
director_.running = NO;
}
}
It works fine.
I'm doing a Surface app where I need to scale my scene (zoom) while the user scales with their fingers (i.e. pinching).
Currently I have it working ok, but the issue is that I need to zoom in on the center point between the users fingers.
I have the point, but the maths behind the translation is hard to grasp.
When I apply a ScaleTransform to my Canvas scene, it zooms in on the top left of the canvas, i need it to zoom in on the center point of my pinch gesture (which, again, I do have).
How would the maths for the translation work to keep the zoom to appear to zoom in on the center point of the gesture?
Edit:
This is basically what I have:
// Routes a Surface manipulation delta to the matching handler: a scale
// change becomes a zoom, pure movement becomes a pan/translation.
void Affine2DManipulationDelta(object sender, Affine2DOperationDeltaEventArgs e)
{
    bool isScaling = e.ScaleDelta != 1;
    if (isScaling)
    {
        ApplyZoom(e);
    }
    else if (e.Delta.Length != 0)
    {
        ApplyTranslation(e);
    }
}
// Scales the zoom transform by the gesture's scale delta, clamped to the
// allowed zoom range. NOTE(review): this scales around the transform's
// origin (top-left of the canvas), not the pinch midpoint — the symptom the
// question describes; the revised implementation below anchors the pinch
// point instead.
private void ApplyZoom(Affine2DOperationDeltaEventArgs e)
{
//scale
var newScale = _zoomTransform.ScaleX * e.ScaleDelta;
newScale = GetCappedZoomLevel(newScale);
_zoomTransform.ScaleX = newScale;
_zoomTransform.ScaleY = newScale;
}
// Pans the canvas by the drag since the gesture started, clamped so the
// canvas stays within the viewport bounds.
private void ApplyTranslation(Affine2DOperationDeltaEventArgs e)
{
// Drag delta relative to where the gesture began.
var xDiff = e.ManipulationOrigin.X - _screenStartPoint.X;
var yDiff = e.ManipulationOrigin.Y - _screenStartPoint.Y;
var translateX = xDiff + _startOffset.X;
var translateY = yDiff + _startOffset.Y;
//bounds testing to limit translation
var rect = new Rect(0.0, 0.0, ZoomCanvas.RenderSize.Width, ZoomCanvas.RenderSize.Height);
Rect bounds = ZoomCanvas.TransformToAncestor(MainViewportCanvas).TransformBounds(rect);
// Each axis is clamped independently so the user can still slide along
// the unconstrained axis.
if (CanTranslateX(translateX, bounds))
_translateTransform.X = translateX;
if (CanTranslateY(translateY, bounds))
_translateTransform.Y = translateY;
}
Pretty basic really, but it works to a point...
_zoomTransform is a ScaleTransform, and _translateTransform is a TranslateTransform
MainViewport is a canvas that contains ZoomCanvas which is the canvas that I apply the transforms to.
Here is my implementation that ended up working just fine
// Zooms around the pinch midpoint: converts the screen-space manipulation
// origin back into untransformed canvas coordinates (via the inverse of the
// combined transform), then delegates to DoZoom to keep that canvas point
// fixed under the fingers.
private void ApplyZoom(Affine2DOperationDeltaEventArgs e)
{
//scale
var newScale = _zoomTransform.ScaleX * e.ScaleDelta;
newScale = GetCappedZoomLevel(newScale);
var pinchPoint = e.ManipulationOrigin;
DoZoom(newScale, _transformGroup.Inverse.Transform(pinchPoint), pinchPoint);
}
// Applies the new scale while anchoring the pinch point: `pinchPosition` is
// the point in untransformed canvas coordinates, `physicalPosition` the same
// point in screen coordinates. After scaling, the canvas point would land at
// pinchPosition * newScale, so translating by the (negated) difference puts
// it back under the user's fingers.
private void DoZoom(double newScale, Point pinchPosition, Point physicalPosition)
{
_translateTransform.X = -1 * (pinchPosition.X * newScale - physicalPosition.X);
_translateTransform.Y = -1 * (pinchPosition.Y * newScale - physicalPosition.Y);
_zoomTransform.ScaleX = newScale;
_zoomTransform.ScaleY = newScale;
}
I had to do exactly this myself. Basically, a Surface control that can host arbitrary content and allow the user to pan and zoom (using gestures). The handler for my maniplation processor's Affine2DManipulationDelta is shown below. Hopefully it's fairly self-explanatory and gets you where you need to be.
// ScrollViewer-based variant: zooms around the manipulation origin by
// scaling and simultaneously shifting the scroll offsets, or pans when the
// gesture carries no scale change.
private void OnManipulationDelta(object sender, Affine2DOperationDeltaEventArgs e)
{
Debug.Assert(_scrollViewer != null);
if (CanScale && _scrollViewer != null && e.ScaleDelta != 1)
{
var scrollViewerContent = _scrollViewer.Content as UIElement;
//can't do anything if the content isn't present and isn't a UIElement
if (scrollViewerContent != null)
{
// Clamp the new scale into [MinScale, MaxScale].
var newScale = Scale * e.ScaleDelta;
newScale = Math.Max(MinScale, newScale);
newScale = Math.Min(MaxScale, newScale);
var origin = e.ManipulationOrigin;
var pointInContent = _scrollViewer.TranslatePoint(origin, scrollViewerContent);
var deltaScale = newScale - Scale;
//width and height changes across the whole image
var deltaWidth = deltaScale * _scrollViewer.ExtentWidth;
var deltaHeight = deltaScale * _scrollViewer.ExtentHeight;
//width and height changes relative to the point in the scroll viewer's content
deltaWidth = (pointInContent.X / _scrollViewer.ExtentWidth) * deltaWidth;
deltaHeight = (pointInContent.Y / _scrollViewer.ExtentHeight) * deltaHeight;
_offset = Vector.Add(_offset, new Vector(deltaWidth, deltaHeight));
// NOTE(review): centerPoint is computed and offset but never used below
// — dead code left over from an earlier approach?
var centerPoint = new Point(origin.X + deltaWidth, origin.Y + deltaHeight);
centerPoint.Offset(_offset.X, _offset.Y);
// Commit the scale, then shift the scroll offsets so the content under
// the manipulation origin stays put.
Scale = newScale;
HorizontalOffset = _scrollViewer.HorizontalOffset + deltaWidth;
VerticalOffset = _scrollViewer.VerticalOffset + deltaHeight;
}
}
else if (CanPan && e.Delta.Length != 0)
{
// Pan opposite to the finger delta, clamped so offsets never go negative.
var newHorizontalOffset = _scrollViewer.HorizontalOffset + (e.Delta.X * -1);
var newVerticalOffset = _scrollViewer.VerticalOffset + (e.Delta.Y * -1);
newHorizontalOffset = Math.Max(0, newHorizontalOffset);
newVerticalOffset = Math.Max(0, newVerticalOffset);
HorizontalOffset = newHorizontalOffset;
VerticalOffset = newVerticalOffset;
}
}