SceneKit transparency over black background

I'm currently learning how to use SceneKit, and I'm having issues with transparent objects. I've written a shader that increases transparency when faces are viewed head on, and everything works as expected when I choose a white background (screenshot: transparency with white background), but when I choose a black background the object renders as if it were completely opaque (screenshot: transparency with black background).
Here's my view controller code:
import Cocoa
import SceneKit
import SceneKit.ModelIO

struct shaderSettings {
    var Color: vector_float4
    var minTransparency: Float
}

class ViewController: NSViewController {
    var scnView: SCNView!
    var scnScene: SCNScene!

    override func viewDidLoad() {
        super.viewDidLoad()
        scnView = self.view as! SCNView
        scnScene = SCNScene()
        scnView.scene = scnScene
        scnView.allowsCameraControl = true
        scnView.backgroundColor = NSColor.black

        // load mesh
        let url = URL(fileURLWithPath: "monkey.obj")
        let monkey = MDLAsset(url: url).object(at: 0)
        let node = SCNNode(mdlObject: monkey)
        node.name = "monkey"

        // use Metal shader
        let program = SCNProgram()
        program.vertexFunctionName = "vert"
        program.fragmentFunctionName = "frag"
        program.isOpaque = false

        // set up material
        var s = shaderSettings(Color: vector_float4(1, 0, 0, 1), minTransparency: 0.3)
        let mat = SCNMaterial()
        mat.program = program
        mat.setValue(NSData(bytes: &s, length: MemoryLayout<shaderSettings>.stride), forKey: "s")
        mat.blendMode = SCNBlendMode.alpha
        mat.writesToDepthBuffer = false
        mat.readsFromDepthBuffer = false
        mat.cullMode = SCNCullMode.back

        // assign the material and add the node to the scene
        node.geometry!.firstMaterial = mat
        scnScene.rootNode.addChildNode(node)
    }

    override func viewDidDisappear() {
        // quit when window closes
        exit(0)
    }
}
And, while I don't think the issue is in here, here's my shader program anyway:
#include <metal_stdlib>
#include <metal_geometric>
#include <metal_common>
#include <SceneKit/scn_metal>
using namespace metal;

struct settings {
    float4 Color;
    float minTransparency;
};

typedef struct {
    float4 pos  [[ attribute(SCNVertexSemanticPosition) ]];
    float4 norm [[ attribute(SCNVertexSemanticNormal) ]];
} Input;

typedef struct {
    float4 pos [[position]];
    float3 camDir;
    float3 norm;
} v2f;

struct nodeBuffer {
    float4x4 modelViewProjectionTransform;
    float4x4 modelViewTransform;
    float4x4 normalTransform;
};

vertex v2f vert(Input in [[ stage_in ]],
                constant SCNSceneBuffer& scn_frame [[ buffer(0) ]],
                constant nodeBuffer& scn_node [[ buffer(1) ]]) {
    v2f o;
    o.pos = scn_node.modelViewProjectionTransform * in.pos;
    o.norm = normalize(float3(scn_node.normalTransform * in.norm));
    o.camDir = normalize(float3(scn_node.modelViewTransform * in.pos));
    return o;
}

fragment half4 frag(v2f in [[ stage_in ]], constant settings& s [[ buffer(2) ]]) {
    float nDotCam = abs(dot(float3(in.norm), float3(-in.camDir)));
    half4 col;
    col.rgb = half3(s.Color.rgb);
    col.a = half(mix(s.Color.a, s.minTransparency, nDotCam));
    return col;
}
Thanks for your time!

I figured it out. I read that SceneKit assumes premultiplied alpha, i.e. it composites with src.rgb + (1 - src.a) * dst.rgb, so a straight-alpha color is never attenuated by its own alpha: over a black background nothing of the background gets added and the object looks opaque, while over a white background the (1 - src.a) * white term happens to look like transparency. Adding
col.rgb *= col.a;
before the return in the fragment shader fixed it.
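As an aside, a similar view-angle fade can be had without a custom SCNProgram by using a fragment shader modifier, and the same premultiplication rule applies there. This is only a rough, untested sketch under that assumption; the modifier body is Metal, the hard-coded 0.3 stands in for minTransparency, and red stands in for shaderSettings.Color:
// Hypothetical alternative: a fragment shader modifier instead of a full SCNProgram.
let fadeModifier = """
#pragma transparent
float nDotV = abs(dot(_surface.normal, _surface.view));
float alpha = mix(1.0, 0.3, nDotV);          // 0.3 plays the role of minTransparency
_output.color = float4(1.0, 0.0, 0.0, 1.0);  // base color, like shaderSettings.Color
_output.color.a = alpha;
_output.color.rgb *= alpha;                  // premultiply, same as the fix above
"""

let mat = SCNMaterial()
mat.lightingModel = .constant
mat.blendMode = .alpha
mat.shaderModifiers = [.fragment: fadeModifier]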

Related

Cannot create a diffuse lighting shader that works in Unity with a SPOTLIGHT

The code is below. I have tried using https://catlikecoding.com/unity/tutorials/rendering/part-5/#5, but I cannot make sense of the tutorial or of the edits it asks me to make.
Shader "Unlit/DiffuseLighting"
{
Properties
{
_MainTex("Texture", 2D) = "white" {}
_LightPoint("Light Point Position", Vector) = (0, 0, 0, 0)
}
SubShader
{
Tags { "RenderType" = "Opaque" }
LOD 100
Pass
{
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_fog
#pragma multi_compile DIRECTIONAL POINT SPOT
#include "UnityCG.cginc"
struct appdata
{
float4 vertex : POSITION;
float4 normal : NORMAL;
float2 uv : TEXCOORD0;
};
struct v2f
{
float2 uv : TEXCOORD0;
float3 worldNormal : TEXCOORD1;
float3 worldPosition : TEXCOORD2;
UNITY_FOG_COORDS(1)
float4 vertex : SV_POSITION;
};
sampler2D _MainTex;
float4 _MainTex_ST;
float4 _LightPoint;
v2f vert(appdata v)
{
v2f o;
o.worldNormal = UnityObjectToWorldNormal(v.normal);
o.vertex = UnityObjectToClipPos(v.vertex);
o.worldPosition = mul(unity_ObjectToWorld, v.vertex);
o.uv = TRANSFORM_TEX(v.uv, _MainTex);
UNITY_TRANSFER_FOG(o,o.vertex);
return o;
}
fixed4 frag(v2f i) : SV_Target
{
#if defined(POINT)||defined(SPOT)
fixed3 lightDirection = normalize(_WorldSpaceLightPos0.xyz - i.worldPosition);
fixed3 lightDifference = i.worldPosition - _LightPoint.xyz;
fixed intensity = max(-1 * dot(lightDirection, i.worldNormal), 0);
fixed4 col = intensity * tex2D(_MainTex, i.uv);
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
#else
fixed3 lightDifference = i.worldPosition - _LightPoint.xyz;
fixed3 lightDirection = normalize(lightDifference);
fixed intensity = max(-1 * dot(lightDirection, i.worldNormal), 0);
fixed4 col = intensity * tex2D(_MainTex, i.uv);
UNITY_APPLY_FOG(i.fogCoord, col);
return col;
#endif
}
ENDCG
}
}
}
I tried following a tutorial, went to its Discord for help, and was redirected to https://catlikecoding.com/unity/tutorials/rendering/part-5/#5; now I am stuck and do not know how to make the shader work with the spotlight in our scene.

Using multiple textures in Direct3D11 with CG

I want to use a 3D texture and a 1D color map in Direct3D11/Cg. Here is the simple pixel shader code:
struct fourDf_In {
    float4 position : POSITION;
    float3 texCoord : TEXCOORD0;
};

struct fourDf_Out {
    float4 color : COLOR;
};

fourDf_Out fourDf ( fourDf_In input,
                    const uniform sampler1D ColorMap  : TEX0,
                    const uniform sampler3D USTexture : TEX1 )
{
    fourDf_Out o, o1;
    float tmp;
    tmp = tex3D(USTexture, input.texCoord).r;
    o.color = tex1D(ColorMap, tmp); // 2 samplers not working
    return o;
}
And this is my initialization code:
ID3D11Device *g_pDevice = NULL;
ID3D11Texture1D *myColorMap = NULL;
ID3D11Texture3D *myUSTexture = NULL;

ColorMap = cgGetNamedParameter(myCgFragmentProgram, "ColorMap");
cgD3D11SetTextureParameter( ColorMap, myColorMap );
cgD3D11SetSamplerStateParameter( ColorMap, NULL ); // NULL == default states

USTexture = cgGetNamedParameter(myCgFragmentProgram, "USTexture");
cgD3D11SetTextureParameter( USTexture, myUSTexture );
cgD3D11SetSamplerStateParameter( USTexture, NULL ); // NULL == default states

// ---- 1D ColorMap
D3D11_TEXTURE1D_DESC tx1d;
tx1d.Width = ColorMapLength;
tx1d.MipLevels = 1;
tx1d.ArraySize = 1;
tx1d.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // also tried DXGI_FORMAT_R8G8B8A8_UINT
tx1d.Usage = D3D11_USAGE_DEFAULT;
tx1d.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tx1d.CPUAccessFlags = 0;
tx1d.MiscFlags = 0;

srd = {0};
srd.pSysMem = ColorMapArray;
srd.SysMemPitch = 0;      // ColorMapLength*4;
srd.SysMemSlicePitch = 0; // ColorMapLength*4;

hr = g_pDevice->CreateTexture1D(&tx1d, &srd, &myColorMap);
if( hr != S_OK )
    return hr;

// ---- 3D texture
D3D11_TEXTURE3D_DESC tx3d;
tx3d.Width = iWidth;
tx3d.Height = iHeight;
tx3d.Depth = iDepth;
tx3d.MipLevels = 1;
tx3d.Format = DXGI_FORMAT_R8_UNORM;
tx3d.Usage = D3D11_USAGE_DYNAMIC;
tx3d.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tx3d.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tx3d.MiscFlags = 0;

hr = g_pDevice->CreateTexture3D(&tx3d, NULL, &myUSTexture);
if( hr != S_OK )
    return hr;
The 3D and 1D textures work separately, but not together; i.e., feeding the output of the 3D texture into the 1D ColorMap fails and the image is totally black. I suspect it has something to do with how the multiple texture units (TEX0, TEX1) are initialized. How do I use two textures in Cg with Direct3D11?
It turned out that TEX0 and TEX1 in the pixel shader code were deprecated semantics, and both ended up pointing to the same sampler.
The correct syntax appears to be:
const uniform sampler1D ColorMap : TEXUNIT0,
const uniform sampler3D USTexture : TEXUNIT1
Cg is obsolete, so I recommend removing this dependency and porting the shaders entirely to DirectX/HLSL.

Issue retrieving members from objects in array, value of type 'Shape' has no member 'name'

I'm new to Swift, so if anyone could point me in the right direction it would be greatly appreciated. I'm attempting to pull member values from an array of objects and output them as strings, as follows. This is inside a playground, if that has any effect on the situation.
// Main class for array building later
class Shape {
}

// Implement the protocol on a Triangle, Square, and Rectangle.
class Triangle: Shape, Polygon {
    var a: Float
    var b: Float
    var c: Float
    var name: String = "Triangle"
    var sides: Int = 3
    init(a: Float, b: Float, c: Float) {
        self.a = a
        self.b = b
        self.c = c
    }
    func perimiter() -> Float {
        return a+b+c
    }
    func area() -> Float {
        let sp = (a+b+c)/2
        let area = sqrtf(sp*(sp-a)*(sp-b)*(sp-c))
        return area
    }
}

class Square: Shape, Polygon {
    var a: Float
    var name: String = "Square"
    var sides: Int = 4
    init(a: Float) {
        self.a = a
    }
    func perimiter() -> Float {
        return a*4
    }
    func area() -> Float {
        return a*2
    }
}

class Rectangle: Shape, Polygon {
    var a: Float
    var b: Float
    var name: String = "Rectangle"
    var sides: Int = 4
    init(a: Float, b: Float) {
        self.a = a
        self.b = b
    }
    func perimiter() -> Float {
        return 2*(a*b)
    }
    func area() -> Float {
        return a*b
    }
}

protocol Polygon {
    var name: String { get }
    var sides: Int { get }
    func perimiter() -> Float
    func area() -> Float
}

// MARK: Create instances of, return array of
func makeShapes() -> [Shape] {
    let s1 = Triangle(a: 3, b: 4, c: 5)
    // print(s1.name) works fine here
    // print(s1.perimiter().description)
    // print(s1.area().description)
    let s2 = Triangle(a: 6, b: 8, c: 10)
    let s3 = Square(a: 15)
    let s4 = Square(a: 25)
    let s5 = Rectangle(a: 20, b: 13)
    let s6 = Rectangle(a: 17, b: 50)
    return [s1, s2, s3, s4, s5, s6]
}

func draw(shapes: [Shape]) {
    shapes.forEach { shape in
        print(shape.name)
    }
}

var shapes = makeShapes()
draw(shapes: shapes)
I can see the objects being passed around on the right side of the playground, and I can see all of the member values even outside makeShapes(); I just can't get to them in print(), where it gives me a "Value of type 'Shape' has no member 'name'" error. Again, any help or nudge in the right direction on what I'm guessing is something obvious I'm missing would be greatly appreciated. Thanks ahead of time!
The Shape class doesn't have a member called name, just as the error describes.
In any case, Shape shouldn't be a class. Instead it should be a protocol.
protocol Shape {
    var name: String { get }
}
Or you should be drawing Polygons instead of shapes:
func draw(shapes: [Polygon]) {
    shapes.forEach { shape in
        print(shape.name)
    }
}

var shapes = makeShapes()
draw(shapes: shapes as! [Polygon])
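To avoid the forced cast entirely, a minimal sketch of the protocol-based layout suggested above could look like this (only Triangle is shown; Square and Rectangle would follow the same pattern):
import Foundation

protocol Shape {
    var name: String { get }
    var sides: Int { get }
    func perimiter() -> Float
    func area() -> Float
}

struct Triangle: Shape {
    var a, b, c: Float
    let name = "Triangle"
    let sides = 3
    func perimiter() -> Float { a + b + c }
    func area() -> Float {
        let sp = (a + b + c) / 2
        return sqrtf(sp * (sp - a) * (sp - b) * (sp - c))
    }
}

func draw(shapes: [Shape]) {
    shapes.forEach { print($0.name) }   // .name now resolves through the protocol
}

draw(shapes: [Triangle(a: 3, b: 4, c: 5), Triangle(a: 6, b: 8, c: 10)])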

How to continuously animate an SCNProgram shader within an SCNScene / SCNView?

Within a macOS app, I have a pattern defined by an SCNProgram that is mapped onto an SCNPlane. It looks like this: (screenshot of the static pattern)
The shader is supposed to make the rows of triangles shift, like in this video (animated texture), a screen grab of the same shader running inside an MTKView.
In the SceneKit version of this shader, the shader only animates when I click on the plane within the view.
How do I make the SceneKit view (or scene) animate the shader continuously, all the time? Again, this app is on macOS. I have tried setting
self.gameView!.isPlaying = true
but that doesn't seem to fix the problem
Here is the Metal shader:
#include <metal_stdlib>
#include <SceneKit/scn_metal>
using namespace metal;

struct myPlaneNodeBuffer {
    float4x4 modelTransform;
    float4x4 modelViewTransform;
    float4x4 normalTransform;
    float4x4 modelViewProjectionTransform;
    float2x3 boundingBox;
};

typedef struct {
    float3 position  [[ attribute(SCNVertexSemanticPosition) ]];
    float2 texCoords [[ attribute(SCNVertexSemanticTexcoord0) ]];
} VertexInput;

static float rand(float2 uv)
{
    return fract(sin(dot(uv, float2(12.9898, 78.233))) * 43758.5453);
}

static float2 uv2tri(float2 uv)
{
    float sx = uv.x - uv.y / 2;
    float sxf = fract(sx);
    float offs = step(fract(1 - uv.y), sxf);
    return float2(floor(sx) * 2 + sxf + offs, uv.y);
}

struct SimpleVertexWithUV
{
    float4 position [[position]];
    float2 uv;
};

vertex SimpleVertexWithUV trianglequiltVertex(VertexInput in [[ stage_in ]],
                                              constant SCNSceneBuffer& scn_frame [[ buffer(0) ]],
                                              constant myPlaneNodeBuffer& scn_node [[ buffer(1) ]])
{
    SimpleVertexWithUV vert;
    vert.position = scn_node.modelViewProjectionTransform * float4(in.position, 1.0);
    vert.uv = in.texCoords;
    return vert;
}

fragment float4 trianglequiltFragment(SimpleVertexWithUV in [[ stage_in ]],
                                      constant SCNSceneBuffer& scn_frame [[ buffer(0) ]],
                                      constant myPlaneNodeBuffer& scn_node [[ buffer(1) ]])
{
    float4 fragColor;
    float2 uv = in.uv * 10;
    float timer = scn_frame.time;
    uv.y += timer;
    float t = timer * 0.8;
    float tc = floor(t);
    float tp = smoothstep(0, 0.8, fract(t));
    float2 r1 = float2(floor(uv.y), tc);
    float2 r2 = float2(floor(uv.y), tc + 1);
    float offs = mix(rand(r1), rand(r2), tp);
    uv.x += offs * 8;
    float2 p = uv2tri(uv);
    float ph = rand(floor(p)) * 6.3 + p.y * 0.2;
    float c = abs(sin(ph + timer));
    fragColor = float4(c, c, c, 1);
    return(fragColor);
}
Here is the view controller:
import SceneKit
import QuartzCore

class GameViewController: NSViewController {

    @IBOutlet weak var gameView: GameView!

    override func awakeFromNib() {
        super.awakeFromNib()

        // create a new scene
        let scene = SCNScene()
        Swift.print(gameView.colorPixelFormat.rawValue)

        // create and add a camera to the scene
        let cameraNode = SCNNode()
        cameraNode.camera = SCNCamera()
        scene.rootNode.addChildNode(cameraNode)

        // place the camera
        cameraNode.position = SCNVector3(x: 0, y: 0, z: 15)

        // turn off default lighting
        self.gameView!.autoenablesDefaultLighting = false

        // set the scene to the view
        self.gameView!.scene = scene

        // allows the user to manipulate the camera
        self.gameView!.allowsCameraControl = true

        // show statistics such as fps and timing information
        self.gameView!.showsStatistics = true

        // configure the view
        self.gameView!.backgroundColor = NSColor.black

        // play it always?
        self.gameView!.isPlaying = true

        var geometry: SCNGeometry
        geometry = SCNPlane(width: 10, height: 10)
        let geometryNode = SCNNode(geometry: geometry)

        let program = SCNProgram()
        program.fragmentFunctionName = "trianglequiltFragment"
        program.vertexFunctionName = "trianglequiltVertex"

        let gradientMaterial = SCNMaterial()
        gradientMaterial.program = program
        gradientMaterial.specular.contents = NSColor.black
        gradientMaterial.locksAmbientWithDiffuse = true
        geometry.materials = [gradientMaterial]
        geometryNode.geometry?.firstMaterial?.lightingModel = .constant

        scene.rootNode.addChildNode(geometryNode)
    }
}
Try setting:
gameView?.rendersContinuously = true
(You don’t need all those extra ‘self.’s either.)
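For reference, a minimal usage sketch combining the flags discussed here (assuming gameView is the SCNView outlet from the controller above):
// redraw every frame so scn_frame.time keeps advancing in the shader,
// even when nothing in the scene graph itself is animating
gameView.rendersContinuously = true
gameView.isPlaying = true
gameView.preferredFramesPerSecond = 60   // optional cap on the redraw rate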

SharpDX Constant/Texture Buffers Don't Work

I've been trying to get constant/texture buffers to work in SharpDX (it's just like SlimDX), but the data I put in them doesn't seem to reach the shaders.
I've looked up how to do it and followed examples but I just can't get it to work.
Ultimately I will need to be able to input multiple large arrays of various data types into my shaders, so if anyone can give me a working example that can do that, it would be great!
But for now I've written a simple example to test, and I just can't get it to work. Usually I can at least get something to display when I draw a triangle, but right now it won't even do that.
That's probably a silly mistake I overlooked, but if someone could take a look at it and point out what's wrong, or better yet fix it and post the updated code, that would be great (it is complete and should compile).
I'm sorry if the code is long but I tried to make it as simple as possible. Anyway, here it is:
using SharpDX;
using SharpDX.Direct3D;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Windows;
using SharpDX.D3DCompiler;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;

namespace test_namespace
{
    class Test
    {
        [StructLayout(LayoutKind.Explicit, Size = 80, Pack = 16)]
        public struct Data
        {
            [FieldOffset(0)]
            public Matrix mat;
            [FieldOffset(64)]
            public Vector4 testColor;
        }

        [StructLayout(LayoutKind.Explicit)]
        public struct Point
        {
            [FieldOffset(0)]
            public Vector4 pos;
            [FieldOffset(16)]
            public Vector2 tex;
        }

        int width = 1000;
        int height = 1000;
        const int vertSize = 6 * sizeof(float);

        RenderForm form;
        PictureBox pic;
        SharpDX.Direct3D11.Device dev;
        DeviceContext dc;
        SwapChainDescription scd;
        SwapChain sc;
        RasterizerStateDescription rsd;
        RasterizerState rs;
        Viewport vp;
        Texture2DDescription depthDesc;
        DepthStencilView dsv;
        RenderTargetView rtv;
        SharpDX.Direct3D11.Buffer buffer;
        InputLayout il;
        VertexShader vs;
        ShaderBytecode vsCode;
        PixelShader ps;
        ShaderBytecode psCode;
        Matrix view;
        Matrix proj;
        Matrix mat;
        Data data;
        DataStream pointStream;
        SharpDX.Direct3D11.Buffer pointBuffer;

        public Test()
        {
            init();
            initMat();
            data.testColor = new Vector4(1.0f, 0.5f, 0.25f, 0.0f);
            string code = "struct vert { float4 pos : POSITION; float2 tex : TEXCOORD; };\n"
                + "struct pix { float4 pos : SV_POSITION; float2 tex : TEXCOORD; };\n"
                + "cbuffer buf1 : register(b0) { float4x4 mat; float4 testColor; }\n"
                + "pix VS(vert vertIn) { pix pixOut = (pix)0; pixOut.pos = mul(vertIn.pos, mat); pixOut.tex = vertIn.tex; return pixOut; }\n"
                + "float4 PS(pix pixIn) : SV_Target { return testColor; }";
            vsCode = ShaderBytecode.Compile(code, "VS", "vs_5_0");
            vs = new VertexShader(dev, vsCode);
            psCode = ShaderBytecode.Compile(code, "PS", "ps_5_0");
            ps = new PixelShader(dev, psCode);
            dc.VertexShader.Set(vs);
            dc.PixelShader.Set(ps);
            il = new InputLayout(dev, ShaderSignature.GetInputSignature(vsCode),
                new InputElement[] { new InputElement("POSITION", 0, Format.R32G32B32_Float, 0, 0),
                                     new InputElement("TEXCOORD", 0, Format.R32G32_Float, 16, 0) });
            dc.InputAssembler.InputLayout = il;
            dc.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
            updateBuffer();
            RenderLoop.Run(form, () =>
            {
                dc.ClearDepthStencilView(dsv, DepthStencilClearFlags.Depth, 1.0f, 0);
                dc.ClearRenderTargetView(rtv, Color4.Black);
                float dist = 10.0f;
                draw(new Vector3(-dist, -dist, dist), Vector2.Zero, new Vector3(-dist, dist, dist), Vector2.UnitY,
                     new Vector3(dist, dist, dist), Vector2.One);
            });
        }

        void init()
        {
            form = new RenderForm();
            form.ClientSize = new System.Drawing.Size(width, height);
            form.BackColor = System.Drawing.Color.Black;
            form.FormClosed += form_FormClosed;
            pic = new PictureBox();
            pic.Location = new System.Drawing.Point(0, 0);
            pic.Size = new Size(width, height);
            pic.Show();
            form.Controls.Add(pic);
            scd = new SwapChainDescription();
            scd.BufferCount = 1;
            scd.Flags = SwapChainFlags.AllowModeSwitch;
            scd.IsWindowed = true;
            scd.ModeDescription = new ModeDescription(width, height, new Rational(60, 1), Format.R8G8B8A8_UNorm);
            scd.OutputHandle = pic.Handle;
            scd.SampleDescription = new SampleDescription(1, 0);
            scd.SwapEffect = SwapEffect.Discard;
            scd.Usage = Usage.RenderTargetOutput;
            rsd = new RasterizerStateDescription();
            rsd.CullMode = CullMode.None;
            rsd.DepthBias = 0;
            rsd.DepthBiasClamp = 0;
            rsd.FillMode = FillMode.Solid;
            rsd.IsAntialiasedLineEnabled = true;
            rsd.IsDepthClipEnabled = true;
            rsd.IsFrontCounterClockwise = false;
            rsd.IsMultisampleEnabled = true;
            rsd.IsScissorEnabled = false;
            rsd.SlopeScaledDepthBias = 0;
            SharpDX.Direct3D11.Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.Debug, scd, out dev, out sc);
            rs = new RasterizerState(dev, rsd);
            vp = new Viewport(0, 0, width, height, 0.0f, 1.0f);
            dc = dev.ImmediateContext;
            dc.Rasterizer.State = rs;
            dc.Rasterizer.SetViewports(vp);
            depthDesc = new Texture2DDescription();
            depthDesc.ArraySize = 1;
            depthDesc.BindFlags = BindFlags.DepthStencil;
            depthDesc.CpuAccessFlags = CpuAccessFlags.None;
            depthDesc.Format = Format.D32_Float_S8X24_UInt;
            depthDesc.Height = height;
            depthDesc.MipLevels = 1;
            depthDesc.OptionFlags = ResourceOptionFlags.None;
            depthDesc.SampleDescription = new SampleDescription(1, 0);
            depthDesc.Usage = ResourceUsage.Default;
            depthDesc.Width = width;
            dsv = new DepthStencilView(dev, new Texture2D(dev, depthDesc));
            rtv = new RenderTargetView(dev, (SharpDX.Direct3D11.Resource)SharpDX.Direct3D11.Resource.FromSwapChain<Texture2D>(sc, 0));
            dc.OutputMerger.SetTargets(dsv, rtv);
            buffer = new SharpDX.Direct3D11.Buffer(dev, Marshal.SizeOf(typeof(Data)),
                ResourceUsage.Default, BindFlags.ConstantBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
            dc.VertexShader.SetConstantBuffer(0, buffer);
        }

        void initMat()
        {
            view = Matrix.LookAtLH(Vector3.Zero, Vector3.UnitZ, Vector3.UnitY);
            proj = Matrix.PerspectiveFovLH((float)Math.PI / 4.0f, (float)width / (float)height, 0.001f, 10000.0f);
            mat = view * proj;
            mat.Transpose();
            data.mat = mat;
        }

        void updateBuffer()
        {
            dc.UpdateSubresource<Data>(ref data, buffer);
        }

        public void draw(Vector3 p1, Vector2 t1, Vector3 p2, Vector2 t2, Vector3 p3, Vector2 t3)
        {
            Vector3[] p = new Vector3[3] { p1, p2, p3 };
            Vector2[] t = new Vector2[3] { t1, t2, t3 };
            Point[] points = new Point[3];
            for (int i = 0; i < 3; i++)
            {
                points[i] = new Point();
                points[i].pos = new Vector4(p[i].X, p[i].Y, p[i].Z, 1.0f);
                points[i].tex = new Vector2(t[i].X, t[i].Y);
            }
            using (pointStream = new DataStream(vertSize * 3, true, true))
            {
                pointStream.WriteRange<Point>(points);
                using (pointBuffer = new SharpDX.Direct3D11.Buffer(dev, pointStream, vertSize * 3,
                    ResourceUsage.Default, BindFlags.VertexBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0))
                {
                    dc.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
                    dc.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(pointBuffer, vertSize, 0));
                    dc.Draw(3, 0);
                }
            }
        }

        void form_FormClosed(object sender, FormClosedEventArgs e)
        {
            buffer.Dispose();
            il.Dispose();
            ps.Dispose();
            psCode.Dispose();
            vs.Dispose();
            vsCode.Dispose();
            rtv.Dispose();
            dsv.Dispose();
            dc.ClearState();
            dc.Flush();
            dc.Dispose();
            dev.Dispose();
            sc.Dispose();
        }
    }
}
Also, here is the shader code formatted in a more readable way:
struct vert
{
    float4 pos : POSITION;
    float2 tex : TEXCOORD;
};

struct pix
{
    float4 pos : SV_POSITION;
    float2 tex : TEXCOORD;
};

cbuffer buf1 : register(b0)
{
    float4x4 mat;
    float4 testColor;
}

pix VS(vert vertIn)
{
    pix pixOut = (pix)0;
    pixOut.pos = mul(vertIn.pos, mat);
    pixOut.tex = vertIn.tex;
    return pixOut;
}

float4 PS(pix pixIn) : SV_Target
{
    return testColor;
}
I'm not sure if this is of any help, but why use UpdateSubresource in your updateBuffer()? In SharpDXTutorial/Tutorial16 (the cubemap example) the constant buffer is updated through the "device" object:
device.UpdateData<Data>(dataConstantBuffer, sceneInformation);
This is a very handy helper. It is part of SharpHelper, which belongs to SharpDXTutorial:
https://github.com/RobyDX/SharpDX_Demo/blob/master/SharpDXTutorial/SharpHelper/SharpHelper.csproj
Maybe it takes care of something that was missed when updating the constant buffer?
