Using multiple textures in Direct3D11 with Cg

I want to use a 3D texture and a 1D color map in Direct3D11/Cg. Here is the simple pixel shader code:
struct fourDf_In {
float4 position : POSITION;
float3 texCoord : TEXCOORD0;
};
struct fourDf_Out {
float4 color : COLOR;
};
fourDf_Out fourDf ( fourDf_In input,
const uniform sampler1D ColorMap : TEX0,
const uniform sampler3D USTexture : TEX1
)
{
fourDf_Out o, o1;
float tmp;
tmp = tex3D(USTexture, input.texCoord).r;
o.color = tex1D(ColorMap, tmp); // 2 samplers not working
return o;
}
And this is my initialization code:
ID3D11Device *g_pDevice = NULL;
ID3D11Texture1D *myColorMap = NULL;
ID3D11Texture3D *myUSTexture = NULL;
ColorMap = cgGetNamedParameter(myCgFragmentProgram, "ColorMap");
cgD3D11SetTextureParameter( ColorMap, myColorMap );
cgD3D11SetSamplerStateParameter( ColorMap, NULL ); // NULL == default states
USTexture = cgGetNamedParameter(myCgFragmentProgram, "USTexture");
cgD3D11SetTextureParameter(USTexture, myUSTexture);
cgD3D11SetSamplerStateParameter( USTexture, NULL); // NULL == default states
// ---- 1D ColorMap
D3D11_TEXTURE1D_DESC tx1d;
tx1d.Width = ColorMapLength;
tx1d.MipLevels = 1;
tx1d.ArraySize = 1;
tx1d.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // also tried DXGI_FORMAT_R8G8B8A8_UINT
tx1d.Usage = D3D11_USAGE_DEFAULT;
tx1d.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tx1d.CPUAccessFlags = 0;
tx1d.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA srd = {0};
srd.pSysMem = ColorMapArray;
srd.SysMemPitch = 0; // ColorMapLength*4;
srd.SysMemSlicePitch = 0; //ColorMapLength*4;
hr = g_pDevice->CreateTexture1D(&tx1d, &srd, &myColorMap);
if( hr != S_OK )
return hr;
// ---- 3D texture
D3D11_TEXTURE3D_DESC tx3d;
tx3d.Width = iWidth;
tx3d.Height = iHeight;
tx3d.Depth = iDepth;
tx3d.MipLevels = 1;
tx3d.Format = DXGI_FORMAT_R8_UNORM;
tx3d.Usage = D3D11_USAGE_DYNAMIC;
tx3d.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tx3d.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
tx3d.MiscFlags = 0;
hr = g_pDevice->CreateTexture3D(&tx3d, NULL, &myUSTexture);
if( hr != S_OK )
return hr;
The 3D and 1D textures work separately, but not together, i.e. feeding the output of the 3D texture into the 1D ColorMap fails and the image is totally black. I suspect it has something to do with initializing multiple texture units (TEX0, TEX1). How do I use two textures in Cg with Direct3D?

I found that in the pixel shader code TEX0 and TEX1 are deprecated semantics, and both were pointing to the same sampler.
The correct syntax appears to be:
const uniform sampler1D ColorMap : TEXUNIT0,
const uniform sampler3D USTexture : TEXUNIT1
Cg is obsolete, and I recommend removing this dependency and porting the code entirely to DirectX/HLSL.
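If you do port away from the Cg runtime, binding the two textures in plain Direct3D11 just means creating shader resource views and setting them on consecutive slots. A minimal sketch, assuming SRVs and sampler states have already been created from myColorMap and myUSTexture (pColorMapSRV, pUSTextureSRV, pColorMapSampler, pVolumeSampler and g_pImmediateContext are illustrative names, not from the code above):
// Sketch only: bind both textures and their samplers to the pixel shader stage.
ID3D11ShaderResourceView *srvs[2] = { pColorMapSRV, pUSTextureSRV };
g_pImmediateContext->PSSetShaderResources(0, 2, srvs);   // t0 = color map, t1 = 3D texture
ID3D11SamplerState *samplers[2] = { pColorMapSampler, pVolumeSampler };
g_pImmediateContext->PSSetSamplers(0, 2, samplers);      // s0, s1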
Ravi Ganesh

Related

Vulkan Memory usage constantly increasing while recording the command buffer

I've been trying to learn Vulkan for a week now and I managed to draw a triangle on the screen following the tutorial at https://vulkan-tutorial.com. The memory usage keeps increasing, and while doing some debugging I figured out that the cause is the RecordCommandBuffer function.
Here is the function that records the command buffer:
void RecordCommandBuffer(VkCommandBuffer commandBuffer, uint32_t imageIndex)
{
VkCommandBufferBeginInfo beginInfo = {0};
beginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
beginInfo.flags = 0;
beginInfo.pInheritanceInfo = NULL;
if (vkBeginCommandBuffer(commandBuffer, &beginInfo) != VK_SUCCESS)
fprintf(stderr, "Failed to begin recording command buffer!");
VkRenderPassBeginInfo renderPassInfo = { 0 };
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassInfo.renderPass = application.renderPass;
renderPassInfo.framebuffer = application.swapChainFramebuffers[imageIndex];
renderPassInfo.renderArea.offset = (VkOffset2D){ 0, 0 };
renderPassInfo.renderArea.extent = application.swapChainExtent;
VkClearValue clearColor = { { {0.0f, 0.0f, 0.0f, 1.0f} } };
renderPassInfo.clearValueCount = 1;
renderPassInfo.pClearValues = &clearColor;
vkCmdBeginRenderPass(commandBuffer, &renderPassInfo, VK_SUBPASS_CONTENTS_INLINE);
vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, application.graphicsPipeline);
VkViewport viewport = { 0 };
viewport.x = 0.0f;
viewport.y = 0.0f;
viewport.width = (float)application.swapChainExtent.width;
viewport.height = (float)application.swapChainExtent.height;
viewport.minDepth = 0.0f;
viewport.maxDepth = 1.0f;
vkCmdSetViewport(commandBuffer, 0, 1, &viewport);
VkRect2D scissor = { 0 };
scissor.offset = (VkOffset2D){ 0, 0 };
scissor.extent = application.swapChainExtent;
vkCmdSetScissor(commandBuffer, 0, 1, &scissor);
vkCmdDraw(commandBuffer, 3, 1, 0, 0);
vkEndCommandBuffer(commandBuffer);
}
I found out that it could be the vkCmdBeginRenderPass function but I don't know how to fix it.
It turns out I missed the vkCmdEndRenderPass call: the render pass was begun but never ended.
:)
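For reference, the fix is just to end the render pass before ending the command buffer; a minimal sketch of the corrected tail of RecordCommandBuffer:
// End the render pass begun with vkCmdBeginRenderPass; leaving it open
// is what caused the growing memory usage while recording.
vkCmdEndRenderPass(commandBuffer);
if (vkEndCommandBuffer(commandBuffer) != VK_SUCCESS)
    fprintf(stderr, "Failed to record command buffer!");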

Can't Pass Array into UBO

I'm trying to initialize a setup in Vulkan where the shader has a uniform array of textures and corresponding arrays for the textures' widths and heights:
layout(binding=0) uniform UniformBufferObject {
mat4 model; //4x4 array of floats for uniform rotation and positioning
uint texW[32]; //Widths of ith texture
uint texH[32]; //Heights of ith texture
} ubo;
For some reason the shader only reads the mat4 and the first element of texW. Everything else comes through as 0.
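One thing worth checking is the std140 layout of the block: each element of a uint array is padded to a 16-byte stride, so texW alone occupies 512 bytes and texH only starts at offset 576. A tightly packed CPU-side struct therefore only lines up for model and texW[0], which matches the symptom. A rough sketch of a host struct matching the declared layout (the struct and field names here are assumed, not taken from the code below):
#include <stdint.h>
// Sketch of a host-side struct that matches the std140 layout of the block above.
typedef struct {
    float model[4][4];                                    // offset 0,   64 bytes
    struct { uint32_t value; uint32_t pad[3]; } texW[32]; // offset 64,  16-byte stride per element
    struct { uint32_t value; uint32_t pad[3]; } texH[32]; // offset 576, 16-byte stride per element
} uniformBufferObjectStd140;                              // total size 1088 bytes
Alternatively, packing the values into uvec4 texW[8] on the GLSL side (or moving them into a std430 storage buffer) avoids the per-element padding.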
Here's the Uniform Buffer Setup Code, for reference
void createUniformBuffers() {
/**Uniform Buffer for View Transformation*/
VkDeviceSize bufferSize = sizeof(uniformBufferObject);
uniformBuffers = malloc(swapChainSize * sizeof(VkBuffer));
uniformBuffersMemory = malloc(swapChainSize * sizeof (VkDeviceMemory));
for(uint i = 0; i < swapChainSize; i++)
createBuffer(bufferSize, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &uniformBuffers[i], &uniformBuffersMemory[i]);
}
void createDescriptorPool() {
VkDescriptorPoolSize poolSizes[4];
poolSizes[0].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
poolSizes[0].descriptorCount = swapChainSize;
poolSizes[1].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
poolSizes[1].descriptorCount = swapChainSize;
VkDescriptorPoolCreateInfo poolInfo;
poolInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
poolInfo.pNext = 0;
poolInfo.flags = 0;
poolInfo.poolSizeCount = 2;
poolInfo.pPoolSizes = poolSizes;
poolInfo.maxSets = swapChainSize;
if(vkCreateDescriptorPool(lDevice, &poolInfo, 0, & descriptorPool)) {
fprintf(stderr, "Failed to create Descriptor Pool\n");
exit(1);
}
}
void createDescriptorSets() {
VkDescriptorSetLayout layouts[swapChainSize];
for(uint i = 0; i < swapChainSize; i++) layouts[i] = descriptorSetLayout;
VkDescriptorSetAllocateInfo allocInfo;
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.pNext = 0;
allocInfo.descriptorPool = descriptorPool;
allocInfo.descriptorSetCount = swapChainSize;
allocInfo.pSetLayouts = layouts;
descriptorSets = malloc(sizeof(VkDescriptorSet) * swapChainSize);
if(vkAllocateDescriptorSets(lDevice, &allocInfo, descriptorSets) ) {
fprintf(stderr,"Failed to allocate descriptor sets\n");
exit(1);
}
for(uint i = 0; i < swapChainSize; i++) {
VkDescriptorBufferInfo bufferInfo;
bufferInfo.buffer = uniformBuffers[i];
bufferInfo.offset = 0;
bufferInfo.range = VK_WHOLE_SIZE; //sizeof(uniformBufferObject);
VkWriteDescriptorSet descriptorWrites[2];
descriptorWrites[0].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[0].pNext = 0;
descriptorWrites[0].dstSet = descriptorSets[i];
descriptorWrites[0].dstBinding = 0;
descriptorWrites[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
descriptorWrites[0].descriptorCount = 1;
descriptorWrites[0].pBufferInfo = &bufferInfo;
descriptorWrites[0].pImageInfo = 0;
descriptorWrites[0].pTexelBufferView = 0;
descriptorWrites[0].dstArrayElement = 0;
VkDescriptorImageInfo imageInfo;
imageInfo.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
imageInfo.imageView = textureImageView[0];
imageInfo.sampler= textureSampler;
descriptorWrites[1].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptorWrites[1].pNext = 0;
descriptorWrites[1].dstBinding = 1;
descriptorWrites[1].dstSet = descriptorSets[i];
descriptorWrites[1].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptorWrites[1].descriptorCount = 1;
descriptorWrites[1].pImageInfo = &imageInfo;
descriptorWrites[1].pBufferInfo = 0;
descriptorWrites[1].pTexelBufferView = 0;
descriptorWrites[1].dstArrayElement = 0;
vkUpdateDescriptorSets(lDevice, 2, descriptorWrites,0,0);
}
}
Here's the code where the uniformBuffer is updated,
void updateUniformBuffer(uint currentImage) {
//FTR, I know I probably shouldn't remap each update, optimizing this is on my todo-list
vkMapMemory(lDevice, uniformBuffersMemory[currentImage], 0, sizeof(uniformBufferObject), 0, (void*) &uData);
memcpy(uData, &uniformData, sizeof(uniformBufferObject));
memset(uData->model, 0, sizeof(uData->model));
//Rotating the View, GLSL acknowledges this data
uData->model[2][2] = uData->model[3][3] = 1;
uData->model[0][0] = cosf(angle);
uData->model[0][1] = -sinf(angle);
uData->model[1][1] = cosf(angle);
uData->model[1][0] = sinf(angle);
angle+= 0.05f;
uData->texW[0] = 1024; //<-------GLSL Vertex Shader will acknowledge this =)
uData->texW[1] = 1024; //<-------GLSL Vertex Shader will ignore this =(
uData->texH[0] = 1024; //<-------GLSL Vertex Shader will also ignore this >_<
vkUnmapMemory(lDevice, uniformBuffersMemory[currentImage]);
}
Any pointers or advice would be greatly appreciated.

Vulkan Vertex Buffer C

I am attempting to create a vertex buffer in Vulkan using C. When passing vertex data to the GPU, nothing is displayed, and with validation layers on, no errors or warnings are raised. I have tried changing the buffer size, using a different data set, and checking the sizes of the buffers, but to no avail.
Posted below are the relevant sections of code for creating the buffer.
Vertex data struct:
typedef struct vertexData {
vec2 pos;
vec3 color;
} vertexData;
Vertex data:
const vertexData vertices[3] = {
{{0.0f,-0.5},{1.0f, 0.0f, 0.0f}},
{{0.5f,0.5},{0.0f, 1.0f, 0.0f}},
{{-0.5f,0.5},{0.0f, 0.0f, 1.0f}},
};
Vertex descriptions:
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription ={
.binding = 0,
.stride = sizeof(vertexData),
.inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
};
return bindingDescription;
}
static VkVertexInputAttributeDescription *getAttributeDescriptions() {
VkVertexInputAttributeDescription *attributeDescriptions = malloc(2*sizeof(VkVertexInputAttributeDescription));
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[0].offset = offsetof(vertexData, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(vertexData, color);
return attributeDescriptions;
}
Creating the vertex info:
VkVertexInputBindingDescription bindingDescription = getBindingDescription();
VkVertexInputAttributeDescription *attributeDescriptions;
attributeDescriptions = getAttributeDescriptions();
VkPipelineVertexInputStateCreateInfo vertexInputInfo = {
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
.vertexBindingDescriptionCount = 1,
.pVertexBindingDescriptions = &bindingDescription,
.vertexAttributeDescriptionCount = 2,
.pVertexAttributeDescriptions = attributeDescriptions,
};
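For reference, vertexInputInfo is the state that later gets attached when the graphics pipeline is created; a minimal sketch of that step, assuming a pipelineInfo variable that is eventually passed to vkCreateGraphicsPipelines (not shown in the excerpt):
// Sketch: wiring the vertex input state into pipeline creation (requires <string.h> for memset).
VkGraphicsPipelineCreateInfo pipelineInfo;
memset(&pipelineInfo, 0, sizeof(pipelineInfo));
pipelineInfo.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
pipelineInfo.pVertexInputState = &vertexInputInfo;
// ... shader stages, input assembly, viewport, rasterizer, layout, render pass, etc. ...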
Creating the actual vertex buffer:
void createVertexBuffer() {
VkBufferCreateInfo bufferInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = sizeof(vertices),
.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
};
if(vkCreateBuffer(device, &bufferInfo, NULL, &vertexBuffer) != VK_SUCCESS) {
printf("Failed to create vertex buffer.\n");
cleanup();
}
VkMemoryRequirements memRequirements;
vkGetBufferMemoryRequirements(device, vertexBuffer, &memRequirements);
VkMemoryAllocateInfo allocInfo = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = memRequirements.size,
.memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT),
};
if(vkAllocateMemory(device, &allocInfo, NULL, &vertexBufferMemory) != VK_SUCCESS) {
printf("Failed to allocate vertex buffer memory.\n");
cleanup();
}
vkBindBufferMemory(device, vertexBuffer, vertexBufferMemory, 0);
void *data;
vkMapMemory(device, vertexBufferMemory, 0, bufferInfo.size, 0, &data);
memcpy(data, vertices, sizeof(vertices));
vkUnmapMemory(device, vertexBufferMemory);
}
And finally the actual draw command:
vkCmdBindPipeline(commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, graphicsPipeline);
VkBuffer vertexBuffers[] = {vertexBuffer};
VkDeviceSize offsets[] = {0};
vkCmdBindVertexBuffers(commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdDraw(commandBuffers[i], (uint32_t)sizeof(vertices)/sizeof(vertices[0]), 1, 0, 0);
The shaders are simple pass-through shaders, and they work when the vertex data is hardcoded into them. The most I have been able to display is what looks like junk data on screen, which I believe was just random/incorrect memory being bound to the buffer. With the code in its current state nothing is displayed.
Any help is appreciated, thanks.

DirectX: The best way to get RGB data from ID3D11Texture2D with DXGI_FORMAT_NV12 format?

I'm using DirectX to draw video that has been decoded by the Intel Media SDK. I then draw it with the following Intel code:
mfxStatus CD3D11Device::RenderFrame(mfxFrameSurface1 * pSrf, mfxFrameAllocator * pAlloc)
{
HRESULT hres = S_OK;
mfxStatus sts;
sts = CreateVideoProcessor(pSrf);
MSDK_CHECK_STATUS(sts, "CreateVideoProcessor failed");
hres = m_pSwapChain->GetBuffer(0, __uuidof( ID3D11Texture2D ), (void**)&m_pDXGIBackBuffer.p);
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC OutputViewDesc;
if (2 == m_nViews)
{
m_pVideoContext->VideoProcessorSetStreamStereoFormat(m_pVideoProcessor, 0, TRUE,D3D11_VIDEO_PROCESSOR_STEREO_FORMAT_SEPARATE,
TRUE, TRUE, D3D11_VIDEO_PROCESSOR_STEREO_FLIP_NONE, NULL);
m_pVideoContext->VideoProcessorSetOutputStereoMode(m_pVideoProcessor,TRUE);
OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2DARRAY;
OutputViewDesc.Texture2DArray.ArraySize = 2;
OutputViewDesc.Texture2DArray.MipSlice = 0;
OutputViewDesc.Texture2DArray.FirstArraySlice = 0;
}
else
{
OutputViewDesc.ViewDimension = D3D11_VPOV_DIMENSION_TEXTURE2D;
OutputViewDesc.Texture2D.MipSlice = 0;
}
if (1 == m_nViews || 0 == pSrf->Info.FrameId.ViewId)
{
hres = m_pDX11VideoDevice->CreateVideoProcessorOutputView(
m_pDXGIBackBuffer,
m_VideoProcessorEnum,
&OutputViewDesc,
&m_pOutputView.p );
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
}
D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC InputViewDesc;
InputViewDesc.FourCC = 0;
InputViewDesc.ViewDimension = D3D11_VPIV_DIMENSION_TEXTURE2D;
InputViewDesc.Texture2D.MipSlice = 0;
InputViewDesc.Texture2D.ArraySlice = 0;
mfxHDLPair pair = {NULL};
sts = pAlloc->GetHDL(pAlloc->pthis, pSrf->Data.MemId, (mfxHDL*)&pair);
MSDK_CHECK_STATUS(sts, "pAlloc->GetHDL failed");
ID3D11Texture2D *pRTTexture2D = reinterpret_cast<ID3D11Texture2D*>(pair.first);
D3D11_TEXTURE2D_DESC RTTexture2DDesc;
if(!m_pTempTexture && m_nViews == 2)
{
pRTTexture2D->GetDesc(&RTTexture2DDesc);
hres = m_pD3D11Device->CreateTexture2D(&RTTexture2DDesc,NULL,&m_pTempTexture.p);
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
}
// Creating input views for left and right eyes
if (1 == m_nViews)
{
hres = m_pDX11VideoDevice->CreateVideoProcessorInputView(
pRTTexture2D,
m_VideoProcessorEnum,
&InputViewDesc,
&m_pInputViewLeft.p );
}
else if (2 == m_nViews && 0 == pSrf->Info.FrameId.ViewId)
{
m_pD3D11Ctx->CopyResource(m_pTempTexture,pRTTexture2D);
hres = m_pDX11VideoDevice->CreateVideoProcessorInputView(
m_pTempTexture,
m_VideoProcessorEnum,
&InputViewDesc,
&m_pInputViewLeft.p );
}
else
{
hres = m_pDX11VideoDevice->CreateVideoProcessorInputView(
pRTTexture2D,
m_VideoProcessorEnum,
&InputViewDesc,
&m_pInputViewRight.p );
}
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
// NV12 surface to RGB backbuffer
RECT rect = {0};
rect.right = pSrf->Info.CropW;
rect.bottom = pSrf->Info.CropH;
D3D11_VIDEO_PROCESSOR_STREAM StreamData;
if (1 == m_nViews || pSrf->Info.FrameId.ViewId == 1)
{
StreamData.Enable = TRUE;
StreamData.OutputIndex = 0;
StreamData.InputFrameOrField = 0;
StreamData.PastFrames = 0;
StreamData.FutureFrames = 0;
StreamData.ppPastSurfaces = NULL;
StreamData.ppFutureSurfaces = NULL;
StreamData.pInputSurface = m_pInputViewLeft;
StreamData.ppPastSurfacesRight = NULL;
StreamData.ppFutureSurfacesRight = NULL;
StreamData.pInputSurfaceRight = m_nViews == 2 ? m_pInputViewRight : NULL;
m_pVideoContext->VideoProcessorSetStreamSourceRect(m_pVideoProcessor, 0, true, &rect);
m_pVideoContext->VideoProcessorSetStreamFrameFormat( m_pVideoProcessor, 0, D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE);
hres = m_pVideoContext->VideoProcessorBlt( m_pVideoProcessor, m_pOutputView, 0, 1, &StreamData );
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
}
if (1 == m_nViews || 1 == pSrf->Info.FrameId.ViewId)
{
DXGI_PRESENT_PARAMETERS parameters = {0};
hres = m_pSwapChain->Present1(0, 0, &parameters);
if (FAILED(hres))
return MFX_ERR_DEVICE_FAILED;
}
return MFX_ERR_NONE;
}
From this line of the code:
ID3D11Texture2D *pRTTexture2D = reinterpret_cast<ID3D11Texture2D*>(pair.first);
I get pRTTexture2D, an ID3D11Texture2D in DXGI_FORMAT_NV12 format.
I want to get RGB data from this texture, and I tried the following approach:
1) Map the texture with d3dContext->Map(Texture, 0, D3D11_MAP_READ, 0, &mapInfo) => in my case this requires copying to a staging resource first
2) Create an RGB array in system memory and convert the NV12 data in mapInfo into that RGB array.
This works, but I would like a better way. I suspect that while rendering (the RenderFrame() function) DirectX already converts the texture to RGB in the back buffer, and if I could get the data from that back buffer, that would be great.
Can someone show me code for that approach, or is there a better way to implement it?
Thank you very much!
Try the D3DXSaveSurfaceToFileInMemory API; maybe it can give you some ideas.
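Since VideoProcessorBlt has already converted the NV12 frame to RGB in the swap-chain back buffer, another option is to copy that back buffer into a staging texture and map it on the CPU. A rough sketch reusing m_pSwapChain, m_pD3D11Device and m_pD3D11Ctx from the code above (pBackBuffer and pStaging are illustrative local names); the copy has to happen before Present1, because the back-buffer contents are not guaranteed to survive presentation:
// Sketch: read the converted RGB back buffer through a CPU-readable staging copy.
CComPtr<ID3D11Texture2D> pBackBuffer;
HRESULT hr = m_pSwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&pBackBuffer.p);
if (SUCCEEDED(hr))
{
    D3D11_TEXTURE2D_DESC desc;
    pBackBuffer->GetDesc(&desc);             // keep the back buffer's size and RGB format
    desc.Usage = D3D11_USAGE_STAGING;        // CPU-readable copy
    desc.BindFlags = 0;
    desc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
    desc.MiscFlags = 0;
    CComPtr<ID3D11Texture2D> pStaging;
    hr = m_pD3D11Device->CreateTexture2D(&desc, NULL, &pStaging.p);
    if (SUCCEEDED(hr))
    {
        m_pD3D11Ctx->CopyResource(pStaging, pBackBuffer);
        D3D11_MAPPED_SUBRESOURCE mapped;
        hr = m_pD3D11Ctx->Map(pStaging, 0, D3D11_MAP_READ, 0, &mapped);
        if (SUCCEEDED(hr))
        {
            // mapped.pData holds the RGBA pixels, one row every mapped.RowPitch bytes.
            m_pD3D11Ctx->Unmap(pStaging, 0);
        }
    }
}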

SharpDX Constant/Texture Buffers Don't Work

I've been trying to get constant/texture buffers to work in SharpDX (it's just like SlimDX), but the data I put into them doesn't seem to reach the shaders.
I've looked up how to do it and followed examples but I just can't get it to work.
Ultimately I will need to be able to input multiple large arrays of various data types into my shaders, so if anyone can give me a working example that can do that, it would be great!
But for now I've written a simple example that I've tried to test, and I just can't get it to work. Usually I can at least get something to display when I draw a triangle, but right now it won't even do that.
That's probably a silly mistake I overlooked, but anyway, it would be great if someone could take a look at it and point out what's wrong, or better yet, fix it and post the updated code (it is complete and should compile).
I'm sorry the code is long, but I tried to make it as simple as possible. Anyway, here it is:
using SharpDX;
using SharpDX.Direct3D;
using SharpDX.Direct3D11;
using SharpDX.DXGI;
using SharpDX.Windows;
using SharpDX.D3DCompiler;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Linq;
using System.Runtime.InteropServices;
using System.Text;
using System.Threading.Tasks;
using System.Windows.Forms;
namespace test_namespace
{
class Test
{
[StructLayout(LayoutKind.Explicit, Size = 80, Pack = 16)]
public struct Data
{
[FieldOffset(0)]
public Matrix mat;
[FieldOffset(64)]
public Vector4 testColor;
}
[StructLayout(LayoutKind.Explicit)]
public struct Point
{
[FieldOffset(0)]
public Vector4 pos;
[FieldOffset(16)]
public Vector2 tex;
}
int width = 1000;
int height = 1000;
const int vertSize = 6 * sizeof(float);
RenderForm form;
PictureBox pic;
SharpDX.Direct3D11.Device dev;
DeviceContext dc;
SwapChainDescription scd;
SwapChain sc;
RasterizerStateDescription rsd;
RasterizerState rs;
Viewport vp;
Texture2DDescription depthDesc;
DepthStencilView dsv;
RenderTargetView rtv;
SharpDX.Direct3D11.Buffer buffer;
InputLayout il;
VertexShader vs;
ShaderBytecode vsCode;
PixelShader ps;
ShaderBytecode psCode;
Matrix view;
Matrix proj;
Matrix mat;
Data data;
DataStream pointStream;
SharpDX.Direct3D11.Buffer pointBuffer;
public Test()
{
init();
initMat();
data.testColor = new Vector4(1.0f, 0.5f, 0.25f, 0.0f);
string code = "struct vert { float4 pos : POSITION; float2 tex : TEXCOORD; };\n"
+ "struct pix { float4 pos : SV_POSITION; float2 tex : TEXCOORD; };\n"
+ "cbuffer buf1 : register(b0) { float4x4 mat; float4 testColor; }\n"
+ "pix VS(vert vertIn) { pix pixOut = (pix)0; pixOut.pos = mul(vertIn.pos, mat); pixOut.tex = vertIn.tex; return pixOut; }\n"
+ "float4 PS(pix pixIn) : SV_Target { return testColor; }";
vsCode = ShaderBytecode.Compile(code, "VS", "vs_5_0");
vs = new VertexShader(dev, vsCode);
psCode = ShaderBytecode.Compile(code, "PS", "ps_5_0");
ps = new PixelShader(dev, psCode);
dc.VertexShader.Set(vs);
dc.PixelShader.Set(ps);
il = new InputLayout(dev, ShaderSignature.GetInputSignature(vsCode),
new InputElement[] {new InputElement("POSITION", 0, Format.R32G32B32_Float, 0, 0),
new InputElement("TEXCOORD", 0, Format.R32G32_Float, 16, 0)});
dc.InputAssembler.InputLayout = il;
dc.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
updateBuffer();
RenderLoop.Run(form, () =>
{
dc.ClearDepthStencilView(dsv, DepthStencilClearFlags.Depth, 1.0f, 0);
dc.ClearRenderTargetView(rtv, Color4.Black);
float dist = 10.0f;
draw(new Vector3(-dist, -dist, dist), Vector2.Zero, new Vector3(-dist, dist, dist), Vector2.UnitY,
new Vector3(dist, dist, dist), Vector2.One);
});
}
void init()
{
form = new RenderForm();
form.ClientSize = new System.Drawing.Size(width, height);
form.BackColor = System.Drawing.Color.Black;
form.FormClosed += form_FormClosed;
pic = new PictureBox();
pic.Location = new System.Drawing.Point(0, 0);
pic.Size = new Size(width, height);
pic.Show();
form.Controls.Add(pic);
scd = new SwapChainDescription();
scd.BufferCount = 1;
scd.Flags = SwapChainFlags.AllowModeSwitch;
scd.IsWindowed = true;
scd.ModeDescription = new ModeDescription(width, height, new Rational(60, 1), Format.R8G8B8A8_UNorm);
scd.OutputHandle = pic.Handle;
scd.SampleDescription = new SampleDescription(1, 0);
scd.SwapEffect = SwapEffect.Discard;
scd.Usage = Usage.RenderTargetOutput;
rsd = new RasterizerStateDescription();
rsd.CullMode = CullMode.None;
rsd.DepthBias = 0;
rsd.DepthBiasClamp = 0;
rsd.FillMode = FillMode.Solid;
rsd.IsAntialiasedLineEnabled = true;
rsd.IsDepthClipEnabled = true;
rsd.IsFrontCounterClockwise = false;
rsd.IsMultisampleEnabled = true;
rsd.IsScissorEnabled = false;
rsd.SlopeScaledDepthBias = 0;
SharpDX.Direct3D11.Device.CreateWithSwapChain(DriverType.Hardware, DeviceCreationFlags.Debug, scd, out dev, out sc);
rs = new RasterizerState(dev, rsd);
vp = new Viewport(0, 0, width, height, 0.0f, 1.0f);
dc = dev.ImmediateContext;
dc.Rasterizer.State = rs;
dc.Rasterizer.SetViewports(vp);
depthDesc = new Texture2DDescription();
depthDesc.ArraySize = 1;
depthDesc.BindFlags = BindFlags.DepthStencil;
depthDesc.CpuAccessFlags = CpuAccessFlags.None;
depthDesc.Format = Format.D32_Float_S8X24_UInt;
depthDesc.Height = height;
depthDesc.MipLevels = 1;
depthDesc.OptionFlags = ResourceOptionFlags.None;
depthDesc.SampleDescription = new SampleDescription(1, 0);
depthDesc.Usage = ResourceUsage.Default;
depthDesc.Width = width;
dsv = new DepthStencilView(dev, new Texture2D(dev, depthDesc));
rtv = new RenderTargetView(dev, (SharpDX.Direct3D11.Resource)SharpDX.Direct3D11.Resource.FromSwapChain<Texture2D>(sc, 0));
dc.OutputMerger.SetTargets(dsv, rtv);
buffer = new SharpDX.Direct3D11.Buffer(dev, Marshal.SizeOf(typeof(Data)),
ResourceUsage.Default, BindFlags.ConstantBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0);
dc.VertexShader.SetConstantBuffer(0, buffer);
}
void initMat()
{
view = Matrix.LookAtLH(Vector3.Zero, Vector3.UnitZ, Vector3.UnitY);
proj = Matrix.PerspectiveFovLH((float)Math.PI / 4.0f, (float)width / (float)height, 0.001f, 10000.0f);
mat = view * proj;
mat.Transpose();
data.mat = mat;
}
void updateBuffer()
{
dc.UpdateSubresource<Data>(ref data, buffer);
}
public void draw(Vector3 p1, Vector2 t1, Vector3 p2, Vector2 t2, Vector3 p3, Vector2 t3)
{
Vector3[] p = new Vector3[3] {p1, p2, p3};
Vector2[] t = new Vector2[3] {t1, t2, t3};
Point[] points = new Point[3];
for(int i = 0; i < 3; i++)
{
points[i] = new Point();
points[i].pos = new Vector4(p[i].X, p[i].Y, p[i].Z, 1.0f);
points[i].tex = new Vector2(t[i].X, t[i].Y);
}
using(pointStream = new DataStream(vertSize * 3, true, true))
{
pointStream.WriteRange<Point>(points);
using(pointBuffer = new SharpDX.Direct3D11.Buffer(dev, pointStream, vertSize * 3,
ResourceUsage.Default, BindFlags.VertexBuffer, CpuAccessFlags.None, ResourceOptionFlags.None, 0))
{
dc.InputAssembler.PrimitiveTopology = PrimitiveTopology.TriangleList;
dc.InputAssembler.SetVertexBuffers(0, new VertexBufferBinding(pointBuffer, vertSize, 0));
dc.Draw(3, 0);
}
}
}
void form_FormClosed(object sender, FormClosedEventArgs e)
{
buffer.Dispose();
il.Dispose();
ps.Dispose();
psCode.Dispose();
vs.Dispose();
vsCode.Dispose();
rtv.Dispose();
dsv.Dispose();
dc.ClearState();
dc.Flush();
dc.Dispose();
dev.Dispose();
sc.Dispose();
}
}
}
Also, here is the shader code formatted in a more readable way:
struct vert
{
float4 pos : POSITION;
float2 tex : TEXCOORD;
};
struct pix
{
float4 pos : SV_POSITION;
float2 tex : TEXCOORD;
};
cbuffer buf1 : register(b0)
{
float4x4 mat;
float4 testColor;
}
pix VS(vert vertIn)
{
pix pixOut = (pix)0;
pixOut.pos = mul(vertIn.pos, mat);
pixOut.tex = vertIn.tex;
return pixOut;
}
float4 PS(pix pixIn) : SV_Target
{
return testColor;
}
I'm not sure if this is of any help here, but why use UpdateSubresource in your updateBuffer()? In SharpDXTutorial/Tutorial16 (the cubemap example) the constant buffer is updated through the "device" object:
device.UpdateData<Data>(dataConstantBuffer, sceneInformation);
This is a very handy helper. It is contained in SharpHelper, part of SharpDXTutorial:
https://github.com/RobyDX/SharpDX_Demo/blob/master/SharpDXTutorial/SharpHelper/SharpHelper.csproj
Maybe it takes care of something that was missed when updating the constant buffer?
