Hi,
I am trying to port J. Coluna's XNA light pre-pass (LPP) sample https://jcoluna.wordpress.com/page/2/ to my DX engine, and I am facing some problems:
1. For now I am using an inverted (reversed-Z) depth buffer in my engine to increase depth precision, and I don't know which parts of my renderer it will affect. Obviously it is not working now, because I see nothing in the normal and depth render targets, and my model is rendered without depth (just ambient).
In the Clear GBuffer shader I left Depth as float4(1,1,1,1), because depth is stored in view space, so as I understand it I don't need to invert it.
// Input: full-screen quad vertices already expressed in clip space.
struct VertexShaderInput
{
float4 Position : SV_POSITION;
};
// Output: clip-space position forwarded unchanged to the rasterizer.
struct VertexShaderOutput
{
float4 Position : SV_POSITION;
};
// Pass-through vertex shader for the G-buffer clear pass.
// Forces w = 1 so the quad position is treated as a point and
// reaches the pixel stage without any perspective distortion.
VertexShaderOutput VertexShaderFunction(VertexShaderInput input)
{
    float4 clipPosition = input.Position;
    clipPosition.w = 1;

    VertexShaderOutput result;
    result.Position = clipPosition;
    return result;
}
// G-buffer layout for the clear pass: two simultaneous render targets.
struct PixelShaderOutput
{
float4 Normal : SV_TARGET0; // encoded normal (RG) + specular power (B)
float4 Depth : SV_TARGET1; // linear view-space depth in R
};
// Writes "empty" default values into every G-buffer texel before the
// geometry pass runs, so unrendered pixels hold a sane normal and max depth.
PixelShaderOutput PixelShaderFunction(VertexShaderOutput input)
{
PixelShaderOutput output;
//this value depends on your normal encoding method.
//on our example, it will generate a (0,0,-1) normal
// NOTE(review): whether (0.5, 0.5) really decodes to (0,0,-1) depends on
// the EncodeNormal/DecodeNormal pair used by the main shader — verify that
// this clear constant matches the decoder, or lighting on empty pixels
// will pick up a bogus normal.
output.Normal = float4(0.5, 0.5, 0.5, 0);
//max depth
// NOTE(review): 1.0 is "far plane" for the LINEAR depth stored in this RT;
// this is independent of the reversed-Z hardware depth buffer, so it should
// stay 1 even with an inverted depth test — confirm against the light pass
// reconstruction code.
output.Depth = float4(1, 1, 1, 1);
return output;
}
// Effect technique binding the clear-pass shaders.
technique Clear
{
pass ClearPass
{
// NOTE(review): "Profile = 11" looks like an engine-specific effect-framework
// directive (presumably selecting the D3D11 / SM 4_0+ compile profile) —
// confirm against your effect compiler; this is not standard FX syntax.
Profile = 11;
VertexShader = VertexShaderFunction;
PixelShader = PixelShaderFunction;
}
}
And here is my LPP main shader:
// LPP geometry-pass vertex shader: transforms the mesh into clip space and
// outputs the view-space tangent basis plus linear view-space depth for the
// G-buffer pixel shader.
VertexShaderOutput VertexShaderFunction(MeshVertexInput input)
{
    VertexShaderOutput output;

    // Treat the incoming position as a point (w = 1) for the transforms below.
    input.Position.w = 1;

    // Explicit .xyz: assigning the float4 result of mul() to a float3 relies on
    // implicit truncation (HLSL warning X3206, an error under strict flags).
    float3 viewSpacePos = mul(input.Position, WorldView).xyz;

    output.Position = mul(input.Position, WorldViewProjection);
    output.TexCoord = input.UV0; //pass the texture coordinates further

    //we output our normals/tangents/binormals in viewspace
    // The float3x3 cast drops the translation row, leaving pure rotation/scale.
    // NOTE(review): this assumes WorldView contains no non-uniform scale —
    // otherwise normals need the inverse-transpose matrix.
    output.Normal  = normalize(mul(input.Normal, (float3x3) WorldView));
    output.Tangent = normalize(mul(input.Tangent.xyz, (float3x3) WorldView));
    // Tangent.w carries the bitangent handedness sign (+1 or -1).
    output.Binormal = normalize(cross(output.Normal, output.Tangent) * input.Tangent.w);

    // View-space z; the pixel shader scales it by 1/FarClip into [0..1].
    output.Depth = viewSpacePos.z;
    return output;
}
// LPP geometry-pass pixel shader: fills the normal+specular and linear-depth
// G-buffer render targets for the subsequent lighting pass.
PixelShaderOutput PixelShaderFunction(VertexShaderOutput input)
{
// Cast-initializes every field of every output to 1 (HLSL scalar-to-struct cast).
PixelShaderOutput output = (PixelShaderOutput) 1;
//read from our normal map
half4 normalMap = NormalMap.Sample(NormalMapSampler, input.TexCoord);
// Transform the tangent-space normal-map sample into view space using the
// interpolated basis from the vertex shader.
half3 normalViewSpace = NormalMapToSpaceNormal(normalMap.xyz, input.Normal, input.Binormal, input.Tangent);
output.Normal.rg = EncodeNormal(normalize(normalViewSpace)); //our encoder output in RG channels
output.Normal.b = normalMap.a; //our specular power goes into B channel
output.Normal.a = 1; //not used
// NOTE(review): the negation assumes a right-handed view space where visible
// geometry has negative z (XNA convention). If your DX engine uses a
// left-handed view matrix, input.Depth is already positive and this write
// produces negative values — which would leave the depth RT apparently empty.
// Confirm the handedness of WorldView; this interacts with your reversed-Z
// setup only at the hardware depth test, not in this linear [0..1] value.
output.Depth.r = -input.Depth / FarClip; //output Depth in linear space, [0..1]
return output;
}
But as a result I receive the following image (see attachment). At the top of the window there should be 3 render targets (normals, depth and light). Light is working (more or less), but the normal and depth targets are not displaying anything at all. As I understand it, I receive only color for my model.
First screenshot is from my forward renderer with directional light
Second - from my LPP renderer with a directional light. As you can see, at the top of the window there should be 3 render targets for debugging, but there is no output for them at all.
I cannot understand what I am doing wrong here. Maybe someone could point me in the right direction.
↧