I'm trying to implement tiled deferred rendering, but I'm stuck on calculating the min/max depth for each tile. I'm using a compute shader for this. This is its complete code:
#version 430
#define TILE_SIZE 32

// Per-tile output: one texel per work group (debug output for now).
layout (location = 0, rgba32f) uniform image2D u_texture;
// Full-resolution depth, stored in the .x channel.
layout (location = 1, rgba32f) uniform image2D depthtex;

layout (local_size_x = TILE_SIZE, local_size_y = TILE_SIZE) in;

shared uint min_depth;
shared uint max_depth;

void main(void){
    ivec2 pos  = ivec2(gl_GlobalInvocationID.xy); // pixel coordinate
    ivec2 grid = ivec2(gl_WorkGroupID.xy);        // tile coordinate

    // One invocation initializes the shared values...
    if(gl_LocalInvocationIndex == 0){
        min_depth = 0xFFFFFFFF;
        max_depth = 0;
    }
    // ...and every invocation waits for that before touching them.
    barrier();

    // Convert the float depth to uint so it can be used with atomicMin/atomicMax.
    float d = imageLoad(depthtex, pos).x;
    uint depth = uint(d * 0xFFFFFFFF);
    atomicMin(min_depth, depth);
    atomicMax(max_depth, depth);

    // Wait until every invocation has contributed its depth.
    barrier();

    // For now just write the tile's minimum depth out so I can inspect it.
    vec4 color = vec4(float(min_depth) / float(0xFFFFFFFF));
    imageStore(u_texture, grid, color);
}
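For context, this is roughly how I bind the images and dispatch the shader on the host side, with one work group per 32x32 tile (simplified sketch; the program handle, texture handles, and resolution are placeholders for what my wrapper classes actually hold):

// Simplified dispatch sketch. program, tileTex, depthTex and the resolution
// are placeholders for what the real code holds.
const int TILE_SIZE = 32;
GLuint program;   // the linked compute program
GLuint tileTex;   // rgba32f texture with one texel per tile (u_texture)
GLuint depthTex;  // rgba32f texture with depth in .x (depthtex)
int width = 1280, height = 720;

glUseProgram(program);

// Bind the per-tile output and the depth texture to image units 0 and 1.
glBindImageTexture(0, tileTex,  0, GL_FALSE, 0, GL_WRITE_ONLY, GL_RGBA32F);
glBindImageTexture(1, depthTex, 0, GL_FALSE, 0, GL_READ_ONLY,  GL_RGBA32F);

// Point the image uniforms (locations 0 and 1) at those image units.
glUniform1i(0, 0);
glUniform1i(1, 1);

// One work group per tile, rounding up to cover partial tiles at the edges.
glDispatchCompute((width  + TILE_SIZE - 1) / TILE_SIZE,
                  (height + TILE_SIZE - 1) / TILE_SIZE,
                  1);

// Make the image writes visible to later reads.
glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);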
When I try to run the program, it gets stuck in an infinite loop during compute shader compilation. Any idea what causes this and how to fix it?
EDIT: I found out that the problem is triggered by the glGetProgramiv() call when I check the link status of the compute program, which I do with this code:
// throw exception if linking failed
GLint status;
glGetProgramiv(_object, GL_LINK_STATUS, &status);
if (status == GL_FALSE) {
    std::string msg("Program linking failure: ");

    GLint infoLogLength;
    glGetProgramiv(_object, GL_INFO_LOG_LENGTH, &infoLogLength);
    char* strInfoLog = new char[infoLogLength + 1];
    glGetProgramInfoLog(_object, infoLogLength, NULL, strInfoLog);
    msg += strInfoLog;
    delete[] strInfoLog;

    glDeleteProgram(_object);
    _object = 0;
    throw std::runtime_error(msg);
}
However, if I remove the atomicMin() call from my shader, everything works fine.
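In case it helps narrow things down, I can also check the compile status of the shader object itself before linking, along these lines (sketch; _shader stands for the handle returned by glCreateShader(GL_COMPUTE_SHADER)):

// Sketch: check compilation separately from linking, to see whether the
// driver loops inside glCompileShader or inside glLinkProgram.
// _shader is assumed to be the compute shader object.
GLint compiled;
glGetShaderiv(_shader, GL_COMPILE_STATUS, &compiled);
if (compiled == GL_FALSE) {
    GLint infoLogLength;
    glGetShaderiv(_shader, GL_INFO_LOG_LENGTH, &infoLogLength);

    std::vector<char> infoLog(infoLogLength + 1);
    glGetShaderInfoLog(_shader, infoLogLength, NULL, infoLog.data());

    throw std::runtime_error(std::string("Shader compilation failure: ") + infoLog.data());
}

That way the info log (or the call that never returns) should at least show whether it loops during compilation or during linking.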