
I would like to calculate the 2D Gaussian function where the input is the X,Y texture UV coordinate, and get the corresponding Gaussian value.

I'm having difficulty working out how to get the Gaussian value that corresponds to a given texel's UV coordinate.
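
For reference, the standard 2D Gaussian I am trying to evaluate, for an offset $(x, y)$ and standard deviation $\sigma$, is

$$G(x,y) = \frac{1}{2\pi\sigma^{2}}\, e^{-\frac{x^{2}+y^{2}}{2\sigma^{2}}}$$

(my `Gaussian2D` below drops the $\frac{1}{2\pi\sigma^{2}}$ normalization and uses `_2D_StandardDeviation` for $\sigma$).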

float Gaussian2D(float x, float y)
{
    float x_y_squared = x * x + y * y;
    float stDevSquared = 2 * _2D_StandardDeviation * _2D_StandardDeviation;

    float div = x_y_squared / stDevSquared;

    float gauss = pow(E, -div);
    return gauss;
}

float Gaussian(int offset)
{
    float stDevSquared = _StandardDeviation * _StandardDeviation;
    float gauss = (1 / sqrt(2 * PI * stDevSquared)) * pow(E, -((offset * offset) / (2 * stDevSquared)));
    return gauss;
}

fixed4 frag(v2f i) : SV_Target
{
    fixed source = tex2D(_MainTex, i.uv).r;

    float g0 = Gaussian(0);
    float g1 = Gaussian(1);
    float g2 = Gaussian(2);
    float g3 = Gaussian(3);
    float g4 = Gaussian(4);
    float g5 = Gaussian(5);

    float omega = g0 + g1 + g2 + g3 + g4 + g5;
    float gauss = Gaussian2D(i.uv.x, i.uv.y);

    fixed prev_a = tex2D(_HistoryA, i.uv).r;
    fixed prev_b = tex2D(_HistoryB, i.uv).r;
    fixed prev_c = tex2D(_HistoryC, i.uv).r;
    fixed prev_d = tex2D(_HistoryD, i.uv).r;
    fixed prev_e = tex2D(_HistoryE, i.uv).r;

    fixed current = (gauss * source * g0 + gauss * prev_a * g1 + gauss * prev_b * g2
                   + gauss * prev_c * g3 + gauss * prev_d * g4 + gauss * prev_e * g5) / omega;

    float diff = source - prev_a;

    if (diff <= _dataDelta)
    {
        return current;
    }

    return source;
}
            ENDCG
        }


Update based on the great answer by Spektre below:

            sampler2D _MainTex;
            sampler2D _HistoryA;
            sampler2D _HistoryB;
            sampler2D _HistoryC;
            sampler2D _HistoryD;
            float4 _MainTex_TexelSize;
            float _dataDelta;
            float _blurRadius;
            float _stepsDelta;
            float _resolution;
            float4 _MainTex_ST;
            float _StandardDeviation;

            #define E 2.71828182846
            #define PI 3.14159265359


                v2f vert(appdata v) {
                  v2f o;
                  o.vertex = UnityObjectToClipPos(v.vertex);
         
                  o.uv = v.uv;
                  return o;
                }


                float Gaussian(int  offset)
                {
                    float stDevSquared = _StandardDeviation * _StandardDeviation;
                    float gauss = (1 / sqrt(2 * PI * stDevSquared)) * pow(E, -((offset * offset) / (2 * stDevSquared)));
                    return gauss;
                }
                float blur2d_horizontal(sampler2D tex, v2f i, float hstep, float vstep) {
                  float2 uv = i.uv;
                  float sum = 0;
                  float2 tc = uv;

                  //blur radius in pixels
                  float blur = _blurRadius / _resolution / 4;

                  sum += tex2D(tex, float2(tc.x - 4.0 * blur * hstep, tc.y - 4.0 * blur * vstep)).r * 0.0162162162;
                  sum += tex2D(tex, float2(tc.x - 3.0 * blur * hstep, tc.y - 3.0 * blur * vstep)).r * 0.0540540541;
                  sum += tex2D(tex, float2(tc.x - 2.0 * blur * hstep, tc.y - 2.0 * blur * vstep)).r * 0.1216216216;
                  sum += tex2D(tex, float2(tc.x - 1.0 * blur * hstep, tc.y - 1.0 * blur * vstep)).r * 0.1945945946;

                  sum += tex2D(tex, float2(tc.x, tc.y)).r * 0.2270270270;

                  sum += tex2D(tex, float2(tc.x + 1.0 * blur * hstep, tc.y + 1.0 * blur * vstep)).r * 0.1945945946;
                  sum += tex2D(tex, float2(tc.x + 2.0 * blur * hstep, tc.y + 2.0 * blur * vstep)).r * 0.1216216216;
                  sum += tex2D(tex, float2(tc.x + 3.0 * blur * hstep, tc.y + 3.0 * blur * vstep)).r * 0.0540540541;
                  sum += tex2D(tex, float2(tc.x + 4.0 * blur * hstep, tc.y + 4.0 * blur * vstep)).r * 0.0162162162;
                  return sum;
                }
                fixed4 frag(v2f i) : SV_Target
                {
                  const int m = 5;
                  float d = 5.0;
                  float z[m];
                  float gauss_curve[m];
                  float zed;

                  _resolution = 900;

                  z[0] = tex2D(_MainTex, i.uv).r;   // oldest 2 frames
                  z[1] = tex2D(_HistoryA, i.uv).r;

                  if (abs(z[0] - z[1]) < _dataDelta) // threshold depth change
                  {
                    // z[0] = 0.0;
                    // 2D spatial gauss blur of z0
                    z[0] = blur2d_horizontal(_MainTex, i, _stepsDelta, _stepsDelta);

                    // fetch depths from up to m frames
                    z[2] = tex2D(_HistoryB, i.uv).r;
                    z[3] = tex2D(_HistoryC, i.uv).r;
                    z[4] = tex2D(_HistoryD, i.uv).r;
                    zed = 0.0;

                    gauss_curve[0] = Gaussian(0);
                    gauss_curve[1] = Gaussian(1);
                    gauss_curve[2] = Gaussian(2);
                    gauss_curve[3] = Gaussian(3);
                    gauss_curve[4] = Gaussian(4);

                    float sum = 0.0;
                    // 1D temporal gauss blur
                    for (int idx = 1; idx <= m; idx++)
                    {
                      zed += gauss_curve[idx - 1] * z[idx - 1];
                    }
                  }
                  else
                    zed = z[0];

                  return fixed4(zed, zed, zed, 0.0);
                }
  • see [2D GLSL Gaussian blur](https://stackoverflow.com/a/64845819/2521214), it might help; however your code does not look like a native GLSL shader ... If it is Unity related I cannot help as I do not code for Unity ... – Spektre Nov 26 '20 at 21:06
  • @Spektre You can do it in whatever GLSL you like, I know you are an expert in it. But can you please show how the hell that equation in the paper can be implemented, even in pseudo code? – andre_lamothe Nov 26 '20 at 21:20
  • @Spektre You can even test it on a web cam video texture or anything else that has temporal & spatial noise – andre_lamothe Nov 26 '20 at 21:22
  • @Spektre I've been stuck on this for 20 days and have no results at all – andre_lamothe Nov 26 '20 at 21:27
  • The linked answer of mine does exactly that ... `w=w0*exp((-xx-yy)/(2.0*rr));` ... if I cross check with your code, `x_y_squared` is the same as my `xx+yy` and `stDevSquared` is my `(2.0*rr)` ... however I have a slight deliberate difference in constants for the slower 2D approach, `pow(r,1.975);` instead of `r*r`, to compensate for rounding errors ... I do not understand the logic behind your convolution texel fetch `_HistoryA,i.uv` ??? that looks to me like you are probing the same texel position instead of neighbors up to `r` distance. But as I do not recognize the code I might be wrong – Spektre Nov 26 '20 at 21:38
  • The fast 2-pass 2x 1D approach is similar (just one axis so different constants), however the output is not as precise as it has a square kernel instead of a circular one – Spektre Nov 26 '20 at 21:43
  • Let us [continue this discussion in chat](https://chat.stackoverflow.com/rooms/225180/discussion-between-ahmed-saleh-and-spektre). – andre_lamothe Nov 26 '20 at 21:44
  • @Spektre can you show some pseudo code please :)? Many thanks; after a discussion he is the only member on Stack Overflow who understood the paper :) Many thanks again – andre_lamothe Nov 27 '20 at 08:23
  • not right away, I need to work first ... in the afternoon I will look at it; however, meanwhile could you prepare some depth frames for me (jpg, png, bmp or ascii)? I would need to test on something ... – Spektre Nov 27 '20 at 08:26
  • @Spektre here is one https://rgbd-dataset.cs.washington.edu/ – andre_lamothe Nov 27 '20 at 08:27

1 Answer


OK I think I managed to do this... well +/-, as the equation:

(equation from the paper)

is just a symbolic simplification (common in CV/DIP), not a complete, uniquely determined equation... so its interpretation (and implementation) is not clear from it alone... However I managed to combine the missing stuff into something like this (GLSL):

//---------------------------------------------------------------------------
// Vertex
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
layout(location=0) in vec4 vertex;
out vec2 pos;   // screen position <-1,+1>
void main()
    {
    pos=vertex.xy;
    gl_Position=vertex;
    }
//---------------------------------------------------------------------------
//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
in vec2 pos;                    // screen position <-1,+1>
out vec4 gl_FragColor;          // fragment output color
uniform sampler2D txr_rgb;
uniform sampler2D txr_zed0;
uniform sampler2D txr_zed1;
uniform sampler2D txr_zed2;
uniform sampler2D txr_zed3;
uniform sampler2D txr_zed4;
uniform float xs,ys;            // texture resolution
uniform float r;                // blur radius
//---------------------------------------------------------------------------
float G(float t)
    {
    return 0.0;
    }
//---------------------------------------------------------------------------
void main()
    {
    vec2 p;
    vec4 rgb;
    const int m=5;
    const float Th=0.0015;
    float z[m],zed;



    p=0.5*(pos+1.0);                    // p = pos position in texture
    rgb=texture2D(txr_rgb ,p);          // rgb color (just for view)
    z[0]=texture2D(txr_zed0,p).r;       // oldest 2 frames
    z[1]=texture2D(txr_zed1,p).r;

    if (abs(z[0]-z[1])>Th)              // threshold depth change
        {
        int i;
        float x,y,xx,yy,rr,dx,dy,w,w0;
        // 2D spatial gauss blur of z0
        rr=r*r;
        w0=0.3780/pow(r,1.975);
        z[0]=0.0;
        for (dx=1.0/xs,x=-r,p.x=0.5+(pos.x*0.5)+(x*dx);x<=r;x++,p.x+=dx){ xx=x*x;
         for (dy=1.0/ys,y=-r,p.y=0.5+(pos.y*0.5)+(y*dy);y<=r;y++,p.y+=dy){ yy=y*y;
          if (xx+yy<=rr)
            {
            w=w0*exp((-xx-yy)/(2.0*rr));
            z[0]+=texture2D(txr_zed0,p).r*w;
            }}}
        // fetch depths from up to m frames
        z[2]=texture2D(txr_zed2,p).r;
        z[3]=texture2D(txr_zed3,p).r;
        z[4]=texture2D(txr_zed4,p).r;
        // 1D temporal gauss blur
        for (zed=0.0,i=1;i<=m;i++) zed+=exp(0.5*float(i*i)/float(m*m))*z[i-1];
        zed/=2.506628274631000502415765284811*float(m);
        }
    else zed=z[0];
    zed*=20.0;                          // debug view: emphasize depth so its color is visible
//  gl_FragColor=rgb;                   // debug view: render RGB texture
    gl_FragColor=vec4(zed,zed,zed,0.0); // render resulting depth texture
    }
//---------------------------------------------------------------------------
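
For clarity, in my notation (not the paper's) the code above roughly boils down to a 2D spatial pass over the oldest frame

$$z_{0} \leftarrow \sum_{x^{2}+y^{2}\le r^{2}} w_{0}\, e^{-\frac{x^{2}+y^{2}}{2r^{2}}}\; z_{0}(x,y), \qquad w_{0} \approx \frac{0.3780}{r^{1.975}}$$

followed by a 1D temporal pass over the last $m$ frames

$$\tilde z = \frac{1}{\sqrt{2\pi}\, m} \sum_{i=1}^{m} e^{\frac{i^{2}}{2m^{2}}}\, z_{i-1}$$

where $z_{0}(x,y)$ is the oldest depth sampled at pixel offset $(x,y)$ and $2.50662\ldots = \sqrt{2\pi}$.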

I used this dataset (https://rgbd-dataset.cs.washington.edu/) for testing; however, the depth resolution is not very good...

Using the garlic_7_1 dataset I got this result (emphasized depth):

(depth preview image)

The temporal depth is m (hard coded) and the spatial radius is r (uniform). The last m frames are passed in txr_zed0 ... txr_zed(m-1), where txr_zed0 is the oldest one. The threshold Th must be chosen so the algo selects the correct regions!!!

In order for this to work properly you should replace txr_zed0 with this shader's result after applying it (on the CPU side, or render to texture and then swap the ids ...). Otherwise the spatial Gauss blurring will not be applied to older frames.
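
For example on the CPU side (just a sketch with assumed names, not part of the shader; `txr_zed0` and `filtered` stand for whatever texture handles you actually use):

    // CPU-side sketch: after the filter pass has rendered into an FBO-attached texture,
    // swap it with the oldest history slot so the next pass reads already-blurred depth.
    #include <utility> // std::swap

    unsigned int txr_zed0 = 0; // GL texture id currently bound as the oldest depth frame
    unsigned int filtered = 0; // GL texture id the filter pass rendered into

    void after_filter_pass()
        {
        std::swap(txr_zed0, filtered); // the filtered result becomes the new txr_zed0
        // next frame: bind txr_zed0 as input again and reuse `filtered` as the render target
        }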

[edit1]

Here is the preview (outputting red inside the if instead of blurring) for Th=0.01:

(preview for Th=0.01)

As you can see it selects the edges ... So the change (just for choosing Th) is:

//---------------------------------------------------------------------------
// Fragment
//---------------------------------------------------------------------------
#version 420 core
//---------------------------------------------------------------------------
in vec2 pos;                    // screen position <-1,+1>
out vec4 gl_FragColor;          // fragment output color
uniform sampler2D txr_rgb;
uniform sampler2D txr_zed0;
uniform sampler2D txr_zed1;
uniform sampler2D txr_zed2;
uniform sampler2D txr_zed3;
uniform sampler2D txr_zed4;
uniform float xs,ys;            // texture resolution
uniform float r;                // blur radius
//---------------------------------------------------------------------------
float G(float t)
    {
    return 0.0;
    }
//---------------------------------------------------------------------------
void main()
    {
    vec2 p;
    vec4 rgb;
    const int m=5;
//  const float Th=0.0015;
    const float Th=0.01;
    float z[m],zed;

    p=0.5*(pos+1.0);                    // p = pos position in texture
    rgb=texture2D(txr_rgb ,p);          // rgb color (just for view)
    z[0]=texture2D(txr_zed0,p).r;       // oldest 2 frames
    z[1]=texture2D(txr_zed1,p).r;

    if (abs(z[0]-z[1])>Th)              // threshold depth change
        {
        gl_FragColor=vec4(1.0,0.0,0.0,0.0);     // debug output
        return;

        int i;
        float x,y,xx,yy,rr,dx,dy,w,w0;
        // 2D spatial gauss blur of z0
        rr=r*r;
        w0=0.3780/pow(r,1.975);
        z[0]=0.0;
        for (dx=1.0/xs,x=-r,p.x=0.5+(pos.x*0.5)+(x*dx);x<=r;x++,p.x+=dx){ xx=x*x;
         for (dy=1.0/ys,y=-r,p.y=0.5+(pos.y*0.5)+(y*dy);y<=r;y++,p.y+=dy){ yy=y*y;
          if (xx+yy<=rr)
            {
            w=w0*exp((-xx-yy)/(2.0*rr));
            z[0]+=texture2D(txr_zed0,p).r*w;
            }}}
        // fetch depths from up to m frames
        z[2]=texture2D(txr_zed2,p).r;
        z[3]=texture2D(txr_zed3,p).r;
        z[4]=texture2D(txr_zed4,p).r;
        // 1D temporal gauss blur
        w0=0.5/float(m*m);
        for (zed=0.0,i=1;i<=m;i++) zed+=exp(w0*float(i*i))*z[i-1];
        zed/=2.506628274631000502415765284811*float(m);
        }
    else zed=z[0];
    zed*=40.0;                          // debug view: emphasize depth so its color is visible
//  gl_FragColor=rgb;                   // debug view: render RGB texture
    gl_FragColor=vec4(zed,zed,zed,0.0); // render resulting depth texture
    }
//---------------------------------------------------------------------------
  • What is `in vec2 pos; // screen position <-1,+1>`? I actually get the input as the texture coordinate i.uv – andre_lamothe Nov 27 '20 at 19:28
  • loop does not appear to terminate in a timely manner (1024 iterations) at line 81 (on d3d11) – andre_lamothe Nov 27 '20 at 20:16
  • The texture resolution I use is 1k by 720; it's too much for that loop to be unrolled – andre_lamothe Nov 27 '20 at 20:20
  • @AhmedSaleh 1. `pos` is the vertex (I render a single QUAD covering the screen) so it's the pixel/fragment position in range `<-1,+1>` ... texture range is `<0,1>` hence the conversion `p=0.5*(pos+1.0);` 2. the loops only go +/- `r` from the current position, so maybe your `r` is too high or unset; I am using `r=5` pixels. – Spektre Nov 27 '20 at 21:54
  • I found that the 2D spatial blur is only applied when the threshold is negative < 0 – andre_lamothe Nov 27 '20 at 22:36
  • otherwise the filter is not active and it doesn't do anything – andre_lamothe Nov 27 '20 at 23:01
  • @AhmedSaleh both blurs are applied only if the fabs difference between the last two frames is bigger than the threshold ... so you need to tweak the `Th` value to match your data (it depends on the dynamic range of the depth ...) – Spektre Nov 27 '20 at 23:47
  • Thanks so much <3, here is the tech specs of the camera, https://cdn.stereolabs.com/assets/datasheets/zed-mini-camera-datasheet.pdf Which threshold should I choose ? – andre_lamothe Nov 28 '20 at 07:48
  • @AhmedSaleh I chose the `Th` on the run simply by outputting red color inside the `if` instead of filtering and tweaking the `Th` until the output had red areas near depth changes (edges of mesh). If `Th` is too small no red is output; if `Th` is too big too much red is output. The `Th` depends on the image (not just the camera) I think; ideally depth should be linear and the Th should be a few depth steps big (not just 1) so only steeper changes are detected by it. – Spektre Nov 28 '20 at 08:26
  • @AhmedSaleh the info sheet says depth range: `0.10 m to 15 m` but not how the depth is returned ... float, int, how many bits ... There is only accuracy at 2 distances but that is most likely just accuracy of the measurement and not depth granularity. Also it is not known if it is linear or logarithmic ... So choose `Th` for some object and then move it further away; if the `Th` is not good anymore you got non-linear depth and need to linearize it first – Spektre Nov 28 '20 at 08:30
  • I faced a problem with that line using HLSL instead of GLSL `zed+=exp(0.5*float(i*i)/float(m*m))*z[i-1]; zed/=2.506628274631000502415765284811*float(m); ` I have updated the post; would you please take a few minutes to review the shader code and tell me whether it is consistent with your idea? – andre_lamothe Nov 28 '20 at 15:31
  • @AhmedSaleh what problem? and btw your `blur2d_horizontal` is just a 1D blur, not 2D, so the properties of the filter might not be the same. – Spektre Nov 28 '20 at 17:36
  • It doesn't do anything. When I remove it and render the output, the output is rendered, but when I add it there is no output on the screen at all – andre_lamothe Nov 28 '20 at 17:54
  • @AhmedSaleh I do not use HLSL but that sounds like you got syntax error in there which prohibits the compilation or linkage of shader hence no output. What is in the shader logs? – Spektre Nov 28 '20 at 17:57
  • Let us [continue this discussion in chat](https://chat.stackoverflow.com/rooms/225264/discussion-between-ahmed-saleh-and-spektre). – andre_lamothe Nov 28 '20 at 17:58
  • I have uploaded both depth and image rgb for you http://www.mediafire.com/file/f2ov725tgtbvq9w/depth.zip/file – andre_lamothe Nov 28 '20 at 19:11
  • got it ... will try it tomorrow – Spektre Nov 28 '20 at 20:43
  • Did you try it ? – andre_lamothe Nov 29 '20 at 11:26
  • @AhmedSaleh yes, slightly... the data is not calibrated between rgb and depth, and not very smooth, and the image holds planar objects which will not be ideal for the smoothing. Anyway this `Th=0.01;` threshold looks OK for that distance to camera – Spektre Nov 29 '20 at 11:30
  • is there a modification to the original shader ? – andre_lamothe Nov 29 '20 at 11:32
  • @AhmedSaleh not really, I posted it ... so with this you choose Th until it selects the problematic areas (which your data does not have, so I still can't test properly) and then rem out the red color and return – Spektre Nov 29 '20 at 11:35
  • can you come to the chat – andre_lamothe Nov 29 '20 at 11:41
  • Hello My hero, Can you join the chat ? I really need your help in something :) – andre_lamothe Oct 14 '21 at 11:42
  • https://chat.stackoverflow.com/rooms/238136/ahmed-spektre – andre_lamothe Oct 14 '21 at 11:43
  • Hello Spektre, Can you join the chat ? I'm Ahmed If You remember me, Egyptian in Austria https://chat.stackoverflow.com/rooms/248890/ahmed-spektre – andre_lamothe Oct 18 '22 at 22:05