如何处理 HLSL 中的浮点运算?

计算科学 浮点 显卡 扩散 开放式
2021-11-30 06:28:31

我正在尝试为 GPU 编写 perona malik 各向异性扩散滤波器。

我的着色器基于过滤器的 matlab 实现。由于我怀疑是浮点算术问题,我遇到了麻烦。

我的着色器中有一条线

浮动 cN = 1 / pow( 1 + (nablaN / k), 2);

cN 总是计算为零,而 matlab 版本则不然。我认为我对浮点数处理不当。

我应该如何正确处理?

编辑

Nabla 是通过纹理与内核 hN 的卷积计算得出的浮点数。

float4 nablaN = Convolution(psInput, hN);

内核是

static const float3x3 hN =
{
    0,  1, 0,
    0, -1, 0,
    0,  0, 0
};

和卷积是

float4 Convolution(VertexShaderOutput input, float3x3 kernel)
{
    float4 pixel = float4(0.0f, 0.0f, 0.0f, 0.0f);

    for (int i = -1; i <= 1; ++i)
    {
        for (int j = -1; j <= 1; ++j)
        {
            pixel += kernel[i+1][j+1] * tex2D(Input0Sampler, input.TextureCoordinate + float2(i,j));
        };
    };

    return pixel;
}

编辑

如果我从像素着色器返回 nablaN 我会得到这样的图像

http://imageshack.us/photo/my-images/819/dw69.png/(nablaN/2 类似)

如果我从像素着色器返回 ( 1 + (nablaN / k)) * ( 1 + (nablaN / k)) 我得到

看起来像这样的东西 http://imageshack.us/photo/my-images/822/s49z.png/

这看起来类似于边缘检测过滤器。大多数像素值为零。所以你可以看到为什么 1/(( 1 + (nablaN / k)) * ( 1 + (nablaN / k)) ) 只是给我一个黑屏。

编辑

好的,我现在有三种语言的三个版本的程序

自由马

function diff_im = anisodiff2D(im, num_iter, delta_t, kappa, option)
%ANISODIFF2D Conventional anisotropic diffusion
%   DIFF_IM = ANISODIFF2D(IM, NUM_ITER, DELTA_T, KAPPA, OPTION) perfoms 
%   conventional anisotropic diffusion (Perona & Malik) upon a gray scale
%   image. A 2D network structure of 8 neighboring nodes is considered for 
%   diffusion conduction.
% 
%       ARGUMENT DESCRIPTION:
%               IM       - gray scale image (MxN).
%               NUM_ITER - number of iterations. 
%               DELTA_T  - integration constant (0 <= delta_t <= 1/7).
%                          Usually, due to numerical stability this 
%                          parameter is set to its maximum value.
%               KAPPA    - gradient modulus threshold that controls the conduction.
%               OPTION   - conduction coefficient functions proposed by Perona & Malik:
%                          1 - c(x,y,t) = exp(-(nablaI/kappa).^2),
%                              privileges high-contrast edges over low-contrast ones. 
%                          2 - c(x,y,t) = 1./(1 + (nablaI/kappa).^2),
%                              privileges wide regions over smaller ones. 
% 
%       OUTPUT DESCRIPTION:
%                DIFF_IM - (diffused) image with the largest scale-space parameter.
% 
%   Example
%   -------------
%   s = phantom(512) + randn(512);
%   num_iter = 15;
%   delta_t = 1/7;
%   kappa = 30;
%   option = 2;
%   ad = anisodiff2D(s,num_iter,delta_t,kappa,option);
%   figure, subplot 121, imshow(s,[]), subplot 122, imshow(ad,[])
% 
% See also anisodiff1D, anisodiff3D.

% References: 
%   P. Perona and J. Malik. 
%   Scale-Space and Edge Detection Using Anisotropic Diffusion.
%   IEEE Transactions on Pattern Analysis and Machine Intelligence, 
%   12(7):629-639, July 1990.
% 
%   G. Grieg, O. Kubler, R. Kikinis, and F. A. Jolesz.
%   Nonlinear Anisotropic Filtering of MRI Data.
%   IEEE Transactions on Medical Imaging,
%   11(2):221-232, June 1992.
% 
%   MATLAB implementation based on Peter Kovesi's anisodiff(.):
%   P. D. Kovesi. MATLAB and Octave Functions for Computer Vision and Image Processing.
%   School of Computer Science & Software Engineering,
%   The University of Western Australia. Available from:
%   <http://www.csse.uwa.edu.au/~pk/research/matlabfns/>.
% 
% Credits:
% Daniel Simoes Lopes
% ICIST
% Instituto Superior Tecnico - Universidade Tecnica de Lisboa
% danlopes (at) civil ist utl pt
% http://www.civil.ist.utl.pt/~danlopes
%
% May 2007 original version.

% Convert input image to double.
im = double(im);

% PDE (partial differential equation) initial condition.
diff_im = im;

% Center pixel distances.
dx = 1;
dy = 1;
dd = sqrt(2);

% 2D convolution masks - finite differences.
hN = [0 1 0; 0 -1 0; 0 0 0];
hS = [0 0 0; 0 -1 0; 0 1 0];
hE = [0 0 0; 0 -1 1; 0 0 0];
hW = [0 0 0; 1 -1 0; 0 0 0];
hNE = [0 0 1; 0 -1 0; 0 0 0];
hSE = [0 0 0; 0 -1 0; 0 0 1];
hSW = [0 0 0; 0 -1 0; 1 0 0];
hNW = [1 0 0; 0 -1 0; 0 0 0];

nablaN = zeros(1072,1912);
nablaS = zeros(1072,1912);
nablaW = zeros(1072,1912);
nablaE = zeros(1072,1912);
nablaNE = zeros(1072,1912);
nablaSE = zeros(1072,1912);
nablaSW = zeros(1072,1912);
nablaNW = zeros(1072,1912);

% Anisotropic diffusion.
for t = 1:num_iter

        % Finite differences. [imfilter(.,.,'conv') can be replaced by conv2(.,.,'same')]
        % nablaN = conv2(diff_im,hN)(2:1081,2:1921);
        nablaN = conv2(diff_im,hN)(2:1073,2:1913);
        nablaS = conv2(diff_im,hS)(2:1073,2:1913);
        nablaW = conv2(diff_im,hW)(2:1073,2:1913);
        nablaE = conv2(diff_im,hE)(2:1073,2:1913);
        nablaNE = conv2(diff_im,hNE)(2:1073,2:1913);
        nablaSE = conv2(diff_im,hSE)(2:1073,2:1913);
        nablaSW = conv2(diff_im,hSW)(2:1073,2:1913);
        nablaNW = conv2(diff_im,hNW)(2:1073,2:1913);

        % Diffusion function.
        if option == 1
            cN = exp(-(nablaN/kappa).^2);
            cS = exp(-(nablaS/kappa).^2);
            cW = exp(-(nablaW/kappa).^2);
            cE = exp(-(nablaE/kappa).^2);
            cNE = exp(-(nablaNE/kappa).^2);
            cSE = exp(-(nablaSE/kappa).^2);
            cSW = exp(-(nablaSW/kappa).^2);
            cNW = exp(-(nablaNW/kappa).^2);
        elseif option == 2
            cN = 1./(1 + (nablaN/kappa).^2);
            cS = 1./(1 + (nablaS/kappa).^2);
            cW = 1./(1 + (nablaW/kappa).^2);
            cE = 1./(1 + (nablaE/kappa).^2);
            cNE = 1./(1 + (nablaNE/kappa).^2);
            cSE = 1./(1 + (nablaSE/kappa).^2);
            cSW = 1./(1 + (nablaSW/kappa).^2);
            cNW = 1./(1 + (nablaNW/kappa).^2);
        end

        % Discrete PDE solution.
        diff_im = diff_im + ...
                  delta_t*(...
                  (1/(dy^2))*cN.*nablaN + (1/(dy^2))*cS.*nablaS + ...
                  (1/(dx^2))*cW.*nablaW + (1/(dx^2))*cE.*nablaE + ...
                  (1/(dd^2))*cNE.*nablaNE + (1/(dd^2))*cSE.*nablaSE + ...
                  (1/(dd^2))*cSW.*nablaSW + (1/(dd^2))*cNW.*nablaNW );

        % Iteration warning.
        fprintf('\rIteration %d\n',t);        
end

HLSL

texture2D Input0;
sampler2D Input0Sampler = sampler_state
{
    Texture = <Input0>;
    MinFilter = Point;
    MagFilter = Point;
    MipFilter = Point;
    AddressU = Clamp;
    AddressV = Clamp;
};

struct VertexShaderInput
{
    float4 Position : POSITION0;
    float2 TextureCoordinate : TEXCOORD0;
};

struct VertexShaderOutput
{
    float4 Position : POSITION0;
    float2 TextureCoordinate : TEXCOORD0;
};

struct PixelShaderOutput
{
    // TODO: Optionally add/remove output indices to match GPUProcessor.numOutputs
    float4 Index0 : COLOR0;
};

// input texture dimensions
static float w = 1920 - 8;
static float h = 1080 - 8;

static const float2 pixel = float2(1.0 / w, 1.0 / h);
static const float2 halfPixel = float2(pixel.x / 2, pixel.y / 2);

static const float3x3 hN =
{
    0,  1, 0,
    0, -1, 0,
    0,  0, 0
};
static const float3x3 hS =
{
    0,  0, 0,
    0, -1, 0,
    0,  1, 0
};
static const float3x3 hE =
{
    0,  0, 0,
    0, -1, 1,
    0,  0, 0
};
static const float3x3 hW =
{
    0,  0, 0,
    1, -1, 0,
    0,  0, 0
};
static const float3x3 hNE =
{
    0,  0, 1,
    0, -1, 0,
    0,  0, 0
};
static const float3x3 hSE =
{
    0,  0, 0,
    0, -1, 0,
    0,  0, 1
};
static const float3x3 hSW =
{
    0,  0, 0,
    0, -1, 0,
    1,  0, 0
};
static const float3x3 hNW =
{
    1,  0, 0,
    0, -1, 0,
    0,  0, 0
};

VertexShaderOutput VertexShaderFunction(VertexShaderInput vsInput)
{
    //VertexShaderOutput output;

    //output.Position = vsInput.Position;
    //output.TextureCoordinate = vsInput.TextureCoordinate;

    VertexShaderOutput output;
    vsInput.Position.x =  vsInput.Position.x - 2*halfPixel.x;
    vsInput.Position.y =  vsInput.Position.y + 2*halfPixel.y;
    output.Position = vsInput.Position;
    output.TextureCoordinate = vsInput.TextureCoordinate;
    return output;

    //return output;
}

float Convolution(VertexShaderOutput input, float3x3 kernel)
{
    //float4 pixel = float4(0.0f, 0.0f, 0.0f, 0.0f);
    float pixel = 0.0L;

    for (int i = -1; i <= 1; ++i)
    {
        for (int j = -1; j <= 1; ++j)
        {
            pixel += (kernel[i+1][j+1] * tex2D(Input0Sampler, input.TextureCoordinate + float2(i,j))).x;
        };
    };

    return pixel;
}

PixelShaderOutput PixelShaderFunction(VertexShaderOutput psInput)
{       
    PixelShaderOutput output;
    //output.Index0 = float4(0,0,0,0);//tex2D(Input0Sampler, psInput.TextureCoordinate);
    //output.Index0 = tex2D(Input0Sampler, psInput.TextureCoordinate);

    float big = 100000.0f;
    float total = tex2D(Input0Sampler, psInput.TextureCoordinate);
    float dx, dy, dd;
    dx = 1; dy = 1; dd = pow(2.0L, 0.5L);
    float delta_t = 1/7;//0.25f;
    float k = 30.0;//0.015f;

    float nablaN = Convolution(psInput, hN);
    float nablaS = Convolution(psInput, hS);
    float nablaW = Convolution(psInput, hW);
    float nablaE = Convolution(psInput, hE);
    float nablaNE = Convolution(psInput, hNE);
    float nablaSE = Convolution(psInput, hSE);
    float nablaSW = Convolution(psInput, hSW);
    float nablaNW = Convolution(psInput, hNW);


    float cN  = ( 1 + (nablaN / k)) * ( 1 + (nablaN / k));
    float cS  = ( 1 + (nablaS / k)) * ( 1 + (nablaS / k));
    float cW  = ( 1 + (nablaW / k)) * ( 1 + (nablaW / k));
    float cE  = ( 1 + (nablaE / k)) * ( 1 + (nablaE / k));
    float cNE = ( 1 + (nablaNE / k)) * ( 1 + (nablaNE / k));
    float cSE = ( 1 + (nablaSE / k)) * ( 1 + (nablaSE / k));
    float cSW = ( 1 + (nablaSW / k)) * ( 1 + (nablaSW / k));
    float cNW = ( 1 + (nablaNW / k)) * ( 1 + (nablaNW / k));

    /*  
    float4 cN  = exp(-(nablaN/k)*(nablaN/k));
    float4 cS  = exp(-(nablaS/k)*(nablaS/k));
    float4 cW  = exp(-(nablaW/k)*(nablaW/k));
    float4 cE  = exp(-(nablaE/k)*(nablaE/k));
    float4 cNE = exp(-(nablaNE/k)*(nablaNE/k));
    float4 cSE = exp(-(nablaSE/k)*(nablaSE/k));
    float4 cSW = exp(-(nablaSW/k)*(nablaSW/k));
    float4 cNW = exp(-(nablaNW/k)*(nablaNW/k));
    */

    total += delta_t * 
    (
        //(mul(cN, nablaN)/(dy*dy)) + (mul(cS, nablaS)/(dy*dy)) + (mul(cW, nablaW)/(dx*dx)) + (mul(cE, nablaE)/(dx*dx)) + (dd*dd)*(mul(cNE, nablaNE) + mul(cSE, nablaSE) + mul(cSW, nablaSW) + mul(cNW, nablaNW))
        (nablaN/cN) + (nablaS/cS) + (nablaW/cW) + (nablaE/cE) + 2*((nablaNE/cNE) + (nablaSE/cSE) + (nablaSW/cSW) + (nablaNW/cNW))
    );


    output.Index0 = total;

    return output;
}

technique PeronaMalik
{
    pass pass1
    {
        VertexShader = compile vs_3_0 VertexShaderFunction();
        PixelShader  = compile ps_3_0 PixelShaderFunction();
    }
}

开放式

float4 Convolution(__read_only image2d_t srcImg, int2 point, float * kern)
{
    const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
    int maskSize = 1;
    float4 sum = (float4)(0.0f,0.0f,0.0f,0.0f);
    for (int i = -maskSize; i <= maskSize; i++)
    {
        for(int j = -maskSize; j <= maskSize; j++) 
        {
            int2 delta = (int2)(i+maskSize,j+maskSize);                 
            sum += kern[(delta.y*maskSize) + delta.x] * convert_float4(read_imageui(srcImg, smp, point + delta));
        }
    }
    return sum;
}


__kernel void imagingTest(__read_only  image2d_t srcImg, __write_only image2d_t dstImg)
{
    float k = 30;
    float delta_t = 1/7;

    float hN[9];
    hN[0] = 0; hN[1] = 1; hN[2] = 0;
    hN[3] = 0; hN[4] =-1; hN[5] = 0; 
    hN[6] = 0; hN[7] = 0; hN[8] = 0;

    float hS[9];
    hS[0] = 0; hS[1] = 0; hS[2] = 0;
    hS[3] = 0; hS[4] =-1; hS[5] = 0; 
    hS[6] = 0; hS[7] = 1; hS[8] = 0;

    float hE[9];
    hE[0] = 0; hE[1] = 0; hE[2] = 0;
    hE[3] = 0; hE[4] =-1; hE[5] = 1; 
    hE[6] = 0; hE[7] = 0; hE[8] = 0;

    float hW[9];
    hW[0] = 0; hW[1] = 0; hW[2] = 0;
    hW[3] = 1; hW[4] =-1; hW[5] = 0; 
    hW[6] = 0; hW[7] = 0; hW[8] = 0;

    float hNE[9];
    hNE[0] = 0; hNE[1] = 0; hNE[2] = 1;
    hNE[3] = 0; hNE[4] =-1; hNE[5] = 0; 
    hNE[6] = 0; hNE[7] = 0; hNE[8] = 0;

    float hSE[9];
    hSE[0] = 0; hSE[1] = 0; hSE[2] = 0;
    hSE[3] = 0; hSE[4] =-1; hSE[5] = 0; 
    hSE[6] = 0; hSE[7] = 0; hSE[8] = 1;

    float hSW[9];
    hSW[0] = 0; hSW[1] = 0; hSW[2] = 0;
    hSW[3] = 0; hSW[4] =-1; hSW[5] = 0; 
    hSW[6] = 1; hSW[7] = 0; hSW[8] = 0;

    float hNW[9];
    hNW[0] = 1; hNW[1] = 0; hNW[2] = 0;
    hNW[3] = 0; hNW[4] =-1; hNW[5] = 0; 
    hNW[6] = 0; hNW[7] = 0; hNW[8] = 0;

    const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;

    int2 coord = (int2)(get_global_id(0), get_global_id(1));

    uint4 bgra = read_imageui(srcImg, smp, coord); 

    float4 nablaN = Convolution(srcImg, coord, hN);
    float4 nablaS = Convolution(srcImg, coord, hS);
    float4 nablaE = Convolution(srcImg, coord, hE);
    float4 nablaW = Convolution(srcImg, coord, hW);

    float4 nablaNE = Convolution(srcImg, coord, hNE);
    float4 nablaNW = Convolution(srcImg, coord, hNW);
    float4 nablaSE = Convolution(srcImg, coord, hSE);
    float4 nablaSW = Convolution(srcImg, coord, hSW);

    float4 cN  = exp(-(nablaN/k)*(nablaN/k));
    float4 cS  = exp(-(nablaS/k)*(nablaS/k));
    float4 cW  = exp(-(nablaW/k)*(nablaW/k));
    float4 cE  = exp(-(nablaE/k)*(nablaE/k));
    float4 cNE = exp(-(nablaNE/k)*(nablaNE/k));
    float4 cSE = exp(-(nablaSE/k)*(nablaSE/k));
    float4 cSW = exp(-(nablaSW/k)*(nablaSW/k));
    float4 cNW = exp(-(nablaNW/k)*(nablaNW/k));

    float4 sum = delta_t * ((nablaN/cN) + (nablaS/cS) + (nablaW/cW) + (nablaE/cE) + 2*((nablaNE/cNE) + (nablaSE/cSE) + (nablaSW/cSW) + (nablaNW/cNW)));

    bgra.x = bgra.y = bgra.z = convert_int(sum.x);

    bgra.w = 255;
    write_imageui(dstImg, coord, bgra);
}

它在freemat中运行良好,它真的很慢。OpenCL 和 HLSL 版本非常快,但它们没有给出正确的结果。

2个回答

nablaN整数吗?因为如果是,则表达式1 / pow( 1 + (nablaN / k), 2);被认为是整数除以整数,如果nablaN大于 2,则整数将为零。这些只是 C 编程语言的规则。

看起来我已经想通了,至少对于 OpenCL ......

#ifdef cl_khr_fp64
    #pragma OPENCL EXTENSION cl_khr_fp64 : enable
#elif defined(cl_amd_fp64)
    #pragma OPENCL EXTENSION cl_amd_fp64 : enable
#else
    #error "Double precision doubleing point not supported by OpenCL implementation."
#endif

double4 Convolution(__read_only image2d_t srcImg, int2 point, double * kern)
{
    const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
    int maskSize = 1;
    double4 sum = (double4)(0.0f,0.0f,0.0f,0.0f);
    for (int i = -maskSize; i <= maskSize; i++)
    {
        for(int j = -maskSize; j <= maskSize; j++) 
        {
            int2 delta = (int2)(i+maskSize,j+maskSize); 
            int2 pos = (int2)(i,j);
            sum += kern[(delta.y*3) + delta.x] * convert_double4(read_imageui(srcImg, smp, point + pos));
        }
    }
    return sum;
}


__kernel void imagingTest(__read_only  image2d_t srcImg, __write_only image2d_t dstImg)
{

    double k = 30.0L;
    double delta_t = 0.14285714285714285714285714285714L; // 1/7

    double hN[9];
    hN[0] = 0; hN[1] = 1; hN[2] = 0;
    hN[3] = 0; hN[4] =-1; hN[5] = 0; 
    hN[6] = 0; hN[7] = 0; hN[8] = 0;

    double hS[9];
    hS[0] = 0; hS[1] = 0; hS[2] = 0;
    hS[3] = 0; hS[4] =-1; hS[5] = 0; 
    hS[6] = 0; hS[7] = 1; hS[8] = 0;

    double hE[9];
    hE[0] = 0; hE[1] = 0; hE[2] = 0;
    hE[3] = 0; hE[4] =-1; hE[5] = 1; 
    hE[6] = 0; hE[7] = 0; hE[8] = 0;

    double hW[9];
    hW[0] = 0; hW[1] = 0; hW[2] = 0;
    hW[3] = 1; hW[4] =-1; hW[5] = 0; 
    hW[6] = 0; hW[7] = 0; hW[8] = 0;

    double hNE[9];
    hNE[0] = 0; hNE[1] = 0; hNE[2] = 1;
    hNE[3] = 0; hNE[4] =-1; hNE[5] = 0; 
    hNE[6] = 0; hNE[7] = 0; hNE[8] = 0;

    double hSE[9];
    hSE[0] = 0; hSE[1] = 0; hSE[2] = 0;
    hSE[3] = 0; hSE[4] =-1; hSE[5] = 0; 
    hSE[6] = 0; hSE[7] = 0; hSE[8] = 1;

    double hSW[9];
    hSW[0] = 0; hSW[1] = 0; hSW[2] = 0;
    hSW[3] = 0; hSW[4] =-1; hSW[5] = 0; 
    hSW[6] = 1; hSW[7] = 0; hSW[8] = 0;

    double hNW[9];
    hNW[0] = 1; hNW[1] = 0; hNW[2] = 0;
    hNW[3] = 0; hNW[4] =-1; hNW[5] = 0; 
    hNW[6] = 0; hNW[7] = 0; hNW[8] = 0;

    const sampler_t smp = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;



    int2 coord = (int2)(get_global_id(0), get_global_id(1));

    uint4 bgra = read_imageui(srcImg, smp, coord); 

    double4 nablaN = Convolution(srcImg, coord, hN);
    double4 nablaS = Convolution(srcImg, coord, hS);
    double4 nablaE = Convolution(srcImg, coord, hE);
    double4 nablaW = Convolution(srcImg, coord, hW);

    double4 nablaNE = Convolution(srcImg, coord, hNE);
    double4 nablaNW = Convolution(srcImg, coord, hNW);
    double4 nablaSE = Convolution(srcImg, coord, hSE);
    double4 nablaSW = Convolution(srcImg, coord, hSW);

    double4 cN  = exp(-(nablaN /k) * (nablaN /k));
    double4 cS  = exp(-(nablaS /k) * (nablaS /k));
    double4 cW  = exp(-(nablaW /k) * (nablaW /k));
    double4 cE  = exp(-(nablaE /k) * (nablaE /k));
    double4 cNE = exp(-(nablaNE/k) * (nablaNE/k));
    double4 cSE = exp(-(nablaSE/k) * (nablaSE/k));
    double4 cSW = exp(-(nablaSW/k) * (nablaSW/k));
    double4 cNW = exp(-(nablaNW/k) * (nablaNW/k));


    double4 sum = 0.5 * (nablaNE * cNE) + (nablaSE * cSE) + (nablaSW * cSW) + (nablaNW * cNW);
    sum += (nablaN * cN) + (nablaS * cS) + (nablaW * cW) + (nablaE * cE);
    sum *= delta_t;

    bgra.x = bgra.y = bgra.z = convert_int(sum.x);

    bgra.w = 255;
    write_imageui(dstImg, coord, bgra);
}

这似乎给出了很好的结果。我不确定这是否是 HLSL 的限制。