WebGL: Read pixels from a floating point render target

Posted 2019-01-08 18:02

There is some confusion about the level of support for rendering to floating point textures in WebGL. The OES_texture_float extension does not seem to mandate it per se (see "Optional support for FLOAT textures as FBO attachments (deprecated)"), but it looks like some vendors went ahead and implemented it anyway. My basic understanding is therefore that rendering to floating point textures actually works in non-ES desktop environments. However, I have not been able to read back from the floating point render target directly.

My question is whether there is a way to read from a floating point texture using a gl.readPixels() call with a Float32Array destination. Thanks in advance.

Attached is a script that succeeds in reading from a byte texture but fails for a float texture:

<html>
<head>
<script>
function run_test(use_float) {
    // Create canvas and context
    var canvas = document.createElement('canvas');
    document.body.appendChild(canvas);
    var gl = canvas.getContext("experimental-webgl");

    // Decide on types to use for the texture
    var texType, bufferFmt;
    if (use_float) {
        texType = gl.FLOAT;
        bufferFmt = Float32Array;
    } else {
        texType = gl.UNSIGNED_BYTE;
        bufferFmt = Uint8Array;
    }

    // Query extension
    var OES_texture_float = gl.getExtension('OES_texture_float');
    if (!OES_texture_float) {
        throw new Error("No support for OES_texture_float");
    }

    // Clear
    gl.viewport(0, 0, canvas.width, canvas.height);
    gl.clearColor(1.0, 0.0, 0.0, 1.0);
    gl.clear(gl.COLOR_BUFFER_BIT);

    // Create texture
    var texture = gl.createTexture();
    gl.bindTexture(gl.TEXTURE_2D, texture);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.NEAREST);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE);
    gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE);
    gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 512, 512, 0, gl.RGBA, texType, null);

    // Create and attach frame buffer
    var fbo = gl.createFramebuffer();
    gl.bindFramebuffer(gl.FRAMEBUFFER, fbo);
    gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0);
    gl.bindTexture(gl.TEXTURE_2D, null);
    if (gl.checkFramebufferStatus(gl.FRAMEBUFFER) != gl.FRAMEBUFFER_COMPLETE) {
        throw new Error("gl.checkFramebufferStatus(gl.FRAMEBUFFER) != gl.FRAMEBUFFER_COMPLETE");
    }

    // Clear
    gl.viewport(0, 0, 512, 512);
    gl.clear(gl.COLOR_BUFFER_BIT);
    var pixels = new bufferFmt(4 * 512 * 512);
    gl.readPixels(0, 0, 512, 512, gl.RGBA, texType, pixels);

    if (pixels[0] !== (use_float ? 1.0 : 255)) {
        throw new Error("pixels[0] === " + pixels[0].toString());
    }
}

function main() {
    run_test(false);
    console.log('Test passed using GL_UNSIGNED_BYTE');
    run_test(true);
    console.log('Test passed using GL_FLOAT');
}
</script>
</head>
<body onload='main()'>
</body>
</html>

Tags: webgl
3 answers
Answer 2 · 2019-01-08 18:27

Unfortunately, it still seems that reading out RGBA components as bytes is the only portable option in WebGL. If you need to encode a float into a pixel value, you can use the following:

In your fragment shader (GLSL):

// Bit-manipulation helpers emulated with floating point arithmetic
// (GLSL ES 1.00 has no integer bitwise operators).
float shift_right (float v, float amt) {
    v = floor(v) + 0.5;
    return floor(v / exp2(amt));
}
float shift_left (float v, float amt) {
    return floor(v * exp2(amt) + 0.5);
}
float mask_last (float v, float bits) {
    return mod(v, shift_left(1.0, bits));
}
float extract_bits (float num, float from, float to) {
    from = floor(from + 0.5); to = floor(to + 0.5);
    return mask_last(shift_right(num, from), to - from);
}
// Encode an IEEE 754 single-precision float into four bytes, returned as a
// normalized RGBA color: R holds the lowest mantissa byte, A holds the sign
// bit plus the high bits of the biased exponent.
vec4 encode_float (float val) {
    if (val == 0.0) return vec4(0, 0, 0, 0);
    float sign = val > 0.0 ? 0.0 : 1.0;
    val = abs(val);
    float exponent = floor(log2(val));
    float biased_exponent = exponent + 127.0;
    float fraction = ((val / exp2(exponent)) - 1.0) * 8388608.0; // 2^23 mantissa bits
    float t = biased_exponent / 2.0;
    float last_bit_of_biased_exponent = fract(t) * 2.0;
    float remaining_bits_of_biased_exponent = floor(t);
    float byte4 = extract_bits(fraction, 0.0, 8.0) / 255.0;
    float byte3 = extract_bits(fraction, 8.0, 16.0) / 255.0;
    float byte2 = (last_bit_of_biased_exponent * 128.0 + extract_bits(fraction, 16.0, 23.0)) / 255.0;
    float byte1 = (sign * 128.0 + remaining_bits_of_biased_exponent) / 255.0;
    return vec4(byte4, byte3, byte2, byte1);
}

// Then, inside main(), return your float as the fragment color:
float myFloat = 420.420;
gl_FragColor = encode_float(myFloat);

Then, back on the JavaScript side, after your draw call has been made, you can extract the encoded float value of each pixel with the following:

var pixels = new Uint8Array(CANVAS.width * CANVAS.height * 4);
gl.readPixels(0, 0, CANVAS.width, CANVAS.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels);
pixels = new Float32Array(pixels.buffer);

// pixels now contains an array of floats, 1 float for each pixel
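A short usage sketch for the decoded buffer (the helper name floatAt and the indexing are assumptions for illustration): readPixels returns rows bottom-up, and the Float32Array reinterpretation above relies on a little-endian host, since byte4 through byte1 land in memory lowest byte first.

// Sketch: fetch the decoded float written by the shader at pixel (x, y),
// assuming the readPixels call above covered the full canvas.
function floatAt(pixels, x, y, width) {
    return pixels[y * width + x]; // row 0 is the bottom row of the canvas
}

var bottomLeft = floatAt(pixels, 0, 0, CANVAS.width);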
冷血范
Answer 3 · 2019-01-08 18:33

I'm adding my recent discoveries. Chrome will let you read floats as part of the implementation-defined format (search for "readPixels" in the spec). Firefox implements the WEBGL_color_buffer_float extension, so you can just enable that extension and read your floats. I have not been able to read floats with Safari.
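To make that concrete, here is a minimal sketch of such a capability check (the function name tryReadFloatPixels is invented for illustration; it assumes gl already has a complete floating point framebuffer bound, and whether FLOAT readback actually succeeds remains implementation dependent):

// Sketch: attempt FLOAT readback, falling back to byte readback otherwise.
function tryReadFloatPixels(gl, width, height) {
    // Firefox route: explicitly enable float color buffers.
    gl.getExtension('WEBGL_color_buffer_float');

    // Chrome route: ask for the implementation-defined readPixels
    // format/type of the currently bound framebuffer.
    var fmt = gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_FORMAT);
    var type = gl.getParameter(gl.IMPLEMENTATION_COLOR_READ_TYPE);

    if (fmt === gl.RGBA && type === gl.FLOAT) {
        var pixels = new Float32Array(4 * width * height);
        gl.readPixels(0, 0, width, height, gl.RGBA, gl.FLOAT, pixels);
        return pixels; // direct float readback worked
    }
    return null; // fall back to RGBA/UNSIGNED_BYTE plus packing
}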

一夜七次
Answer 4 · 2019-01-08 18:40

readPixels is only guaranteed to support the RGBA format with the UNSIGNED_BYTE type (see the WebGL specification). However, there are some methods for "packing" floats into RGBA/UNSIGNED_BYTE described here:

http://concord-consortium.github.io/lab/experiments/webgl-gpgpu/webgl.html
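As a rough sketch of that idea, assuming the shader packs a value in the [0, 1) range into RGBA with the common base-255 scheme (the exact method on the linked page may differ), the JavaScript-side decode could look like this:

// Sketch: decode one pixel packed by the common base-255 GLSL scheme, where
// R, G, B, A carry successively finer fractions of a value v in [0, 1).
// This scheme is an assumption, not necessarily the linked page's method.
function decodePackedFloat(bytes, offset) {
    return bytes[offset]     / 255 +
           bytes[offset + 1] / (255 * 255) +
           bytes[offset + 2] / (255 * 255 * 255) +
           bytes[offset + 3] / (255 * 255 * 255 * 255);
}

// Usage after gl.readPixels(0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, bytes):
// var v = decodePackedFloat(bytes, 4 * (y * width + x));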
