I’m a beginner in WebGPU and want to process an image with multiple processing stages in separate shader modules:
- Desaturation
- Edge detection
- Compression
A final fourth stage then takes the compressed texture and converts it into ASCII.
I’ve accomplished this in Three.js by running multiple render passes in an EffectComposer, but I’m unsure how to do this in WebGPU. My assumption is that I need to specify an output somewhere, and use that as a binding for the texture.
How do I “link” the resulting textures and throw them into the next shader module? Is this even the right approach? Is it a bad idea to separate shaders based on smaller functionalities?
Here’s the code that I’m currently working with:
// SETUP
// Bail out early when the browser exposes no WebGPU implementation.
if (!navigator.gpu) {
  throw new Error('WebGPU not supported on this browser :(');
}

const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
  throw new Error('No appropriate GPUAdapter found :(');
}
const device = await adapter.requestDevice();

// Hook the device up to the <canvas> element on the page.
const canvas = document.querySelector('canvas');
const context = canvas.getContext('webgpu');
const canvasFormat = navigator.gpu.getPreferredCanvasFormat();
context.configure({ device, format: canvasFormat });
// DO STUFF!
// IMAGE -> TEXTURE for the sampler
const url = './someImg.jpg';

/**
 * Fetch an image and decode it into an ImageBitmap suitable for
 * copyExternalImageToTexture. Colour-space conversion is disabled so the
 * raw pixel values survive the upload unchanged.
 * @param {string} url - location of the image to fetch
 * @returns {Promise<ImageBitmap>} the decoded bitmap
 */
async function loadImageBitmap(url) {
  const res = await fetch(url);
  const blob = await res.blob();
  return await createImageBitmap(blob, { colorSpaceConversion: 'none' });
}

const source = await loadImageBitmap(url);

// Size both the canvas backing store and its CSS box to the image resolution.
canvas.style.width = `${source.width}px`;
canvas.style.height = `${source.height}px`;
canvas.width = source.width;
canvas.height = source.height;
// texture
// GPU-side copy of the source image; sampled by the first filter pass.
const texture = device.createTexture({
  label: 'imgTexture',
  format: 'rgba8unorm',
  size: { width: source.width, height: source.height },
  usage:
    GPUTextureUsage.TEXTURE_BINDING |
    GPUTextureUsage.COPY_DST |
    GPUTextureUsage.RENDER_ATTACHMENT,
});

// Upload the bitmap. flipY puts image row 0 at the bottom, matching the
// usual GL-style UV convention.
device.queue.copyExternalImageToTexture(
  { source, flipY: true },
  { texture },
  { width: source.width, height: source.height },
);
// SHADER #1 (desaturation)
// module
const module = device.createShaderModule({
  label: 'monochrome filter shader module',
  code: monoFilter, // WGSL source string
});

// render pipeline
// FIX: `targets` is only valid on the fragment stage (GPUFragmentState);
// GPUVertexState has no `targets` member, so it was removed from `vertex`.
// `entryPoint` may be omitted because each stage has a single entry point.
const pipeline = device.createRenderPipeline({
  label: 'monoFilter render pipeline',
  layout: 'auto', // bind group layouts are inferred from the WGSL bindings
  vertex: {
    module,
  },
  fragment: {
    module,
    targets: [{ format: canvasFormat }], // must match the canvas configuration
  },
});
// sampler
const sampler = device.createSampler({
  magFilter: 'linear',
  minFilter: 'linear',
});

// resolution buffer (vec2(x,y) for the shader)
// NOTE(review): usage is STORAGE, which requires the WGSL side to declare
// this binding as var<storage>; if monoFilter declares var<uniform> instead,
// this needs GPUBufferUsage.UNIFORM — confirm against the shader source.
const resolutionArray = new Float32Array([source.width, source.height]);
const resolutionBuffer = device.createBuffer({
  label: 'resolution buffer',
  size: resolutionArray.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
device.queue.writeBuffer(resolutionBuffer, 0, resolutionArray);

// bindgroup
// Binding indices must line up with the @binding(n) declarations in the WGSL.
const bindGroup = device.createBindGroup({
  layout: pipeline.getBindGroupLayout(0),
  entries: [
    { binding: 0, resource: sampler },
    { binding: 1, resource: texture.createView() },
    { binding: 2, resource: { buffer: resolutionBuffer } },
  ],
});
// SHADER #2 (edge detection)
// module, pipeline, bindgroup ...
// CREATE AND DRAW RENDER PASS
/**
 * Record and submit one frame: a single full-screen pass that runs the
 * monoFilter pipeline straight into the canvas's current texture.
 */
function render() {
  const encoder = device.createCommandEncoder({
    label: 'render quad encoder',
  });

  const pass = encoder.beginRenderPass({
    colorAttachments: [
      {
        view: context.getCurrentTexture().createView(),
        clearValue: [0.2, 0.0, 0.3, 1.0],
        loadOp: 'clear',
        storeOp: 'store',
      },
    ],
  });
  pass.setPipeline(pipeline);
  pass.setBindGroup(0, bindGroup);
  pass.draw(6); // full-screen quad drawn as two triangles
  pass.end();
  // render pass for the next processing step ...

  device.queue.submit([encoder.finish()]);
}

render();
Most of this I learned from a tutorial at https://codelabs.developers.google.com/your-first-webgpu-app#0
For some background, I’ve accomplished a single-pass render with a WGSL shader with custom bindings for the sampler, texture and other arbitrary array buffers. Other than that, I’m still learning the fundamentals of the API.
Gira is a new contributor to this site. Take care in asking for clarification, commenting, and answering.
Check out our Code of Conduct.