I am experimenting with WebGPU. I found out that transferring data between the CPU and the GPU can be quite slow.
My goal is to simulate particle physics on the GPU using WebGPU's compute shaders, then take advantage of the ease of use of Three.js or Babylon.js to display the result.
Currently, my simulation works, but I transfer the data to and from the GPU every frame: once to compute the new positions, and once again to display them using Three.js, as you can see in the code below.
import { Cloth } from "./physim.js";
import { OrbitControls } from 'three/addons/controls/OrbitControls.js';
import { InitDevice } from './webgpu/webgpu.js';
import * as THREE from 'three';
import WebGPURenderer from 'three/examples/jsm/renderers/webgpu/WebGPURenderer.js';
/**
 * Runs the GPU cloth simulation and displays it with Three.js.
 *
 * The physics step runs in a WebGPU compute shader (see ./physim.js); the
 * resulting vertex positions are read back to the CPU every frame and
 * re-uploaded through a Three.js BufferAttribute — this round trip is the
 * known bottleneck described in the question.
 *
 * @returns {Promise<void>} Resolves after the first frame is kicked off;
 *   subsequent frames are scheduled via requestAnimationFrame.
 */
export async function Simulate() {
  const gpu = await InitDevice();
  const cloth = new Cloth(gpu);

  const width = window.innerWidth * 0.9;
  const height = window.innerHeight * 0.9;

  const scene = new THREE.Scene();
  const camera = new THREE.PerspectiveCamera(75, width / height, 0.1, 1000);
  const renderer = new WebGPURenderer({
    antialias: true,
    forceWebGL: false,
  });
  renderer.setSize(width, height);
  document.body.appendChild(renderer.domElement);

  const controls = new OrbitControls(camera, renderer.domElement);

  const geometry = new THREE.BufferGeometry();
  // Float32Array reference representing the vertex positions.
  const vertices = cloth.GetPositions();
  // Uint32Array reference representing the vertex indices.
  const indices = cloth.GetVertexIndices();
  geometry.setIndex(indices);
  // NOTE(review): itemSize 4 implies the cloth stores vec4 positions
  // (presumably padded for WGSL 16-byte alignment) — confirm Three.js
  // handles a 4-component position attribute as expected.
  geometry.setAttribute('position', new THREE.BufferAttribute(vertices, 4));

  const material = new THREE.MeshBasicMaterial({ color: 0xff0000, wireframe: true });
  const mesh = new THREE.Mesh(geometry, material);
  scene.add(mesh);

  // Frame the camera on the mesh: aim at the center of its bounding box,
  // then pull back so the object isn't too large in the view.
  const box = new THREE.Box3().setFromObject(mesh);
  const center = box.getCenter(new THREE.Vector3());
  camera.position.copy(center);
  camera.position.z += 5;
  controls.target.copy(center);
  // OrbitControls requires update() after any manual change to its target
  // (or to the camera transform) for the change to take effect.
  controls.update();

  async function animate() {
    // Update the vertex position buffer. This is slow because the data is
    // transferred to and from the GPU every frame.
    await cloth.Update(0.001);
    geometry.getAttribute('position').needsUpdate = true;
    // Apply control state (user input, damping) before rendering the frame,
    // not after — otherwise camera motion lags one frame behind.
    controls.update();
    await renderer.render(scene, camera);
    requestAnimationFrame(animate);
  }
  await animate();
}
Is there a way to tell Three.js or Babylon.js to use a buffer (as vertex positions) that is already on the GPU, in order to maximize performance? I would like to keep using WGSL shaders instead of library-specific languages for the computing part.
In other terms, specifically for Three.js, can I use a GPUBuffer as a BufferAttribute?