```jsx
import { useEffect, useState } from "react";
import NavBar from "../../components/navBar";
import { Button } from "@chakra-ui/react";
import { FilesetResolver, PoseLandmarker } from "@mediapipe/tasks-vision";
// import model from "resolve360_admin/app/shared/models/pose_landmarker_lite.task"
import { Canvas, useGraph } from "@react-three/fiber";
import { useGLTF } from "@react-three/drei";

function ExerciseAddition() {
  let video;
  let poseLandmarker;
  let lastVideoTime = -1;
  const [enablePrediction, setEnablePrediction] = useState(true);

  // Load the WASM runtime, create the pose landmarker, and start the webcam.
  const setup = async () => {
    const vision = await FilesetResolver.forVisionTasks(
      "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@latest/wasm"
    );
    poseLandmarker = await PoseLandmarker.createFromOptions(vision, {
      baseOptions: {
        modelAssetPath:
          "https://storage.googleapis.com/mediapipe-models/pose_landmarker/pose_landmarker_full/float16/1/pose_landmarker_full.task",
        delegate: "GPU",
      },
      runningMode: "VIDEO",
      outputSegmentationMasks: true,
    });

    video = document.getElementById("video");
    navigator.mediaDevices
      .getUserMedia({ video: { width: 1280, height: 720 } })
      .then((stream) => {
        video.srcObject = stream;
        // Begin the prediction loop once the first frame is available.
        video.addEventListener("loadeddata", predict);
      })
      .catch((err) => {
        console.error(err);
      });
  };

  // Run detection whenever the video has advanced to a new frame.
  const predict = () => {
    const nowInMs = performance.now();
    if (lastVideoTime !== video.currentTime) {
      lastVideoTime = video.currentTime;
      const result = poseLandmarker.detectForVideo(video, nowInMs);
      console.log(result);
    }
    requestAnimationFrame(predict);
  };

  useEffect(() => {
    setup();
  }, []);

  console.log("Hello from ExerciseAddition");

  return (
    <div className="bg-white flex flex-col justify-center items-center w-screen">
      <NavBar />
      <div className="p-4 bg-white gap-10">
        <video autoPlay id="video" className="h-2/3 w-2/3 mb-1" />
        <Button>Click on me!</Button>
        <Canvas>
          <ambientLight intensity={0.5} />
          <pointLight position={[1, 1, 1]} intensity={0.5} color={[1, 0, 0]} />
          <pointLight position={[-1, 0, 1]} intensity={0.5} color={[0, 1, 0]} />
          <Avatar />
        </Canvas>
      </div>
    </div>
  );
}

function Avatar() {
  const avatar = useGLTF(
    "https://models.readyplayer.me/66543fffe9a54d63948effa9.glb?morphTargets=ARKit"
  );
  const { nodes } = useGraph(avatar.scene);
  return <primitive object={nodes.Avatar} />;
}

export default ExerciseAddition;
```
OS: macOS Sonoma

Running this for 3 to 4 minutes results in my Mac throwing "Device has run out of application memory". On looking into the console carefully, this was one of the warnings:

> You seem to be creating MPMask instances without invoking .close(). This leaks resources.

I am trying to find the part that is consuming so much memory.
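Based on that warning, my guess is that the MPMask instances come from the segmentation masks (I have `outputSegmentationMasks: true`), and that each result of `detectForVideo` holds native memory the garbage collector never frees. This is an untested sketch of what I think the warning is asking for, inside `predict`; I am not sure whether this is the actual leak or the right place to release the masks:

```jsx
const result = poseLandmarker.detectForVideo(video, nowInMs);
console.log(result);
// My assumption: each MPMask wraps GPU/WASM memory, so it has to be
// released explicitly once I am done with the result.
result.segmentationMasks?.forEach((mask) => mask.close());
```

If that is not the right place, where should these masks be released?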