Compiled with problems:
ERROR in
./src/pages/Yoga/Yoga.js 108:56-88
export
'Supporteds' (imported as 'poseDetection') was not found in '@tensorflow-models/pose-detection' (possible exports: SupportedModels, TrackerType, calculators, createDetector, movenet, util)
My code looks like this :
import * as poseDetection from '@tensorflow-models/pose-detection';
import * as tf from '@tensorflow/tfjs';
import React, { useRef, useState, useEffect } from 'react'
//import backend from '@tensorflow/tfjs-backend-webgl'
import Webcam from 'react-webcam'
import { count } from '../../utils/music';
import Instructions from '../../components/Instructions/Instructions';
import './Yoga.css'
import DropDown from '../../components/DropDown/DropDown';
import { poseImages } from '../../utils/pose_images';
import { POINTS, keypointConnections } from '../../utils/data';
import { drawPoint, drawSegment } from '../../utils/helper'
let skeletonColor = 'rgb(255,255,255)'
let poseList = [
'boat','camel','cow','downdog','eagle','fish','garland','halfmoon','peacock','plank','plow','sitting','tree','warrior2'
]
let interval
// flag variable is used to help capture the time when AI just detect
// the pose as correct(probability more than threshold)
let flag = false
function Yoga() {
const webcamRef = useRef(null)
const canvasRef = useRef(null)
const [startingTime, setStartingTime] = useState(0)
const [currentTime, setCurrentTime] = useState(0)
const [poseTime, setPoseTime] = useState(0)
const [bestPerform, setBestPerform] = useState(0)
const [currentPose, setCurrentPose] = useState('tree')
const [isStartPose, setIsStartPose] = useState(false)
useEffect(() => {
const timeDiff = (currentTime - startingTime)/1000
if(flag) {
setPoseTime(timeDiff)
}
if((currentTime - startingTime)/1000 > bestPerform) {
setBestPerform(timeDiff)
}
}, [bestPerform, currentTime, startingTime])
useEffect(() => {
setCurrentTime(0)
setPoseTime(0)
setBestPerform(0)
}, [currentPose])
const CLASS_NO = {
boat: 0,
camel: 1,
cow: 2,
downdog: 3,
eagle: 4,
fish: 5,
garland: 6,
goddess:7,
halfmoon:8,
peacock:9,
plank:10,
plow:11,
sitting:12,
tree: 13,
warrior:14,
}
function get_center_point(landmarks, left_bodypart, right_bodypart) {
let left = tf.gather(landmarks, left_bodypart, 1)
let right = tf.gather(landmarks, right_bodypart, 1)
const center = tf.add(tf.mul(left, 0.5), tf.mul(right, 0.5))
return center
}
function get_pose_size(landmarks, torso_size_multiplier=2.5) {
let hips_center = get_center_point(landmarks, POINTS.LEFT_HIP, POINTS.RIGHT_HIP)
let shoulders_center = get_center_point(landmarks,POINTS.LEFT_SHOULDER, POINTS.RIGHT_SHOULDER)
let torso_size = tf.norm(tf.sub(shoulders_center, hips_center))
let pose_center_new = get_center_point(landmarks, POINTS.LEFT_HIP, POINTS.RIGHT_HIP)
pose_center_new = tf.expandDims(pose_center_new, 1)
pose_center_new = tf.broadcastTo(pose_center_new,
[1, 17, 2]
)
// return: shape(17,2)
let d = tf.gather(tf.sub(landmarks, pose_center_new), 0, 0)
let max_dist = tf.max(tf.norm(d,'euclidean', 0))
// normalize scale
let pose_size = tf.maximum(tf.mul(torso_size, torso_size_multiplier), max_dist)
return pose_size
}
function normalize_pose_landmarks(landmarks) {
let pose_center = get_center_point(landmarks, POINTS.LEFT_HIP, POINTS.RIGHT_HIP)
pose_center = tf.expandDims(pose_center, 1)
pose_center = tf.broadcastTo(pose_center,
[1, 17, 2]
)
landmarks = tf.sub(landmarks, pose_center)
let pose_size = get_pose_size(landmarks)
landmarks = tf.div(landmarks, pose_size)
return landmarks
}
function landmarks_to_embedding(landmarks) {
// normalize landmarks 2D
landmarks = normalize_pose_landmarks(tf.expandDims(landmarks, 0))
let embedding = tf.reshape(landmarks, [1,34])
return embedding
}
const runMovenet = async () => {
const detectorConfig = {modelType: poseDetection.movenet.modelType.SINGLEPOSE_THUNDER};
const detector = await poseDetection.createDetector(poseDetection.Supporteds.MoveNet, detectorConfig);
const poseClassifier = await tf.loadLayersModel('https://models.s3.jp-tok.cloud-object-storage.appdomain.cloud/model.json')
const countAudio = new Audio(count)
countAudio.loop = true
interval = setInterval(() => {
detectPose(detector, poseClassifier, countAudio)
}, 100)
}
const detectPose = async (detector, poseClassifier, countAudio) => {
if (
typeof webcamRef.current !== "undefined" &&
webcamRef.current !== null &&
webcamRef.current.video.readyState === 4
) {
let notDetected = 0
const video = webcamRef.current.video
const pose = await detector.estimatePoses(video)
const ctx = canvasRef.current.getContext('2d')
ctx.clearRect(0, 0, canvasRef.current.width, canvasRef.current.height);
try {
const keypoints = pose[0].keypoints
let input = keypoints.map((keypoint) => {
if(keypoint.score > 0.4) {
if(!(keypoint.name === 'left_eye' || keypoint.name === 'right_eye')) {
drawPoint(ctx, keypoint.x, keypoint.y, 8, 'rgb(255,255,255)')
let connections = keypointConnections[keypoint.name]
try {
connections.forEach((connection) => {
let conName = connection.toUpperCase()
drawSegment(ctx, [keypoint.x, keypoint.y],
[keypoints[POINTS[conName]].x,
keypoints[POINTS[conName]].y]
, skeletonColor)
})
} catch(err) {
}
}
} else {
notDetected += 1
}
return [keypoint.x, keypoint.y]
})
if(notDetected > 4) {
skeletonColor = 'rgb(255,255,255)'
return
}
const processedInput = landmarks_to_embedding(input)
const classification = poseClassifier.predict(processedInput)
classification.array().then((data) => {
const classNo = CLASS_NO[currentPose]
console.log(data[0][classNo])
if(data[0][classNo] > 0.97) {
if(!flag) {
countAudio.play()
setStartingTime(new Date(Date()).getTime())
flag = true
}
setCurrentTime(new Date(Date()).getTime())
skeletonColor = 'rgb(0,255,0)'
} else {
flag = false
skeletonColor = 'rgb(255,255,255)'
countAudio.pause()
countAudio.currentTime = 0
}
})
} catch(err) {
console.log(err)
}
}
}
function startYoga(){
setIsStartPose(true)
runMovenet()
}
function stopPose() {
setIsStartPose(false)
clearInterval(interval)
}
if(isStartPose) {
return (
<div className="yoga-container">
<div className="performance-container">
<div className="pose-performance">
<h4>Pose Time: {poseTime} s</h4>
</div>
<div className="pose-performance">
<h4>Best: {bestPerform} s</h4>
</div>
</div>
<div>
<Webcam
width='640px'
height='480px'
id="webcam"
ref={webcamRef}
style={{
position: 'absolute',
left: 120,
top: 100,
padding: '0px',
}}
/>
<canvas
ref={canvasRef}
id="my-canvas"
width='640px'
height='480px'
style={{
position: 'absolute',
left: 120,
top: 100,
zIndex: 1
}}
>
</canvas>
<div>
<img
src={poseImages[currentPose]}
className="pose-img"
alt="" />
</div>
</div>
<button
onClick={stopPose}
className="secondary-btn"
>Stop Pose</button>
</div>
)
}
return (
<div
className="yoga-container"
>
<DropDown
poseList={poseList}
currentPose={currentPose}
setCurrentPose={setCurrentPose}
/>
<Instructions
currentPose={currentPose}
/>
<button
onClick={startYoga}
className="secondary-btn"
>Start Pose</button>
</div>
)
}
export default Yoga;
My package.json looks like this :
{
"name": "yoga_guru",
"version": "0.1.0",
"private": true,
"dependencies": {
"@tensorflow-models/pose-detection": "^2.0.0",
"@tensorflow/tfjs-converter": "^4.1.0",
"@tensorflow/tfjs-core": "^4.1.0",
"@testing-library/jest-dom": "^5.16.5",
"@testing-library/react": "^13.4.0",
"@testing-library/user-event": "^13.5.0",
"react": "^18.2.0",
"react-dom": "^18.2.0",
"react-scripts": "5.0.1",
"web-vitals": "^2.1.4"
},
"scripts": {
"start": "react-scripts start",
"build": "react-scripts build",
"test": "react-scripts test",
"eject": "react-scripts eject"
},
"eslintConfig": {
"extends": [
"react-app",
"react-app/jest"
]
},
"browserslist": {
"production": [
">0.2%",
"not dead",
"not op_mini all"
],
"development": [
"last 1 chrome version",
"last 1 firefox version",
"last 1 safari version"
]
}
}
Any suggestions for how to resolve that error?