// Copyright 2023 The MediaPipe Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//     http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
| import vision from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3"; | |
| const { FaceLandmarker, FilesetResolver, DrawingUtils } = vision; | |
| const demosSection = document.getElementById("demos"); | |
| const imageBlendShapes = document.getElementById("image-blend-shapes"); | |
| const videoBlendShapes = document.getElementById("video-blend-shapes"); | |
| let faceLandmarker; | |
| let runningMode: "IMAGE" | "VIDEO" = "IMAGE"; | |
| let enableWebcamButton: HTMLButtonElement; | |
| let webcamRunning: Boolean = false; | |
| const videoWidth = 480; | |
| // Before we can use HandLandmarker class we must wait for it to finish | |
| // loading. Machine Learning models can be large and take a moment to | |
| // get everything needed to run. | |
| async function createFaceLandmarker() { | |
| const filesetResolver = await FilesetResolver.forVisionTasks( | |
| "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3/wasm" | |
| ); | |
| faceLandmarker = await FaceLandmarker.createFromOptions(filesetResolver, { | |
| baseOptions: { | |
| modelAssetPath: `https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task`, | |
| delegate: "GPU" | |
| }, | |
| outputFaceBlendshapes: true, | |
| runningMode, | |
| numFaces: 1 | |
| }); | |
| demosSection.classList.remove("invisible"); | |
| } | |
| createFaceLandmarker(); | |
| /******************************************************************** | |
| // Demo 1: Grab a bunch of images from the page and detection them | |
| // upon click. | |
| ********************************************************************/ | |
| // In this demo, we have put all our clickable images in divs with the | |
| // CSS class 'detectionOnClick'. Lets get all the elements that have | |
| // this class. | |
| const imageContainers = document.getElementsByClassName("detectOnClick"); | |
| // Now let's go through all of these and add a click event listener. | |
| for (let imageContainer of imageContainers) { | |
| // Add event listener to the child element whichis the img element. | |
| imageContainer.children[0].addEventListener("click", handleClick); | |
| } | |
| // When an image is clicked, let's detect it and display results! | |
| async function handleClick(event) { | |
| if (!faceLandmarker) { | |
| console.log("Wait for faceLandmarker to load before clicking!"); | |
| return; | |
| } | |
| if (runningMode === "VIDEO") { | |
| runningMode = "IMAGE"; | |
| await faceLandmarker.setOptions({ runningMode }); | |
| } | |
| // Remove all landmarks drawed before | |
| const allCanvas = event.target.parentNode.getElementsByClassName("canvas"); | |
| for (var i = allCanvas.length - 1; i >= 0; i--) { | |
| const n = allCanvas[i]; | |
| n.parentNode.removeChild(n); | |
| } | |
| // We can call faceLandmarker.detect as many times as we like with | |
| // different image data each time. This returns a promise | |
| // which we wait to complete and then call a function to | |
| // print out the results of the prediction. | |
| const faceLandmarkerResult = faceLandmarker.detect(event.target); | |
| const canvas = document.createElement("canvas") as HTMLCanvasElement; | |
| canvas.setAttribute("class", "canvas"); | |
| canvas.setAttribute("width", event.target.naturalWidth + "px"); | |
| canvas.setAttribute("height", event.target.naturalHeight + "px"); | |
| canvas.style.left = "0px"; | |
| canvas.style.top = "0px"; | |
| canvas.style.width = `${event.target.width}px`; | |
| canvas.style.height = `${event.target.height}px`; | |
| event.target.parentNode.appendChild(canvas); | |
| const ctx = canvas.getContext("2d"); | |
| const drawingUtils = new DrawingUtils(ctx); | |
| for (const landmarks of faceLandmarkerResult.faceLandmarks) { | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_TESSELATION, | |
| { color: "#C0C0C070", lineWidth: 1 } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE, | |
| { color: "#FF3030" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW, | |
| { color: "#FF3030" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LEFT_EYE, | |
| { color: "#30FF30" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW, | |
| { color: "#30FF30" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_FACE_OVAL, | |
| { color: "#E0E0E0" } | |
| ); | |
| drawingUtils.drawConnectors(landmarks, FaceLandmarker.FACE_LANDMARKS_LIPS, { | |
| color: "#E0E0E0" | |
| }); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS, | |
| { color: "#FF3030" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS, | |
| { color: "#30FF30" } | |
| ); | |
| } | |
| drawBlendShapes(imageBlendShapes, faceLandmarkerResult.faceBlendshapes); | |
| } | |
| /******************************************************************** | |
| // Demo 2: Continuously grab image from webcam stream and detect it. | |
| ********************************************************************/ | |
| const video = document.getElementById("webcam") as HTMLVideoElement; | |
| const canvasElement = document.getElementById( | |
| "output_canvas" | |
| ) as HTMLCanvasElement; | |
| const canvasCtx = canvasElement.getContext("2d"); | |
| // Check if webcam access is supported. | |
| function hasGetUserMedia() { | |
| return !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia); | |
| } | |
| // If webcam supported, add event listener to button for when user | |
| // wants to activate it. | |
| if (hasGetUserMedia()) { | |
| enableWebcamButton = document.getElementById( | |
| "webcamButton" | |
| ) as HTMLButtonElement; | |
| enableWebcamButton.addEventListener("click", enableCam); | |
| } else { | |
| console.warn("getUserMedia() is not supported by your browser"); | |
| } | |
| // Enable the live webcam view and start detection. | |
| function enableCam(event) { | |
| if (!faceLandmarker) { | |
| console.log("Wait! faceLandmarker not loaded yet."); | |
| return; | |
| } | |
| if (webcamRunning === true) { | |
| webcamRunning = false; | |
| enableWebcamButton.innerText = "ENABLE PREDICTIONS"; | |
| } else { | |
| webcamRunning = true; | |
| enableWebcamButton.innerText = "DISABLE PREDICTIONS"; | |
| } | |
| // getUsermedia parameters. | |
| const constraints = { | |
| video: true | |
| }; | |
| // Activate the webcam stream. | |
| navigator.mediaDevices.getUserMedia(constraints).then((stream) => { | |
| video.srcObject = stream; | |
| video.addEventListener("loadeddata", predictWebcam); | |
| }); | |
| } | |
// State for the webcam prediction loop: timestamp of the last frame that was
// run through detection, and the most recent detection results.
let lastVideoTime = -1;
let results = undefined;
// A single DrawingUtils bound to the output canvas context, reused each frame.
const drawingUtils = new DrawingUtils(canvasCtx);
| async function predictWebcam() { | |
| const radio = video.videoHeight / video.videoWidth; | |
| video.style.width = videoWidth + "px"; | |
| video.style.height = videoWidth * radio + "px"; | |
| canvasElement.style.width = videoWidth + "px"; | |
| canvasElement.style.height = videoWidth * radio + "px"; | |
| canvasElement.width = video.videoWidth; | |
| canvasElement.height = video.videoHeight; | |
| // Now let's start detecting the stream. | |
| if (runningMode === "IMAGE") { | |
| runningMode = "VIDEO"; | |
| await faceLandmarker.setOptions({ runningMode: runningMode }); | |
| } | |
| let startTimeMs = performance.now(); | |
| if (lastVideoTime !== video.currentTime) { | |
| lastVideoTime = video.currentTime; | |
| results = faceLandmarker.detectForVideo(video, startTimeMs); | |
| } | |
| if (results.faceLandmarks) { | |
| for (const landmarks of results.faceLandmarks) { | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_TESSELATION, | |
| { color: "#C0C0C070", lineWidth: 1 } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE, | |
| { color: "#FF3030" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW, | |
| { color: "#FF3030" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LEFT_EYE, | |
| { color: "#30FF30" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW, | |
| { color: "#30FF30" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_FACE_OVAL, | |
| { color: "#E0E0E0" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LIPS, | |
| { color: "#E0E0E0" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS, | |
| { color: "#FF3030" } | |
| ); | |
| drawingUtils.drawConnectors( | |
| landmarks, | |
| FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS, | |
| { color: "#30FF30" } | |
| ); | |
| } | |
| } | |
| drawBlendShapes(videoBlendShapes, results.faceBlendshapes); | |
| // Call this function again to keep predicting when the browser is ready. | |
| if (webcamRunning === true) { | |
| window.requestAnimationFrame(predictWebcam); | |
| } | |
| } | |
| function drawBlendShapes(el: HTMLElement, blendShapes: any[]) { | |
| if (!blendShapes.length) { | |
| return; | |
| } | |
| console.log(blendShapes[0]); | |
| let htmlMaker = ""; | |
| blendShapes[0].categories.map((shape) => { | |
| htmlMaker += ` | |
| <li class="blend-shapes-item"> | |
| <span class="blend-shapes-label">${ | |
| shape.displayName || shape.categoryName | |
| }</span> | |
| <span class="blend-shapes-value" style="width: calc(${ | |
| +shape.score * 100 | |
| }% - 120px)">${(+shape.score).toFixed(4)}</span> | |
| </li> | |
| `; | |
| }); | |
| el.innerHTML = htmlMaker; | |
| } | |