Arsooo committed on
Commit
416e35c
·
1 Parent(s): 79c3ef9

Create script.js

Browse files
Files changed (1) hide show
  1. script.js +300 -0
script.js ADDED
@@ -0,0 +1,300 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2023 The MediaPipe Authors.
2
+
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import vision from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3";
16
+ const { FaceLandmarker, FilesetResolver, DrawingUtils } = vision;
17
+ const demosSection = document.getElementById("demos");
18
+ const imageBlendShapes = document.getElementById("image-blend-shapes");
19
+ const videoBlendShapes = document.getElementById("video-blend-shapes");
20
+
21
+ let faceLandmarker;
22
+ let runningMode: "IMAGE" | "VIDEO" = "IMAGE";
23
+ let enableWebcamButton: HTMLButtonElement;
24
+ let webcamRunning: Boolean = false;
25
+ const videoWidth = 480;
26
+
27
+ // Before we can use HandLandmarker class we must wait for it to finish
28
+ // loading. Machine Learning models can be large and take a moment to
29
+ // get everything needed to run.
30
+ async function createFaceLandmarker() {
31
+ const filesetResolver = await FilesetResolver.forVisionTasks(
32
+ "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.3/wasm"
33
+ );
34
+ faceLandmarker = await FaceLandmarker.createFromOptions(filesetResolver, {
35
+ baseOptions: {
36
+ modelAssetPath: `https://storage.googleapis.com/mediapipe-models/face_landmarker/face_landmarker/float16/1/face_landmarker.task`,
37
+ delegate: "GPU"
38
+ },
39
+ outputFaceBlendshapes: true,
40
+ runningMode,
41
+ numFaces: 1
42
+ });
43
+ demosSection.classList.remove("invisible");
44
+ }
45
+ createFaceLandmarker();
46
+
47
+ /********************************************************************
48
+ // Demo 1: Grab a bunch of images from the page and detection them
49
+ // upon click.
50
+ ********************************************************************/
51
+
52
+ // In this demo, we have put all our clickable images in divs with the
53
+ // CSS class 'detectionOnClick'. Lets get all the elements that have
54
+ // this class.
55
+ const imageContainers = document.getElementsByClassName("detectOnClick");
56
+
57
+ // Now let's go through all of these and add a click event listener.
58
+ for (let imageContainer of imageContainers) {
59
+ // Add event listener to the child element whichis the img element.
60
+ imageContainer.children[0].addEventListener("click", handleClick);
61
+ }
62
+
63
+ // When an image is clicked, let's detect it and display results!
64
+ async function handleClick(event) {
65
+ if (!faceLandmarker) {
66
+ console.log("Wait for faceLandmarker to load before clicking!");
67
+ return;
68
+ }
69
+
70
+ if (runningMode === "VIDEO") {
71
+ runningMode = "IMAGE";
72
+ await faceLandmarker.setOptions({ runningMode });
73
+ }
74
+ // Remove all landmarks drawed before
75
+ const allCanvas = event.target.parentNode.getElementsByClassName("canvas");
76
+ for (var i = allCanvas.length - 1; i >= 0; i--) {
77
+ const n = allCanvas[i];
78
+ n.parentNode.removeChild(n);
79
+ }
80
+
81
+ // We can call faceLandmarker.detect as many times as we like with
82
+ // different image data each time. This returns a promise
83
+ // which we wait to complete and then call a function to
84
+ // print out the results of the prediction.
85
+ const faceLandmarkerResult = faceLandmarker.detect(event.target);
86
+ const canvas = document.createElement("canvas") as HTMLCanvasElement;
87
+ canvas.setAttribute("class", "canvas");
88
+ canvas.setAttribute("width", event.target.naturalWidth + "px");
89
+ canvas.setAttribute("height", event.target.naturalHeight + "px");
90
+ canvas.style.left = "0px";
91
+ canvas.style.top = "0px";
92
+ canvas.style.width = `${event.target.width}px`;
93
+ canvas.style.height = `${event.target.height}px`;
94
+
95
+ event.target.parentNode.appendChild(canvas);
96
+ const ctx = canvas.getContext("2d");
97
+ const drawingUtils = new DrawingUtils(ctx);
98
+ for (const landmarks of faceLandmarkerResult.faceLandmarks) {
99
+ drawingUtils.drawConnectors(
100
+ landmarks,
101
+ FaceLandmarker.FACE_LANDMARKS_TESSELATION,
102
+ { color: "#C0C0C070", lineWidth: 1 }
103
+ );
104
+ drawingUtils.drawConnectors(
105
+ landmarks,
106
+ FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
107
+ { color: "#FF3030" }
108
+ );
109
+ drawingUtils.drawConnectors(
110
+ landmarks,
111
+ FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
112
+ { color: "#FF3030" }
113
+ );
114
+ drawingUtils.drawConnectors(
115
+ landmarks,
116
+ FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
117
+ { color: "#30FF30" }
118
+ );
119
+ drawingUtils.drawConnectors(
120
+ landmarks,
121
+ FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
122
+ { color: "#30FF30" }
123
+ );
124
+ drawingUtils.drawConnectors(
125
+ landmarks,
126
+ FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
127
+ { color: "#E0E0E0" }
128
+ );
129
+ drawingUtils.drawConnectors(landmarks, FaceLandmarker.FACE_LANDMARKS_LIPS, {
130
+ color: "#E0E0E0"
131
+ });
132
+ drawingUtils.drawConnectors(
133
+ landmarks,
134
+ FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
135
+ { color: "#FF3030" }
136
+ );
137
+ drawingUtils.drawConnectors(
138
+ landmarks,
139
+ FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
140
+ { color: "#30FF30" }
141
+ );
142
+ }
143
+ drawBlendShapes(imageBlendShapes, faceLandmarkerResult.faceBlendshapes);
144
+ }
145
+
/********************************************************************
// Demo 2: Continuously grab image from webcam stream and detect it.
********************************************************************/

// The <video> element that will receive the webcam MediaStream.
const video = document.getElementById("webcam") as HTMLVideoElement;
// Canvas laid over the video; detection results are drawn onto it.
const canvasElement = document.getElementById(
  "output_canvas"
) as HTMLCanvasElement;

// 2D context used by the per-frame DrawingUtils instance below.
const canvasCtx = canvasElement.getContext("2d");
157
+ // Check if webcam access is supported.
158
+ function hasGetUserMedia() {
159
+ return !!(navigator.mediaDevices && navigator.mediaDevices.getUserMedia);
160
+ }
161
+
162
+ // If webcam supported, add event listener to button for when user
163
+ // wants to activate it.
164
+ if (hasGetUserMedia()) {
165
+ enableWebcamButton = document.getElementById(
166
+ "webcamButton"
167
+ ) as HTMLButtonElement;
168
+ enableWebcamButton.addEventListener("click", enableCam);
169
+ } else {
170
+ console.warn("getUserMedia() is not supported by your browser");
171
+ }
172
+
173
+ // Enable the live webcam view and start detection.
174
+ function enableCam(event) {
175
+ if (!faceLandmarker) {
176
+ console.log("Wait! faceLandmarker not loaded yet.");
177
+ return;
178
+ }
179
+
180
+ if (webcamRunning === true) {
181
+ webcamRunning = false;
182
+ enableWebcamButton.innerText = "ENABLE PREDICTIONS";
183
+ } else {
184
+ webcamRunning = true;
185
+ enableWebcamButton.innerText = "DISABLE PREDICTIONS";
186
+ }
187
+
188
+ // getUsermedia parameters.
189
+ const constraints = {
190
+ video: true
191
+ };
192
+
193
+ // Activate the webcam stream.
194
+ navigator.mediaDevices.getUserMedia(constraints).then((stream) => {
195
+ video.srcObject = stream;
196
+ video.addEventListener("loadeddata", predictWebcam);
197
+ });
198
+ }
199
+
200
+ let lastVideoTime = -1;
201
+ let results = undefined;
202
+ const drawingUtils = new DrawingUtils(canvasCtx);
203
+ async function predictWebcam() {
204
+ const radio = video.videoHeight / video.videoWidth;
205
+ video.style.width = videoWidth + "px";
206
+ video.style.height = videoWidth * radio + "px";
207
+ canvasElement.style.width = videoWidth + "px";
208
+ canvasElement.style.height = videoWidth * radio + "px";
209
+ canvasElement.width = video.videoWidth;
210
+ canvasElement.height = video.videoHeight;
211
+ // Now let's start detecting the stream.
212
+ if (runningMode === "IMAGE") {
213
+ runningMode = "VIDEO";
214
+ await faceLandmarker.setOptions({ runningMode: runningMode });
215
+ }
216
+ let startTimeMs = performance.now();
217
+ if (lastVideoTime !== video.currentTime) {
218
+ lastVideoTime = video.currentTime;
219
+ results = faceLandmarker.detectForVideo(video, startTimeMs);
220
+ }
221
+ if (results.faceLandmarks) {
222
+ for (const landmarks of results.faceLandmarks) {
223
+ drawingUtils.drawConnectors(
224
+ landmarks,
225
+ FaceLandmarker.FACE_LANDMARKS_TESSELATION,
226
+ { color: "#C0C0C070", lineWidth: 1 }
227
+ );
228
+ drawingUtils.drawConnectors(
229
+ landmarks,
230
+ FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE,
231
+ { color: "#FF3030" }
232
+ );
233
+ drawingUtils.drawConnectors(
234
+ landmarks,
235
+ FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW,
236
+ { color: "#FF3030" }
237
+ );
238
+ drawingUtils.drawConnectors(
239
+ landmarks,
240
+ FaceLandmarker.FACE_LANDMARKS_LEFT_EYE,
241
+ { color: "#30FF30" }
242
+ );
243
+ drawingUtils.drawConnectors(
244
+ landmarks,
245
+ FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW,
246
+ { color: "#30FF30" }
247
+ );
248
+ drawingUtils.drawConnectors(
249
+ landmarks,
250
+ FaceLandmarker.FACE_LANDMARKS_FACE_OVAL,
251
+ { color: "#E0E0E0" }
252
+ );
253
+ drawingUtils.drawConnectors(
254
+ landmarks,
255
+ FaceLandmarker.FACE_LANDMARKS_LIPS,
256
+ { color: "#E0E0E0" }
257
+ );
258
+ drawingUtils.drawConnectors(
259
+ landmarks,
260
+ FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS,
261
+ { color: "#FF3030" }
262
+ );
263
+ drawingUtils.drawConnectors(
264
+ landmarks,
265
+ FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS,
266
+ { color: "#30FF30" }
267
+ );
268
+ }
269
+ }
270
+ drawBlendShapes(videoBlendShapes, results.faceBlendshapes);
271
+
272
+ // Call this function again to keep predicting when the browser is ready.
273
+ if (webcamRunning === true) {
274
+ window.requestAnimationFrame(predictWebcam);
275
+ }
276
+ }
277
+
278
+ function drawBlendShapes(el: HTMLElement, blendShapes: any[]) {
279
+ if (!blendShapes.length) {
280
+ return;
281
+ }
282
+
283
+ console.log(blendShapes[0]);
284
+
285
+ let htmlMaker = "";
286
+ blendShapes[0].categories.map((shape) => {
287
+ htmlMaker += `
288
+ <li class="blend-shapes-item">
289
+ <span class="blend-shapes-label">${
290
+ shape.displayName || shape.categoryName
291
+ }</span>
292
+ <span class="blend-shapes-value" style="width: calc(${
293
+ +shape.score * 100
294
+ }% - 120px)">${(+shape.score).toFixed(4)}</span>
295
+ </li>
296
+ `;
297
+ });
298
+
299
+ el.innerHTML = htmlMaker;
300
+ }