HuggingFace-SK committed on
Commit d20e063 · 1 Parent(s): acae62a

test scroll fix for iOS

templates/browser-detect.html CHANGED
@@ -39,7 +39,7 @@

  </head>

- <body translate="no">
+ <body translate="no" style="overflow: scroll;">
  <!-- For Android
  <script src="../assets/ipc/androidjs.js"></script>
  <script src="http://127.0.0.1:8125/assets/static/drawing_utils.js" crossorigin="anonymous"></script>
@@ -1192,4 +1192,4 @@

  </body>

- </html>
+ </html>
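For context, the change under test replaces `<body translate="no">` with an inline `overflow: scroll` style (note the committed line was missing its closing quote, fixed above). A minimal sketch of the idea, assuming one also wants momentum scrolling on older iOS Safari — the `-webkit-overflow-scrolling` line is a common companion property and is not part of this commit:

```html
<!-- Sketch only: the commit adds overflow: scroll inline; the
     -webkit-overflow-scrolling rule is an assumption for older iOS. -->
<body translate="no"
      style="overflow: scroll; -webkit-overflow-scrolling: touch;">
```

On current iOS versions momentum scrolling is the default, so the inline `overflow: scroll` alone may be sufficient; moving the rule into browser_detect.css would also keep the markup cleaner.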
templates/browserdetect2.html DELETED
@@ -1,623 +0,0 @@
- <!DOCTYPE html>
- <html lang="en">
-
- <head></head>
- <meta charset="UTF-8">
- <meta name="viewport" content="width=device-width, initial-scale=1.0, maximum-scale=1.0, user-scalable=0" />
-
- <title>Sign Language Interpreter</title>
-
-
- <script>
- window.console = window.console || function (t) { };
- </script>
- <!-- For Android
- <link rel="stylesheet" type="text/css" href="http://127.0.0.1:8125/assets/static/browser_detect.css" />
- -->
- <!-- For Web -->
- <link rel="stylesheet" type="text/css" href="static/browser_detect.css" />
-
-
- </head>
-
- <body translate="no">
- <!-- For Android
- <script src="../assets/ipc/androidjs.js"></script>
- <script src="http://127.0.0.1:8125/assets/static/drawing_utils.js" crossorigin="anonymous"></script>
- <script src="http://127.0.0.1:8125/assets/static/hands.js" crossorigin="anonymous"></script>
- <script src="http://127.0.0.1:8125/assets/static/tfjs-core"></script>
- <script src="http://127.0.0.1:8125/assets/static/tfjs-backend-cpu"></script>
- <script src="http://127.0.0.1:8125/assets/static/tf-tflite.min.js"></script>
- <script src="http://127.0.0.1:8125/assets/static/vision_wasm_internal.js" crossorigin="anonymous"></script>
- -->
-
- <!-- For Web -->
- <script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js"
- crossorigin="anonymous"></script>
- <script src="https://cdn.jsdelivr.net/npm/@mediapipe/hands/hands.js" crossorigin="anonymous"></script>
- <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-core"></script>
- <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-backend-cpu"></script>
- <script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs-tflite/dist/tf-tflite.min.js"></script>
-
-
-
- <div class="container">
-
- <video id="webcam" style="display:none" autoplay playsinline muted></video>
- <div class="canvas_wrapper" id="canvas_wrapper">
- <button id="switch-camera" style="display:none; position: absolute; top:10px; left:10px; padding:5px; height:40px; width:40px; text-align: center; border-radius: 12.25px; font-size: 20px; font-weight: 900; border:none; background-color: #f2f2f2; color:black;
- box-shadow: 0px 4px 20px 4px rgba(0, 0, 0, 0.38); z-index:100">
- <span>⟳</span>
- </button>
- <canvas class="output_canvas" id="output_canvas" width="100%" height="300%"></canvas>
- <center>
- <button id="webcamButton" style="font-weight: 600; color:black;">
- <span>Enable Webcam</span>
- </button>
- </center>
- </div>
- </div>
- <center>
- <img id="output_image" style="display:none"></img>
- <div class="wrapper_result">
- <div id="predicted_result">></div>
- </div>
- <div class="wrapper_text">
- <textarea id="text" onkeyup="set_output_array(this.value)"></textarea>
- <button id="text-to-speech" onclick="speak(document.getElementById('text').value)">
- <span>Listen 🔊</span>
- </button>
-
- <audio id="audioPlayer">-</audio>
- </div>
- <div id="logUI">
-
- </div>
- <center>
- <script>
- var speechSupported = true
- var prevSpeech = ""
-
- logUI = document.getElementById("logUI")
-
- function logMessage(msg) {
- const span = document.createElement('span');
- span.textContent = msg;
- logUI.appendChild(span);
- logUI.appendChild(document.createElement('br')); // Add a line break
- }
-
- const originalFetch = window.fetch;
-
- // Override the fetch function
- window.fetch = async function (input, init) {
- // Convert input to URL if it's a Request object
- const url = typeof input === 'string' ? input : input.url;
- var newUrl = url
- if (url == 'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm/vision_wasm_internal.wasm') {
- // newUrl = 'http://127.0.0.1:8125/assets/static/vision_wasm_internal.wasm' //For Android
- newUrl = 'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm/vision_wasm_internal.wasm' // For Web
-
- }
- console.log("This was FETCHED: ", newUrl)
- // Call the original fetch function with the new URL
- return originalFetch(newUrl, init);
- };
-
-
- var synthesis = window.speechSynthesis;
-
- if ('speechSynthesis' in window) {
-
- var synthesis = window.speechSynthesis;
-
- // Get the first `en` language voice in the list
- var voice = synthesis.getVoices().filter(function (voice) {
- return voice.lang === 'en';
- })[0];
-
- // Create an utterance object
-
- } else {
- speechSupported = false;
- console.log('Text-to-speech not supported.');
- }
-
- function speak(text) {
- console.log("speech api support", speechSupported)
- console.log("condition: ", !speechSupported)
- console.log("condition2: ", speechSupported == false)
- if (!speechSupported) {
- console.log("speech api support", speechSupported)
- const audioPlayer = document.getElementById('audioPlayer');
- if (prevSpeech != text) {
- prevSpeech = text
- audioPlayer.src = 'http://127.0.0.1:8125/speech?t=' + text; // Set the audio source
- console.log("Set src: ", audioPlayer.src)
- }
-
- audioPlayer.play() // Play the audio
- .then(() => {
-
- console.log('Audio is playing');
- })
- .catch(error => {
- console.error('Error playing audio:', error);
- prevSpeech = ''
- });
- } else
- if ('speechSynthesis' in window) {
- var utterance = new SpeechSynthesisUtterance(text);
- utterance.voice = voice;
- utterance.pitch = 0.6;
- utterance.rate = 0.8;
- utterance.volume = 0.8;
- synthesis.speak(utterance);
- } else {
- console.log("Text to speech is now not supported")
- }
- }
- var word_list = []
-
-
- function set_output_array(text) {
- console.log(text)
- word_list = text.split("");
- console.log(word_list)
- }
-
- </script>
-
- <script type="module">
-
-
- //import { HandLandmarker, FilesetResolver } from "http://127.0.0.1:8125/assets/static/tasks-vision@0.10.0" // For Android
- import { HandLandmarker, FilesetResolver } from "https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0"; // For Web
- let handLandmarker = undefined;
- let runningMode = "IMAGE";
- let enableWebcamButton;
- let webcamRunning = false;
- var time_since_letter = 0
- var last_letter_time = 0
- var is_first_run = 1
- // Before we can use HandLandmarker class we must wait for it to finish
- // loading. Machine Learning models can be large and take a moment to
- // get everything needed to run.
- const createHandLandmarker = async () => {
- const vision = await FilesetResolver.forVisionTasks("https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"); // This doesnt really matter as this is already imported somewhere else, and the code runs fine without the request
- handLandmarker = await HandLandmarker.createFromOptions(vision, {
- baseOptions: {
- // modelAssetPath: `http://127.0.0.1:8125/assets/static/hand_landmarker.task`, // For Android
- modelAssetPath: `https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task`, // For Web
- delegate: "GPU"
- },
- runningMode: runningMode,
- numHands: 1
- });
- };
- createHandLandmarker();
-
- // const MODEL_PATH = "http://127.0.0.1:8125/assets/static/model.tflite" // For Android
- const MODEL_PATH = "/exported" // For Web
- var objectDetector = tflite.loadTFLiteModel(MODEL_PATH);
-
- /********************************************************************
- // Continuously grab images
- ********************************************************************/
- var global_res = 0;
- const video = document.getElementById("webcam");
- const canvasElement = document.getElementById("output_canvas");
- const canvasCtx = canvasElement.getContext("2d");
- var x_array = []
- var y_array = []
- var video_facing_mode = "user"
- // Check if webcam access is supported.
- const hasGetUserMedia = () => { var _a; return !!((_a = navigator.mediaDevices) === null || _a === void 0 ? void 0 : _a.getUserMedia); };
- // If webcam supported, add event listener to button for when user
- // wants to activate it.
- if (hasGetUserMedia()) {
- enableWebcamButton = document.getElementById("webcamButton");
- enableWebcamButton.addEventListener("click", enableCam);
- document.getElementById("switch-camera").addEventListener("click", switch_camera);
- }
- else {
- console.warn("getUserMedia() is not supported by your browser");
- }
- async function switch_camera() {
- if (video_facing_mode == 'user') {
- webcamRunning = false
- video_facing_mode = 'environment'
- await load_camera()
- webcamRunning = true
- }
- else {
- webcamRunning = false
- video_facing_mode = 'user'
- await load_camera()
- webcamRunning = true
- }
- }
- // Enable the live webcam view and start detection.
- function enableCam(event) {
- if (!handLandmarker) {
- console.log("Wait! objectDetector not loaded yet.");
- return;
- }
- if (webcamRunning === true) {
- webcamRunning = false;
- enableWebcamButton.innerText = "ENABLE PREDICTIONS";
- }
- else {
- webcamRunning = true;
- enableWebcamButton.style = "display:none"
- document.getElementById("switch-camera").style.display = "block"
-
- }
- // getUsermedia parameters.
- load_camera()
- }
- function load_camera() {
- const constraints = {
- video: {
- facingMode: video_facing_mode
- }
- };
- // Activate the webcam stream.
- navigator.mediaDevices.getUserMedia(constraints)
- .then((stream) => {
- video.srcObject = stream;
- video.play();
- video.addEventListener("loadeddata", predictWebcam);
- })
- .catch((error) => {
- console.error("Error accessing the camera: ", error.name, error.message, error.code);
- });
- }
- let lastVideoTime = -1;
- let results = undefined;
- console.log(video);
- async function predictWebcam() {
- if (video.videoHeight == 0) {
- return
- }
- canvasElement.width = window.innerWidth;
- // Start detecting the stream.
- if (runningMode === "IMAGE") {
- runningMode = "VIDEO";
- await handLandmarker.setOptions({ runningMode: "VIDEO" });
- }
- let startTimeMs = performance.now();
- if (lastVideoTime !== video.currentTime) {
- lastVideoTime = video.currentTime;
- results = handLandmarker.detectForVideo(video, startTimeMs);
- }
- canvasCtx.save();
- canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
- canvasCtx.drawImage(video, 0, 0, canvasElement.width, (video.videoHeight / video.videoWidth) * canvasElement.width)
- if (is_first_run == 1) {
- var elem_rect = document.getElementById("output_canvas").getBoundingClientRect()
- console.log(elem_rect.height | 0);
- document.getElementById("canvas_wrapper").style.height = (elem_rect.height | 0).toString() + "px"
-
- is_first_run = 0
- }
-
- if (results.landmarks && results.handednesses[0]) {
- var current_time = Math.round(Date.now())
- document.getElementById("predicted_result").style.width = String((current_time - last_letter_time) / 10) + "%"
- if (results.handednesses[0][0].categoryName == "Left") {
- annotateImage()
- console.log("LEFT")
- //detectSign()
- } else {
- console.log("RIGHT")
- var current_result = "_"
- var previous_result = document.getElementById("predicted_result").innerText
- document.getElementById("predicted_result").innerText = current_result
-
-
- if (previous_result == current_result) {
- if (current_time - last_letter_time > 1000) {
- last_letter_time = current_time
- word_list.push(" ")
- console.log(word_list)
- document.getElementById("text").value = word_list.join('')
- }
- }
- else {
- last_letter_time = current_time
- }
- }
- }
- else {
- if (30 > calculateCanvasBrightness(canvasElement)) {
-
- var current_result = "<"
- var previous_result = document.getElementById("predicted_result").innerText
- document.getElementById("predicted_result").innerText = current_result
- var current_time = Math.round(Date.now())
- console.log(current_time - last_letter_time)
- if (previous_result == current_result) {
- if (current_time - last_letter_time > 400) {
- last_letter_time = current_time
- word_list.pop()
- console.log(word_list)
- document.getElementById("text").value = word_list.join('')
- }
- }
- else {
- last_letter_time = current_time
- }
- } else {
- last_letter_time = Math.round(Date.now())
-
- document.getElementById("predicted_result").style.width = String(0) + "%"
- }
- }
-
- canvasCtx.restore();
- // Kepp predicting
- if (webcamRunning === true) {
- window.requestAnimationFrame(predictWebcam);
- }
- }
- function annotateImage() {
-
- //console.log(results.landmarks)
- if (results.landmarks[0]) {
- x_array = []
- y_array = []
- results.landmarks[0].forEach(iterate)
- //console.log(x_array)
- var image_height = (video.videoHeight / video.videoWidth) * canvasElement.width
- var image_width = canvasElement.width
- var min_x = Math.min(...x_array) * image_width
- var min_y = Math.min(...y_array) * image_height
- var max_x = Math.max(...x_array) * image_width
- var max_y = Math.max(...y_array) * image_height
-
- var sect_height = max_y - (min_y)
- var sect_width = max_x - (min_x)
- var center_x = (min_x + max_x) / 2
- var center_y = (min_y + max_y) / 2
-
- var sect_diameter = 50
- if (sect_height > sect_width) {
- sect_diameter = sect_height
- //console.log("sect_height", sect_diameter)
- }
- if (sect_height < sect_width) {
- sect_diameter = sect_width
- // console.log("sect_width", sect_diameter)
- }
-
- sect_diameter = sect_diameter + 50
- var sect_radius = sect_diameter / 2
- var crop_top = center_y - sect_radius
- var crop_bottom = center_y + sect_radius
- var crop_left = center_x - sect_radius
- var crop_right = center_x + sect_radius
- if (crop_top < 0) {
- crop_top = 0
- }
- if (crop_left < 0) {
- crop_left = 0
- }
- if (crop_right > image_width) {
- crop_right = image_width
- }
- if (crop_bottom > image_height) {
- crop_bottom = image_height
- }
-
- canvasCtx.beginPath();
- canvasCtx.rect(crop_left, crop_top, crop_right - crop_left, crop_bottom - crop_top);
- canvasCtx.stroke();
-
-
- }
- /* for (const landmarks of results.multiHandLandmarks) {
- drawConnectors(canvasCtx, landmarks, HAND_CONNECTIONS, {
- color: "#00FF00",
- lineWidth: 5
- });
- drawLandmarks(canvasCtx, landmarks, { color: "#FF0000", lineWidth: 2 });
- }*/
- // console.log(results)
- const landmarks = results.landmarks;
- if (landmarks[0]) {
- var hand = landmarks[0]
-
- // Thumb connections
- drawConnection(hand[4], hand[3], '#ffe5b4', 5); // 4-3
- drawConnection(hand[3], hand[2], '#ffe5b4', 5); // 3-2
- drawConnection(hand[2], hand[1], '#ffe5b4', 5); // 2-1
-
- // Index connections
- drawConnection(hand[8], hand[7], '#804080', 5); // 8-7
- drawConnection(hand[7], hand[6], '#804080', 5); // 7-6
- drawConnection(hand[6], hand[5], '#804080', 5); // 6-5
-
- // Middle connections
- drawConnection(hand[12], hand[11], '#ffcc00', 5); // 12-11
- drawConnection(hand[11], hand[10], '#ffcc00', 5); // 11-10
- drawConnection(hand[10], hand[9], '#ffcc00', 5); // 10-9
-
- // Ring connections
- drawConnection(hand[16], hand[15], '#30ff30', 5); // 16-15
- drawConnection(hand[15], hand[14], '#30ff30', 5); // 15-14
- drawConnection(hand[14], hand[13], '#30ff30', 5); // 14-13
-
- // Pinky connections
- drawConnection(hand[20], hand[19], '#1565c0', 5); // 20-19
- drawConnection(hand[19], hand[18], '#1565c0', 5); // 19-18
- drawConnection(hand[18], hand[17], '#1565c0', 5); // 18-17
-
- drawConnection(hand[0], hand[1], '#808080', 5); // 0-1
- drawConnection(hand[0], hand[5], '#808080', 5); // 0-5
- drawConnection(hand[0], hand[17], '#808080', 5); // 0-17
- drawConnection(hand[5], hand[9], '#808080', 5); // 5-9
- drawConnection(hand[9], hand[13], '#808080', 5); // 9-13
- drawConnection(hand[13], hand[17], '#808080', 5); // 13-17
-
- // Thumb
- drawLandmarks(canvasCtx, hand[2], '#ffe5b4'); // Thumb tip (2)
- drawLandmarks(canvasCtx, hand[3], '#ffe5b4'); // Thumb base (3)
- drawLandmarks(canvasCtx, hand[4], '#ffe5b4'); // Thumb base (4)
-
- // Index
- drawLandmarks(canvasCtx, hand[6], '#804080'); // Index tip (6)
- drawLandmarks(canvasCtx, hand[7], '#804080'); // Index base (7)
- drawLandmarks(canvasCtx, hand[8], '#804080'); // Index base (8)
-
- // Middle
- drawLandmarks(canvasCtx, hand[10], '#ffcc00'); // Middle tip (10)
- drawLandmarks(canvasCtx, hand[11], '#ffcc00'); // Middle base (11)
- drawLandmarks(canvasCtx, hand[12], '#ffcc00'); // Middle base (12)
-
- // Ring
- drawLandmarks(canvasCtx, hand[14], '#30ff30'); // Ring tip (14)
- drawLandmarks(canvasCtx, hand[15], '#30ff30'); // Ring base (15)
- drawLandmarks(canvasCtx, hand[16], '#30ff30'); // Ring base (16)
-
- // Pinky
- drawLandmarks(canvasCtx, hand[18], '#1565c0'); // Pinky tip (18)
- drawLandmarks(canvasCtx, hand[19], '#1565c0'); // Pinky base (19)
- drawLandmarks(canvasCtx, hand[20], '#1565c0'); // Pinky base (20)
-
- drawLandmarks(canvasCtx, hand[0], '#ff3030'); // Wrist (0)
-
- drawLandmarks(canvasCtx, hand[1], '#ff3030'); // Palm base (1)
-
- drawLandmarks(canvasCtx, hand[5], '#ff3030'); // Index palm (5)
-
- drawLandmarks(canvasCtx, hand[9], '#ff3030'); // Middle palm (9)
-
- drawLandmarks(canvasCtx, hand[13], '#ff3030'); // Ring palm (13)
-
- drawLandmarks(canvasCtx, hand[17], '#ff3030'); // Pinky palm (17)
- cropCanvas(canvasElement, crop_left, crop_top, crop_right - crop_left, crop_bottom - crop_top)
- }
- // Add more drawing calls for each landmark collection as needed
-
-
-
-
- //# sourceURL=pen.js
- }
-
-
- function iterate(x, y) {
- x_array.push(x.x)
- y_array.push(x.y)
- }
-
- const cropCanvas = (sourceCanvas, left, top, width, height) => {
- let destCanvas = document.createElement('canvas');
- destCanvas.width = 224;
- var cropAspectRatio = width / height;
-
- destCanvas.height = 224 / cropAspectRatio
- destCanvas.getContext("2d").drawImage(
- sourceCanvas,
- left, top, width, height, // source rect with content to crop
- 0, 0, 224, destCanvas.height); // newCanvas, same size as source
- var predictionInput = tf.browser.fromPixels(destCanvas.getContext("2d").getImageData(0, 0, 224, 224))
-
- predict(tf.expandDims(predictionInput, 0));
- }
- async function predict(inputTensor) {
-
- //console.log("in predict")
- var letter_list = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z", "#"]
- objectDetector.then(function (res) {
- var prediction = res.predict(inputTensor);
- var outputArray = prediction.dataSync(); // Get the output as an array
- var predictedClass = outputArray.indexOf(Math.max(...outputArray)); // Get the index
- var current_result = letter_list[predictedClass]
- var previous_result = document.getElementById("predicted_result").innerText
- document.getElementById("predicted_result").innerText = current_result
- var current_time = Math.round(Date.now())
-
- if (previous_result == current_result) {
- if (current_time - last_letter_time > 1000) {
- last_letter_time = current_time
- word_list.push(current_result)
- console.log(word_list)
- document.getElementById("text").value = word_list.join('')
- }
- }
- else {
- last_letter_time = current_time
- }
- console.log(letter_list[predictedClass]);
- }, function (err) {
- console.log(err);
- });
-
- }
-
- function drawLandmarks(canvasCtx, landmarks, color) {
- var image_height = (video.videoHeight / video.videoWidth) * canvasElement.width
- var image_width = canvasElement.width
-
- canvasCtx.fillStyle = color;
- canvasCtx.strokeStyle = 'white';
- canvasCtx.lineWidth = 1;
- canvasCtx.beginPath();
- canvasCtx.arc(landmarks.x * image_width, landmarks.y * image_height, 6, 0, 2 * Math.PI);
- canvasCtx.fill();
- canvasCtx.stroke();
-
- }
-
- function drawConnection(startNode, endNode, strokeColor, strokeWidth) {
-
- var image_height = (video.videoHeight / video.videoWidth) * canvasElement.width
- var image_width = canvasElement.width
-
- canvasCtx.strokeStyle = strokeColor;
- canvasCtx.lineWidth = strokeWidth;
- canvasCtx.beginPath();
- canvasCtx.moveTo(startNode.x * image_width, startNode.y * image_height);
- canvasCtx.lineTo(endNode.x * image_width, endNode.y * image_height);
- canvasCtx.stroke();
- }
- function calculateCanvasBrightness(canvas) {
- const context = canvas.getContext('2d');
-
- // Get the image data from the canvas
- const imageData = context.getImageData(0, 0, canvas.width, canvas.height);
- const data = imageData.data;
-
- let totalBrightness = 0;
- let pixelCount = 0;
-
- // Loop through each pixel
- for (let i = 0; i < data.length; i += 4) {
- const r = data[i]; // Red
- const g = data[i + 1]; // Green
- const b = data[i + 2]; // Blue
-
- // Calculate brightness for this pixel
- const brightness = 0.299 * r + 0.587 * g + 0.114 * b;
- totalBrightness += brightness;
- pixelCount++;
- }
-
- // Calculate average brightness
- const averageBrightness = totalBrightness / pixelCount;
-
- return averageBrightness;
- }
- </script>
-
- <script src="https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm/vision_wasm_internal.js"
- crossorigin="anonymous"></script>
-
-
-
-
- </body>
-
- </html>
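For reference, the page deleted above crops the detected hand region to the classifier's 224×224 input and picks the highest-scoring letter. A condensed sketch of that crop-and-predict step, reusing `MODEL_PATH`, `tflite.loadTFLiteModel`, and `letter_list` from the deleted code (the original preserves the crop's aspect ratio; this sketch simplifies to a square resize):

```js
// Condensed sketch of browserdetect2.html's inference path (assumptions noted above).
async function classifyHand(sourceCanvas, left, top, width, height) {
  const model = await tflite.loadTFLiteModel(MODEL_PATH);      // TFLite classifier
  const crop = document.createElement('canvas');
  crop.width = crop.height = 224;                              // model input size
  crop.getContext('2d')
      .drawImage(sourceCanvas, left, top, width, height,       // hand bounding box
                 0, 0, 224, 224);                              // resized square crop
  const input = tf.expandDims(tf.browser.fromPixels(crop), 0); // shape [1,224,224,3]
  const scores = model.predict(input).dataSync();              // per-letter scores
  return letter_list[scores.indexOf(Math.max(...scores))];     // argmax → letter
}
```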
templates/index.html DELETED
@@ -1,85 +0,0 @@
- <!DOCTYPE html>
- <html>
-
- <head>
- <title>Camera to Data URL</title>
- <script src="https://cdnjs.cloudflare.com/ajax/libs/socket.io/4.1.3/socket.io.min.js"></script>
- </head>
-
- <body>
- <video style="display:none" id="video" width="640" height="480" autoplay></video>
- <canvas id="canvas" style="display:none;"></canvas>
- <img id="image"></img>
- <p id="result"></p>
- <script>
-
- globalData=0
- // Get access to the user's camera
- navigator.mediaDevices.getUserMedia({ video: true })
- .then(function (stream) {
- // Attach the stream to the video element
- var video = document.getElementById('video');
- video.srcObject = stream;
- })
- .catch(function (err) {
- console.log('Error accessing camera:', err);
- });
-
-
- function dataURItoBlob(dataURI) {
- var byteString = atob(dataURI.split(',')[1]);
- var mimeString = dataURI.split(',')[0].split(':')[1].split(';')[0];
- var ab = new ArrayBuffer(byteString.length);
- var ia = new Uint8Array(ab);
- for (var i = 0; i < byteString.length; i++) {
- ia[i] = byteString.charCodeAt(i);
- }
- return new Blob([ab], { type: mimeString });
- }
-
- function snap() {
- var canvas = document.getElementById('canvas');
- canvas.width = video.videoWidth;
- canvas.height = video.videoHeight;
- canvas.getContext('2d').drawImage(video, 0, 0, canvas.width, canvas.height);
-
- // Convert the canvas image to a data URL
- var dataUrl = canvas.toDataURL("image/jpeg",0.2);
- //var jpeg = dataURItoBlob(dataUrl);
- //console.log(jpeg)
- //console.log(jpeg)
- // Do something with the data URL, such as display it in an image element
- //document.getElementById('image').src = dataUrl;
- send_frame(dataUrl)
- console.log("emitted")
- setTimeout(snap, 100)
- };
- function send_frame(dataUrl) {
- const dataToSend = {
- key: dataUrl
- };
-
- fetch('/api/data', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify(dataToSend) // Convert the data to JSON
- })
- .then(response => response.json())
- .then(data => {
- //console.log(data.frame)
- document.getElementById("image").src=data.frame
- globalData=data
- document.getElementById("result").innerText=data.result
- })
- .catch((error) => {
- console.error('Error:', error);
- });
- }
- </script>
- <button id="snap" onclick="snap()">Snap</button>
- <img id="image" />
- </body>
-
- </html>
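The client in the deleted index.html defines a simple contract: it POSTs `{ key: <JPEG data URL> }` to `/api/data` every 100 ms and expects `{ frame, result }` back. The server side is not part of this diff; a hypothetical Express handler matching that contract might look like:

```js
// Hypothetical server sketch (not in this repo): echoes the frame and a prediction.
const express = require('express');
const app = express();
app.use(express.json({ limit: '5mb' }));      // data-URL frames can be large

app.post('/api/data', (req, res) => {
  const frame = req.body.key;                 // "key" carries the JPEG data URL
  // ...decode the frame and run recognition here (placeholder)...
  res.json({ frame, result: 'predicted text' });
});

app.listen(8125);
```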