HuggingFace-SK committed on
Commit
6e5a7ed
·
1 Parent(s): 3454de4

update back to android

Browse files
Files changed (1) hide show
  1. templates/browser-detect.html +22 -21
templates/browser-detect.html CHANGED
@@ -298,7 +298,7 @@
298
 
299
  </div>
300
  <script>
301
-
302
  var speechSupported = true
303
  var prevSpeech = ""
304
  var prevSettings = ""
@@ -313,7 +313,7 @@
313
  const pitchInput = document.getElementById('pitch');
314
  const rateValue = document.getElementById('rateValue');
315
  const pitchValue = document.getElementById('pitchValue');
316
- var word_list=[]
317
  let voices = [];
318
 
319
  const languageNames = {
@@ -641,7 +641,7 @@
641
  console.log("Text to speech is now not supported")
642
  }
643
  }
644
- word_list = []
645
 
646
 
647
  function set_output_array(text) {
@@ -657,7 +657,7 @@
657
  </script>
658
 
659
  <script type="module">
660
- document.getElementById("info-").addEventListener("click", switchPage.bind(null, "info-"));
661
  document.getElementById("home-").addEventListener("click", switchPage.bind(null, "home-"));
662
  document.getElementById("settings-").addEventListener("click", switchPage.bind(null, "settings-"));
663
 
@@ -690,8 +690,8 @@
690
  const vision = await FilesetResolver.forVisionTasks("https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"); // This doesnt really matter as this is already imported somewhere else, and the code runs fine without the request
691
  handLandmarker = await HandLandmarker.createFromOptions(vision, {
692
  baseOptions: {
693
- // modelAssetPath: `http://127.0.0.1:8125/assets/static/hand_landmarker.task`, // For Android
694
- modelAssetPath: `https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task`, // For Web
695
  delegate: "GPU"
696
  },
697
  runningMode: runningMode,
@@ -761,9 +761,8 @@
761
  }
762
  // Enable the live webcam view and start detection.
763
  function enableCam(event) {
764
- if (!handLandmarker || !letterDetector) {
765
  console.log("Wait! objectDetector not loaded yet.");
766
- document.getElementById('webcamButton').innerHTML='<span>Retry</span>'
767
  return;
768
  }
769
  if (webcamRunning === true) {
@@ -845,9 +844,6 @@
845
  let lastVideoTime = -1;
846
  let results = undefined;
847
  console.log(video);
848
-
849
- // Main function that loops per frame
850
-
851
  async function predictWebcam() {
852
  if (video.videoHeight == 0) {
853
  return
@@ -989,16 +985,21 @@
989
  if (crop_bottom > image_height) {
990
  crop_bottom = image_height
991
  }
992
- if(firstA == true){
993
- canvasCtx.beginPath();
994
- canvasCtx.rect(crop_left, crop_top, crop_right - crop_left, crop_bottom - crop_top);
995
- canvasCtx.stroke();
996
- }else{
997
- canvasCtx.beginPath();
998
- canvasCtx.rect(crop_left, crop_top, crop_right - crop_left, crop_bottom - crop_top);
999
- canvasCtx.stroke();
1000
- }
1001
-
 
 
 
 
 
1002
 
1003
 
1004
  }
 
298
 
299
  </div>
300
  <script>
301
+ var word_list = []
302
  var speechSupported = true
303
  var prevSpeech = ""
304
  var prevSettings = ""
 
313
  const pitchInput = document.getElementById('pitch');
314
  const rateValue = document.getElementById('rateValue');
315
  const pitchValue = document.getElementById('pitchValue');
316
+
317
  let voices = [];
318
 
319
  const languageNames = {
 
641
  console.log("Text to speech is now not supported")
642
  }
643
  }
644
+
645
 
646
 
647
  function set_output_array(text) {
 
657
  </script>
658
 
659
  <script type="module">
660
+ document.getElementById("info-").addEventListener("click", switchPage.bind(null, "info-"));
661
  document.getElementById("home-").addEventListener("click", switchPage.bind(null, "home-"));
662
  document.getElementById("settings-").addEventListener("click", switchPage.bind(null, "settings-"));
663
 
 
690
  const vision = await FilesetResolver.forVisionTasks("https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@0.10.0/wasm"); // This doesnt really matter as this is already imported somewhere else, and the code runs fine without the request
691
  handLandmarker = await HandLandmarker.createFromOptions(vision, {
692
  baseOptions: {
693
+ //modelAssetPath: `http://127.0.0.1:8125/assets/static/hand_landmarker.task`, // For Android
694
+ modelAssetPath: `https://storage.googleapis.com/mediapipe-models/hand_landmarker/hand_landmarker/float16/1/hand_landmarker.task`, // For Web
695
  delegate: "GPU"
696
  },
697
  runningMode: runningMode,
 
761
  }
762
  // Enable the live webcam view and start detection.
763
  function enableCam(event) {
764
+ if (!handLandmarker) {
765
  console.log("Wait! objectDetector not loaded yet.");
 
766
  return;
767
  }
768
  if (webcamRunning === true) {
 
844
  let lastVideoTime = -1;
845
  let results = undefined;
846
  console.log(video);
 
 
 
847
  async function predictWebcam() {
848
  if (video.videoHeight == 0) {
849
  return
 
985
  if (crop_bottom > image_height) {
986
  crop_bottom = image_height
987
  }
988
+ if (firstA == true) {
989
+ canvasCtx.beginPath();
990
+ canvasCtx.rect(crop_left, crop_top, crop_right - crop_left, crop_bottom - crop_top);
991
+ canvasCtx.stroke();
992
+ if (!Array.from(document.getElementById("modeSelector").classList).includes('right')) {
993
+ canvasCtx.fillStyle = "#ffffff"
994
+ canvasCtx.fill()
995
+ }
996
+
997
+ } else {
998
+ canvasCtx.beginPath();
999
+ canvasCtx.rect(crop_left, crop_top, crop_right - crop_left, crop_bottom - crop_top);
1000
+ canvasCtx.stroke();
1001
+ }
1002
+
1003
 
1004
 
1005
  }