danielr-ceva commited on
Commit
44cdb79
·
verified ·
1 Parent(s): abacc42

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -3
app.py CHANGED
@@ -1,6 +1,9 @@
1
  import io
 
2
  import math
 
3
  import tempfile
 
4
  from dataclasses import dataclass
5
  from pathlib import Path
6
  from typing import Dict, Optional, Tuple
@@ -11,6 +14,7 @@ import matplotlib.pyplot as plt
11
  import numpy as np
12
  import onnxruntime as ort
13
  import soundfile as sf
 
14
  from PIL import Image
15
 
16
  # -----------------------------
@@ -18,6 +22,11 @@ from PIL import Image
18
  # -----------------------------
19
  MAX_SECONDS = 10.0
20
  ONNX_DIR = Path("./onnx")
 
 
 
 
 
21
 
22
 
23
  @dataclass(frozen=True)
@@ -27,6 +36,43 @@ class ModelSpec:
27
  onnx_path: str
28
 
29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  # -----------------------------
31
  # Model discovery and metadata
32
  # -----------------------------
@@ -390,6 +436,22 @@ def run_enhancement(
390
  return noisy_out, enh_out, noisy_img, enh_img, status
391
 
392
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
393
  def set_source_visibility(source: str):
394
  return (
395
  gr.update(visible=(source == "Microphone")),
@@ -418,7 +480,7 @@ THEME = gr.themes.Soft(
418
 
419
  CSS = """
420
  .gradio-container{
421
- max-width: 1040px !important;
422
  margin: 0 auto !important;
423
  font-family: Arial, ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica Neue, Noto Sans, Liberation Sans, sans-serif !important;
424
  }
@@ -485,6 +547,7 @@ with gr.Blocks(theme=THEME, css=CSS, title="DPDFNet Speech Enhancement") as demo
485
  </div>
486
  """
487
  )
 
488
 
489
  with gr.Row():
490
  model_key = gr.Dropdown(
@@ -536,11 +599,12 @@ with gr.Blocks(theme=THEME, css=CSS, title="DPDFNet Speech Enhancement") as demo
536
 
537
  source.change(fn=set_source_visibility, inputs=source, outputs=[mic_audio, file_audio])
538
  run_btn.click(
539
- fn=run_enhancement,
540
  inputs=[source, mic_audio, file_audio, model_key],
541
- outputs=[out_noisy, out_enh, img_noisy, img_enh, status],
542
  api_name="enhance",
543
  )
 
544
 
545
  if __name__ == "__main__":
546
  demo.queue(max_size=32).launch()
 
1
  import io
2
+ import json
3
  import math
4
+ import os
5
  import tempfile
6
+ from contextlib import contextmanager
7
  from dataclasses import dataclass
8
  from pathlib import Path
9
  from typing import Dict, Optional, Tuple
 
14
  import numpy as np
15
  import onnxruntime as ort
16
  import soundfile as sf
17
+ from filelock import FileLock
18
  from PIL import Image
19
 
20
  # -----------------------------
 
22
  # -----------------------------
23
  MAX_SECONDS = 10.0
24
  ONNX_DIR = Path("./onnx")
25
# Persisted usage-counter storage.  Prefer a writable ./data directory
# when one exists (presumably a persistent volume on the hosting
# platform — TODO confirm), otherwise fall back to the working directory.
COUNTER_DIR = Path("./data") if Path("./data").exists() else Path("./")
COUNTER_PATH = COUNTER_DIR / "dpdfnet_usage_counter.json"
# Scratch file for atomic writes; os.replace() moves it over COUNTER_PATH.
TMP_PATH = COUNTER_DIR / "dpdfnet_usage_counter.json.tmp"
# Lock file serializing counter updates across worker processes.
LOCK_PATH = str(COUNTER_PATH) + ".lock"
30
 
31
 
32
  @dataclass(frozen=True)
 
36
  onnx_path: str
37
 
38
 
39
@contextmanager
def _maybe_lock():
    """Hold the cross-process counter lock for the duration of the block.

    Thin wrapper around FileLock so callers don't depend on the locking
    mechanism directly.
    """
    lock = FileLock(LOCK_PATH)
    with lock:
        yield
43
+
44
+
45
def _read_count() -> int:
    """Return the persisted "enhance_runs" total, or 0 on any problem.

    Best-effort by design: a missing file, unreadable JSON, or an
    unexpected value type all degrade to 0 rather than raising, so a
    corrupt counter can never break the app.
    """
    try:
        if not COUNTER_PATH.exists():
            return 0
        payload = json.loads(COUNTER_PATH.read_text(encoding="utf-8"))
        return int(payload.get("enhance_runs", 0))
    except Exception:
        # Swallow everything deliberately (see docstring).
        return 0
54
+
55
+
56
def _atomic_write_json(obj: dict) -> None:
    """Atomically persist *obj* as compact JSON at COUNTER_PATH.

    Writes to a sibling temp file, flushes and fsyncs it, then
    os.replace()s it into place so readers never observe a partially
    written counter file.

    Raises whatever the underlying filesystem/JSON operations raise;
    on failure the temp file is removed so no stale .tmp lingers.
    """
    COUNTER_DIR.mkdir(parents=True, exist_ok=True)
    try:
        with TMP_PATH.open("w", encoding="utf-8") as f:
            json.dump(obj, f, separators=(",", ":"))
            f.flush()
            # Force the data to disk before the rename makes it visible.
            os.fsync(f.fileno())
        # Atomic on POSIX; also replaces an existing destination on Windows.
        os.replace(TMP_PATH, COUNTER_PATH)
    except Exception:
        # Fix: the original leaked the temp file if anything above failed.
        TMP_PATH.unlink(missing_ok=True)
        raise
63
+
64
+
65
def increment_and_get_count() -> int:
    """Bump the persisted run counter by one and return the new total.

    The read-modify-write happens under the file lock so concurrent
    workers cannot lose updates.
    """
    with _maybe_lock():
        total = _read_count() + 1
        _atomic_write_json({"enhance_runs": total})
    return total
70
+
71
+
72
def usage_markdown_text() -> str:
    """Markdown line reporting the current persisted run total."""
    total = _read_count()
    return "**Total enhancements run:** " + format(total, ",")
74
+
75
+
76
  # -----------------------------
77
  # Model discovery and metadata
78
  # -----------------------------
 
436
  return noisy_out, enh_out, noisy_img, enh_img, status
437
 
438
 
439
def run_enhancement_with_count(
    source: str,
    mic_path: Optional[str],
    file_path: Optional[str],
    model_key: str,
):
    """Run speech enhancement, then bump and report the usage counter.

    Returns the five outputs of run_enhancement (noisy/enhanced audio,
    their spectrogram images, and a status string) plus a Markdown
    string with the updated total for the usage display.
    """
    results = run_enhancement(
        source=source,
        mic_path=mic_path,
        file_path=file_path,
        model_key=model_key,
    )
    total = increment_and_get_count()
    return (*results, f"**Total enhancements run:** {total:,}")
453
+
454
+
455
  def set_source_visibility(source: str):
456
  return (
457
  gr.update(visible=(source == "Microphone")),
 
480
 
481
  CSS = """
482
  .gradio-container{
483
+ max-width: min(96vw, 1500px) !important;
484
  margin: 0 auto !important;
485
  font-family: Arial, ui-sans-serif, system-ui, -apple-system, Segoe UI, Roboto, Helvetica Neue, Noto Sans, Liberation Sans, sans-serif !important;
486
  }
 
547
  </div>
548
  """
549
  )
550
+ usage_md = gr.Markdown(usage_markdown_text())
551
 
552
  with gr.Row():
553
  model_key = gr.Dropdown(
 
599
 
600
  source.change(fn=set_source_visibility, inputs=source, outputs=[mic_audio, file_audio])
601
  run_btn.click(
602
+ fn=run_enhancement_with_count,
603
  inputs=[source, mic_audio, file_audio, model_key],
604
+ outputs=[out_noisy, out_enh, img_noisy, img_enh, status, usage_md],
605
  api_name="enhance",
606
  )
607
+ demo.load(fn=usage_markdown_text, outputs=usage_md)
608
 
609
  if __name__ == "__main__":
610
  demo.queue(max_size=32).launch()