admin committed on
Commit
72cbf6e
·
1 Parent(s): c229fbb
Files changed (5) hide show
  1. README.md +1 -1
  2. app.py +4 -7
  3. model.py +10 -7
  4. requirements.txt +0 -1
  5. utils.py +9 -7
README.md CHANGED
@@ -4,7 +4,7 @@ emoji: 🪕🎶
4
  colorFrom: yellow
5
  colorTo: blue
6
  sdk: gradio
7
- sdk_version: 5.22.0
8
  app_file: app.py
9
  pinned: true
10
  license: mit
 
4
  colorFrom: yellow
5
  colorTo: blue
6
  sdk: gradio
7
+ sdk_version: 6.3.0
8
  app_file: app.py
9
  pinned: true
10
  license: mit
app.py CHANGED
@@ -394,12 +394,9 @@ if __name__ == "__main__":
394
  gr.Dropdown(choices=models, label=_L("选择模型"), value=models[0]),
395
  ],
396
  outputs=[
397
- gr.Textbox(label=_L("状态栏"), show_copy_button=True),
398
- gr.Textbox(label=_L("音频文件名"), show_copy_button=True),
399
- gr.Textbox(
400
- label=_L("中国乐器识别"),
401
- show_copy_button=True,
402
- ),
403
  ],
404
  examples=examples,
405
  cache_examples=False,
@@ -426,4 +423,4 @@ if __name__ == "__main__":
426
  ```"""
427
  )
428
 
429
- demo.launch()
 
394
  gr.Dropdown(choices=models, label=_L("选择模型"), value=models[0]),
395
  ],
396
  outputs=[
397
+ gr.Textbox(label=_L("状态栏"), buttons=["copy"]),
398
+ gr.Textbox(label=_L("音频文件名"), buttons=["copy"]),
399
+ gr.Textbox(label=_L("中国乐器识别"), buttons=["copy"]),
 
 
 
400
  ],
401
  examples=examples,
402
  cache_examples=False,
 
423
  ```"""
424
  )
425
 
426
+ demo.launch(css="#gradio-share-link-button-0 { display: none; }")
model.py CHANGED
@@ -1,8 +1,6 @@
1
  import torch
2
  import torch.nn as nn
3
  import torchvision.models as models
4
- from modelscope.msdatasets import MsDataset
5
- from datasets import load_dataset
6
  from utils import MODEL_DIR, EN_US
7
 
8
 
@@ -39,11 +37,16 @@ class EvalNet:
39
  return backbone_list[0]
40
 
41
  def _model_info(self, m_ver: str):
42
- backbone_list = (
43
- load_dataset("monetjoe/cv_backbones", split="train")
44
- if EN_US
45
- else MsDataset.load("monetjoe/cv_backbones", split="train")
46
- )
 
 
 
 
 
47
  backbone = self._get_backbone(m_ver, backbone_list)
48
  m_type = str(backbone["type"])
49
  input_size = int(backbone["input_size"])
 
1
  import torch
2
  import torch.nn as nn
3
  import torchvision.models as models
 
 
4
  from utils import MODEL_DIR, EN_US
5
 
6
 
 
37
  return backbone_list[0]
38
 
39
  def _model_info(self, m_ver: str):
40
+ if EN_US:
41
+ from datasets import load_dataset
42
+
43
+ backbone_list = load_dataset("monetjoe/cv_backbones", split="train")
44
+
45
+ else:
46
+ from modelscope.msdatasets import MsDataset
47
+
48
+ backbone_list = MsDataset.load("monetjoe/cv_backbones", split="train")
49
+
50
  backbone = self._get_backbone(m_ver, backbone_list)
51
  m_type = str(backbone["type"])
52
  input_size = int(backbone["input_size"])
requirements.txt CHANGED
@@ -1,5 +1,4 @@
1
  librosa
2
  matplotlib
3
- modelscope[framework]
4
  torch
5
  torchvision
 
1
  librosa
2
  matplotlib
 
3
  torch
4
  torchvision
utils.py CHANGED
@@ -1,8 +1,6 @@
1
  import os
2
  import torch
3
  import torchvision.transforms as transforms
4
- import huggingface_hub
5
- import modelscope
6
  from PIL import Image
7
 
8
  EN_US = os.getenv("LANG") != "zh_CN.UTF-8"
@@ -17,17 +15,21 @@ ZH2EN = {
17
  "引用": "Cite",
18
  }
19
 
20
- MODEL_DIR = (
21
- huggingface_hub.snapshot_download(
 
 
22
  "ccmusic-database/CTIS",
23
  cache_dir="./__pycache__",
24
  )
25
- if EN_US
26
- else modelscope.snapshot_download(
 
 
 
27
  "ccmusic-database/CTIS",
28
  cache_dir="./__pycache__",
29
  )
30
- )
31
 
32
 
33
  def _L(zh_txt: str):
 
1
  import os
2
  import torch
3
  import torchvision.transforms as transforms
 
 
4
  from PIL import Image
5
 
6
  EN_US = os.getenv("LANG") != "zh_CN.UTF-8"
 
15
  "引用": "Cite",
16
  }
17
 
18
+ if EN_US:
19
+ import huggingface_hub
20
+
21
+ MODEL_DIR = huggingface_hub.snapshot_download(
22
  "ccmusic-database/CTIS",
23
  cache_dir="./__pycache__",
24
  )
25
+
26
+ else:
27
+ import modelscope
28
+
29
+ MODEL_DIR = modelscope.snapshot_download(
30
  "ccmusic-database/CTIS",
31
  cache_dir="./__pycache__",
32
  )
 
33
 
34
 
35
  def _L(zh_txt: str):