Spaces:
Running
Running
admin committed on
Commit ·
94ce38a
1
Parent(s): 8becfc8
sync ms
Browse files
- README.md +1 -1
- app.py +3 -3
- model.py +20 -12
- requirements.txt +0 -1
- utils.py +9 -7
README.md
CHANGED
|
@@ -4,7 +4,7 @@ emoji: ☰
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: pink
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version:
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
license: mit
|
|
|
|
| 4 |
colorFrom: blue
|
| 5 |
colorTo: pink
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 6.3.0
|
| 8 |
app_file: app.py
|
| 9 |
pinned: true
|
| 10 |
license: mit
|
app.py
CHANGED
|
@@ -183,8 +183,8 @@ if __name__ == "__main__":
|
|
| 183 |
gr.Dropdown(choices=models, label=_L("选择模型"), value=models[0]),
|
| 184 |
],
|
| 185 |
outputs=[
|
| 186 |
-
gr.Textbox(label=_L("状态栏"),
|
| 187 |
-
gr.Textbox(label=_L("音频文件名"),
|
| 188 |
gr.Dataframe(label=_L("古筝演奏技法逐帧检测")),
|
| 189 |
],
|
| 190 |
examples=examples,
|
|
@@ -212,4 +212,4 @@ if __name__ == "__main__":
|
|
| 212 |
```"""
|
| 213 |
)
|
| 214 |
|
| 215 |
-
demo.launch()
|
|
|
|
| 183 |
gr.Dropdown(choices=models, label=_L("选择模型"), value=models[0]),
|
| 184 |
],
|
| 185 |
outputs=[
|
| 186 |
+
gr.Textbox(label=_L("状态栏"), buttons=["copy"]),
|
| 187 |
+
gr.Textbox(label=_L("音频文件名"), buttons=["copy"]),
|
| 188 |
gr.Dataframe(label=_L("古筝演奏技法逐帧检测")),
|
| 189 |
],
|
| 190 |
examples=examples,
|
|
|
|
| 212 |
```"""
|
| 213 |
)
|
| 214 |
|
| 215 |
+
demo.launch(css="#gradio-share-link-button-0 { display: none; }")
|
model.py
CHANGED
|
@@ -3,8 +3,6 @@ import torch.nn as nn
|
|
| 3 |
import torch.nn.functional as F
|
| 4 |
import torchvision.models as models
|
| 5 |
import numpy as np
|
| 6 |
-
from modelscope.msdatasets import MsDataset
|
| 7 |
-
from datasets import load_dataset
|
| 8 |
from utils import EN_US
|
| 9 |
|
| 10 |
|
|
@@ -81,11 +79,16 @@ class EvalNet:
|
|
| 81 |
raise ValueError("[Backbone not found] Please check if --model is correct!")
|
| 82 |
|
| 83 |
def _model_info(self, backbone: str):
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
backbone_info = self._get_backbone(backbone, backbone_list)
|
| 90 |
return (
|
| 91 |
str(backbone_info["type"]),
|
|
@@ -234,11 +237,16 @@ class t_EvalNet:
|
|
| 234 |
raise ValueError("[Backbone not found] Please check if --model is correct!")
|
| 235 |
|
| 236 |
def _model_info(self, backbone: str):
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
|
| 241 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
backbone_info = self._get_backbone(backbone, backbone_list)
|
| 243 |
return (
|
| 244 |
str(backbone_info["type"]),
|
|
|
|
| 3 |
import torch.nn.functional as F
|
| 4 |
import torchvision.models as models
|
| 5 |
import numpy as np
|
|
|
|
|
|
|
| 6 |
from utils import EN_US
|
| 7 |
|
| 8 |
|
|
|
|
| 79 |
raise ValueError("[Backbone not found] Please check if --model is correct!")
|
| 80 |
|
| 81 |
def _model_info(self, backbone: str):
|
| 82 |
+
if EN_US:
|
| 83 |
+
from datasets import load_dataset
|
| 84 |
+
|
| 85 |
+
backbone_list = load_dataset("monetjoe/cv_backbones", split="train")
|
| 86 |
+
|
| 87 |
+
else:
|
| 88 |
+
from modelscope.msdatasets import MsDataset
|
| 89 |
+
|
| 90 |
+
backbone_list = MsDataset.load("monetjoe/cv_backbones", split="train")
|
| 91 |
+
|
| 92 |
backbone_info = self._get_backbone(backbone, backbone_list)
|
| 93 |
return (
|
| 94 |
str(backbone_info["type"]),
|
|
|
|
| 237 |
raise ValueError("[Backbone not found] Please check if --model is correct!")
|
| 238 |
|
| 239 |
def _model_info(self, backbone: str):
|
| 240 |
+
if EN_US:
|
| 241 |
+
from datasets import load_dataset
|
| 242 |
+
|
| 243 |
+
backbone_list = load_dataset("monetjoe/cv_backbones", split="train")
|
| 244 |
+
|
| 245 |
+
else:
|
| 246 |
+
from modelscope.msdatasets import MsDataset
|
| 247 |
+
|
| 248 |
+
backbone_list = MsDataset.load("monetjoe/cv_backbones", split="v1")
|
| 249 |
+
|
| 250 |
backbone_info = self._get_backbone(backbone, backbone_list)
|
| 251 |
return (
|
| 252 |
str(backbone_info["type"]),
|
requirements.txt
CHANGED
|
@@ -1,5 +1,4 @@
|
|
| 1 |
librosa
|
| 2 |
matplotlib
|
| 3 |
-
modelscope[framework]
|
| 4 |
torch
|
| 5 |
torchvision
|
|
|
|
| 1 |
librosa
|
| 2 |
matplotlib
|
|
|
|
| 3 |
torch
|
| 4 |
torchvision
|
utils.py
CHANGED
|
@@ -1,7 +1,5 @@
|
|
| 1 |
import os
|
| 2 |
import torch
|
| 3 |
-
import modelscope
|
| 4 |
-
import huggingface_hub
|
| 5 |
import numpy as np
|
| 6 |
from torchvision.transforms import Compose, Resize, Normalize
|
| 7 |
|
|
@@ -26,17 +24,21 @@ ZH2EN = {
|
|
| 26 |
"技法": "Tech",
|
| 27 |
}
|
| 28 |
|
| 29 |
-
|
| 30 |
-
huggingface_hub
|
|
|
|
|
|
|
| 31 |
"ccmusic-database/Guzheng_Tech99",
|
| 32 |
cache_dir="./__pycache__",
|
| 33 |
)
|
| 34 |
-
|
| 35 |
-
|
|
|
|
|
|
|
|
|
|
| 36 |
"ccmusic-database/Guzheng_Tech99",
|
| 37 |
cache_dir="./__pycache__",
|
| 38 |
)
|
| 39 |
-
)
|
| 40 |
|
| 41 |
|
| 42 |
def _L(zh_txt: str):
|
|
|
|
| 1 |
import os
|
| 2 |
import torch
|
|
|
|
|
|
|
| 3 |
import numpy as np
|
| 4 |
from torchvision.transforms import Compose, Resize, Normalize
|
| 5 |
|
|
|
|
| 24 |
"技法": "Tech",
|
| 25 |
}
|
| 26 |
|
| 27 |
+
if EN_US:
|
| 28 |
+
import huggingface_hub
|
| 29 |
+
|
| 30 |
+
MODEL_DIR = huggingface_hub.snapshot_download(
|
| 31 |
"ccmusic-database/Guzheng_Tech99",
|
| 32 |
cache_dir="./__pycache__",
|
| 33 |
)
|
| 34 |
+
|
| 35 |
+
else:
|
| 36 |
+
import modelscope
|
| 37 |
+
|
| 38 |
+
MODEL_DIR = modelscope.snapshot_download(
|
| 39 |
"ccmusic-database/Guzheng_Tech99",
|
| 40 |
cache_dir="./__pycache__",
|
| 41 |
)
|
|
|
|
| 42 |
|
| 43 |
|
| 44 |
def _L(zh_txt: str):
|