Spaces:
Sleeping
Sleeping
SeptAlfauzan committed on
Commit ·
713f67e
1
Parent(s): 22cbc5d
add: model from training with hyperparameters epoch=120, batch=32 for the SCB dataset and my own custom dataset
Browse files- app.py +10 -4
- models/OWN-DATASET-640-e120-b32-best.pt +3 -0
- models/SCB-640-e120-b32-best.pt +3 -0
- stream.py +55 -0
app.py
CHANGED
|
@@ -1,4 +1,5 @@
|
|
| 1 |
import gradio as gr
|
|
|
|
| 2 |
import torch
|
| 3 |
from ultralyticsplus import YOLO, render_result
|
| 4 |
|
|
@@ -11,23 +12,28 @@ def launch(
|
|
| 11 |
):
|
| 12 |
try:
|
| 13 |
model_path = "./models/student-behaviour-best.pt"
|
| 14 |
-
model = YOLO(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
results = model.predict(
|
| 17 |
image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size
|
| 18 |
)
|
| 19 |
box = results[0].boxes
|
| 20 |
-
print(box)
|
|
|
|
| 21 |
render = render_result(model=model, image=image, result=results[0])
|
| 22 |
return render
|
| 23 |
except Exception as e:
|
| 24 |
-
print(e)
|
| 25 |
return "./download.jpeg"
|
| 26 |
|
| 27 |
|
| 28 |
inputs = [
|
| 29 |
gr.Image(type="filepath", label="Input Image"),
|
| 30 |
-
gr.Slider(minimum=
|
| 31 |
gr.Slider(
|
| 32 |
minimum=0.0, maximum=1.0, value=0.4, step=0.1, label="Confidence Threshold"
|
| 33 |
),
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from PIL import Image
|
| 3 |
import torch
|
| 4 |
from ultralyticsplus import YOLO, render_result
|
| 5 |
|
|
|
|
| 12 |
):
|
| 13 |
try:
|
| 14 |
model_path = "./models/student-behaviour-best.pt"
|
| 15 |
+
model = YOLO(
|
| 16 |
+
"./student-behaviour-test-deploy/models/OWN-DATASET-640-e120-b32-best.pt"
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
# pil_image = Image.fromarray(image)
|
| 20 |
|
| 21 |
results = model.predict(
|
| 22 |
image, conf=conf_threshold, iou=iou_threshold, imgsz=image_size
|
| 23 |
)
|
| 24 |
box = results[0].boxes
|
| 25 |
+
# print(box)
|
| 26 |
+
|
| 27 |
render = render_result(model=model, image=image, result=results[0])
|
| 28 |
return render
|
| 29 |
except Exception as e:
|
| 30 |
+
print("error", e)
|
| 31 |
return "./download.jpeg"
|
| 32 |
|
| 33 |
|
| 34 |
inputs = [
|
| 35 |
gr.Image(type="filepath", label="Input Image"),
|
| 36 |
+
gr.Slider(minimum=256, maximum=1280, value=640, step=32, label="Image Size"),
|
| 37 |
gr.Slider(
|
| 38 |
minimum=0.0, maximum=1.0, value=0.4, step=0.1, label="Confidence Threshold"
|
| 39 |
),
|
models/OWN-DATASET-640-e120-b32-best.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5b433715b811e36de8e8e83e89bc84e808a66b68055eb2e4925a9dd5c95f78da
|
| 3 |
+
size 6260441
|
models/SCB-640-e120-b32-best.pt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c3297bff6efde17917d641eacf69137eee010ba34c17e837eff8721a3acc1ee0
|
| 3 |
+
size 6260697
|
stream.py
ADDED
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""Gradio demo: stream an uploaded audio file back to the client.

Two streaming modes are shown side by side:
  * "Stream as File"  — slice the audio with pydub and yield per-chunk files.
  * "Stream as Bytes" — read the raw file and yield byte chunks.
"""
import gradio as gr
from pydub import AudioSegment
from time import sleep

with gr.Blocks() as demo:
    input_audio = gr.Audio(label="Input Audio", type="filepath", format="mp3")
    with gr.Row():
        with gr.Column():
            stream_as_file_btn = gr.Button("Stream as File")
            # Renamed from `format` to avoid shadowing the builtin.
            audio_format = gr.Radio(["wav", "mp3"], value="wav", label="Format")
            stream_as_file_output = gr.Audio(streaming=True)

            def stream_file(audio_file, audio_format):
                """Yield chunk files exported from *audio_file*.

                pydub slices by milliseconds, so ``chunk_size = 1000``
                emits roughly one second of audio per chunk, written to
                ``/tmp/<i>.<audio_format>``.
                """
                audio = AudioSegment.from_file(audio_file)
                i = 0
                chunk_size = 1000  # milliseconds per chunk
                while chunk_size * i < len(audio):
                    chunk = audio[chunk_size * i : chunk_size * (i + 1)]
                    i += 1
                    if chunk:
                        file = f"/tmp/{i}.{audio_format}"
                        chunk.export(file, format=audio_format)
                        yield file
                        sleep(0.5)  # pace the stream so playback keeps up

            stream_as_file_btn.click(
                stream_file, [input_audio, audio_format], stream_as_file_output
            )

            gr.Examples(
                [["audio/cantina.wav", "wav"], ["audio/cantina.wav", "mp3"]],
                [input_audio, audio_format],
                fn=stream_file,
                outputs=stream_as_file_output,
            )

        with gr.Column():
            stream_as_bytes_btn = gr.Button("Stream as Bytes")
            stream_as_bytes_output = gr.Audio(format="bytes", streaming=True)

            def stream_bytes(audio_file):
                """Yield raw 20 kB byte chunks read from *audio_file*."""
                chunk_size = 20_000
                with open(audio_file, "rb") as f:
                    # Walrus loop: read() returns b"" at EOF, ending the stream.
                    while chunk := f.read(chunk_size):
                        yield chunk
                        sleep(1)  # pace the stream so playback keeps up

            stream_as_bytes_btn.click(
                stream_bytes, input_audio, stream_as_bytes_output
            )

if __name__ == "__main__":
    demo.queue().launch()