Spaces · Runtime error

piperod committed
Commit cd0d6f2 · Parent(s): 5bbee66
adding examples
app.py CHANGED

@@ -2,9 +2,11 @@ import gradio as gr
 import os
 import subprocess
 
+
 if os.getenv('SYSTEM') == 'spaces':
 
     subprocess.call('pip install -U openmim'.split())
+    subprocess.call('pip install python-dotenv'.split())
     subprocess.call('pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 torchaudio==0.12.1 --extra-index-url https://download.pytorch.org/whl/cu113'.split())
     subprocess.call('mim install mmcv>=2.0.0'.split())
     subprocess.call('mim install mmengine'.split())
@@ -14,22 +16,27 @@ if os.getenv('SYSTEM') == 'spaces':
 
 
 import cv2
-
+import dotenv
+dotenv.load_dotenv()
 import numpy as np
 import gradio as gr
 
 from inference import inference_frame
 import os
+import pathlib
 
 def analize_video(x):
-
+    print(x)
     path = '/tmp/test/'
     os.makedirs(path, exist_ok=True)
     videos = len(os.listdir(path))
     path = f'{path}{videos}'
     os.makedirs(path, exist_ok=True)
     outname = f'{path}_processed.mp4'
-
+    if os.path.exists(outname):
+        print('video already processed')
+        return outname
+    cap = cv2.VideoCapture(x)
     counter = 0
     while(cap.isOpened()):
         ret, frame = cap.read()
@@ -38,14 +45,19 @@ def analize_video(x):
             frame = inference_frame(frame)
             # write the flipped frame
             cv2.imwrite(name, frame)
+
            counter +=1
         else:
             break
     # Release everything if job is finished
     print(path)
-    os.system(f'''ffmpeg -framerate 20 -pattern_type glob -i '{path}/*.png' -c:v libx264 -pix_fmt yuv420p {outname}''')
+    os.system(f'''ffmpeg -framerate 20 -pattern_type glob -i '{path}/*.png' -c:v libx264 -pix_fmt yuv420p {outname} -y''')
     return outname
 
+def set_example_image(example: list) -> dict:
+    return gr.Video.update(value=example[0])
+
+
 with gr.Blocks(title='Shark Patrol',theme=gr.themes.Soft(),live=True,) as demo:
     gr.Markdown("Initial DEMO.")
     with gr.Tab("Shark Detector"):
@@ -56,12 +68,22 @@ with gr.Blocks(title='Shark Patrol',theme=gr.themes.Soft(),live=True,) as demo:
         #video_output.style(witdh='50%',height='50%')
 
         video_button = gr.Button("Analyze")
+        with gr.Row():
+            paths = sorted(pathlib.Path('videos_example').rglob('*.mp4'))
+            example_images = gr.Dataset(components=[video_input],
+                                        samples=[[path.as_posix()]
+                                                 for path in paths])
 
 
     with gr.Accordion("Open for More!"):
         gr.Markdown("Place holder for detection")
 
     video_button.click(analize_video, inputs=video_input, outputs=video_output)
-
+
+    example_images.click(fn=set_example_image,
+                         inputs=example_images,
+                         outputs=video_input)
+
 demo.queue()
-
+#if os.getenv('SYSTEM') == 'spaces':
+demo.launch(width='40%',auth=(os.environ.get('SHARK_USERNAME'), os.environ.get('SHARK_PASSWORD')))
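A note on the rewritten analize_video: it opens the upload with cv2.VideoCapture, runs inference_frame on every frame, writes the annotated frames out as PNGs, and finally shells out to ffmpeg (libx264 with yuv420p produces an MP4 that browsers can play, which is presumably why ffmpeg is preferred over OpenCV's own writer). For comparison, here is a minimal sketch of the same loop built on cv2.VideoWriter, which avoids the temporary PNG directory. process_video is a hypothetical name, fps=20.0 mirrors the -framerate 20 flag, and inference_frame is assumed to return a BGR numpy frame:

import cv2
from inference import inference_frame  # same helper the Space uses

def process_video(src: str, outname: str, fps: float = 20.0) -> str:
    # Hypothetical alternative to analize_video's PNG-then-ffmpeg pipeline.
    cap = cv2.VideoCapture(src)
    writer = None
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frame = inference_frame(frame)
        if writer is None:
            # Defer writer creation until the first annotated frame,
            # since inference may resize the image.
            h, w = frame.shape[:2]
            writer = cv2.VideoWriter(outname, cv2.VideoWriter_fourcc(*'mp4v'),
                                     fps, (w, h))
        writer.write(frame)
    cap.release()
    if writer is not None:
        writer.release()
    return outname

The trade-off: mp4v output from OpenCV is not as reliably browser-playable as ffmpeg's libx264/yuv420p, so the committed approach is the safer choice for a Gradio video component.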
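The example picker is wired by hand: a gr.Dataset lists the sample videos and set_example_image pushes the clicked sample into the input via gr.Video.update. Newer Gradio releases removed the per-component update helpers, and gr.Examples bundles the dataset rendering and the click wiring into one call. A sketch of that alternative, assuming the same videos_example directory:

import pathlib
import gradio as gr

with gr.Blocks() as demo:
    video_input = gr.Video()
    # gr.Examples renders the samples and fills video_input on click,
    # replacing the manual gr.Dataset + set_example_image wiring.
    gr.Examples(
        examples=[[p.as_posix()]
                  for p in sorted(pathlib.Path('videos_example').rglob('*.mp4'))],
        inputs=video_input,
    )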
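One caveat on the final launch line: os.environ.get returns None when a variable is unset, so a Space without SHARK_USERNAME and SHARK_PASSWORD configured would call demo.launch with auth=(None, None) instead of simply running without a login. A defensive variant (a sketch; demo is the gr.Blocks app defined above):

import os

username = os.environ.get('SHARK_USERNAME')
password = os.environ.get('SHARK_PASSWORD')
# Enable basic auth only when both credentials are configured;
# auth=None launches the app without a login screen.
auth = (username, password) if username and password else None
demo.launch(width='40%', auth=auth)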
|