Update app.py

app.py CHANGED
@@ -24,13 +24,11 @@ body {
     animation: gradient 15s ease infinite;
 }
 @keyframes gradient { 0% { background-position: 0% 50%; } 50% { background-position: 100% 50%; } 100% { background-position: 0% 50%; } }
-
 /* General Layout & Typography */
 .gradio-container { max-width: 1320px !important; margin: auto !important; }
 #title { text-align: center; font-size: 3rem !important; font-weight: 700; color: #FFF; margin-bottom: 0.5rem; }
 #subtitle { text-align: center; color: #bebebe; margin-top: 0; margin-bottom: 40px; font-size: 1.2rem; font-weight: 300; }
 .gr-button { font-weight: bold !important; }
-
 /* Prediction Bar Styling */
 #predictions-column { background-color: rgba(255, 255, 255, 0.05); border-radius: 12px; padding: 1.5rem; }
 #predictions-column > .gr-label { display: none; }
@@ -80,28 +78,31 @@ def live_detection_stream(frame):
     return annotated_frame, create_prediction_html(probabilities)
 
 def process_image(image):
-    if image is None:
+    if image is None:
+        return None, create_prediction_html({})
     annotated_frame, probabilities = predictor.process_frame(image)
     return annotated_frame, create_prediction_html(probabilities)
 
 def process_video(video_path, progress=gr.Progress(track_tqdm=True)):
-    if video_path is None:
+    if video_path is None:
+        return None
     try:
         cap = cv2.VideoCapture(video_path)
-        frame_count = int(cap.get(
+        frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
         output_path = "processed_video.mp4"
         fourcc = cv2.VideoWriter_fourcc(*'mp4v')
         fps = cap.get(cv2.CAP_PROP_FPS)
-        width = int(cap.get(
-        height = int(cap.get(
+        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
         out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
         for _ in progress.tqdm(range(frame_count), desc="Processing Video"):
             ret, frame = cap.read()
-            if not ret:
-
+            if not ret:
+                break
+            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
             annotated_frame, _ = predictor.process_frame(frame_rgb)
             if annotated_frame is not None:
-                out.write(cv2.cvtColor(annotated_frame,
+                out.write(cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR))
         cap.release()
         out.release()
         return output_path
@@ -118,8 +119,10 @@ with gr.Blocks(css=CSS, theme=gr.themes.Base()) as demo:
     with gr.TabItem("Live Detection"):
         with gr.Row(equal_height=True):
             with gr.Column(scale=3):
-                #
-                live_feed = gr.Image(source="webcam", streaming=True, type="numpy", label="Live Feed",
+                # Webcam input
+                live_feed = gr.Image(source="webcam", streaming=True, type="numpy", label="Live Feed", height=550)
+                # Annotated output
+                live_output = gr.Image(type="numpy", label="Annotated Feed", interactive=False, height=550)
             with gr.Column(scale=2, elem_id="predictions-column"):
                 gr.Markdown("### Emotion Probabilities")
                 live_predictions = gr.HTML()
@@ -143,20 +146,21 @@ with gr.Blocks(css=CSS, theme=gr.themes.Base()) as demo:
 
     # --- EVENT LISTENERS ---
 
-    #
-    # It automatically handles starting and stopping. There is no need for separate buttons.
+    # Webcam -> Annotated + Predictions
     live_feed.stream(
         fn=live_detection_stream,
-        inputs=
-        outputs=[
+        inputs=live_feed,
+        outputs=[live_output, live_predictions],
     )
 
+    # Image processing
     image_button.click(process_image, [image_input], [image_input, image_predictions])
+
+    # Video processing
     video_button.click(process_video, [video_input], [video_output])
 
 # --- LAUNCH THE APP ---
 if predictor:
-    # Enabling the queue is essential for the video processing progress bar
     demo.queue().launch(debug=True)
 else:
-    print("\n[FATAL ERROR] Could not start the application.")
+    print("\n[FATAL ERROR] Could not start the application.")
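For readability, here is a sketch of the two upload handlers as they read after this commit, assembled from the '+' lines above. It is not the full app.py: the predictor object and the create_prediction_html helper are defined elsewhere in the file, and the try/except wrapper around the video loop (its except branch sits outside this diff) is omitted.

# Sketch of the updated handlers, assembled from the '+' lines above.
# Assumes predictor and create_prediction_html are defined earlier in app.py;
# the try/except around the video loop is left out because its except branch
# is not part of this diff.
import cv2
import gradio as gr

def process_image(image):
    # Guard against an empty input instead of letting the handler crash
    if image is None:
        return None, create_prediction_html({})
    annotated_frame, probabilities = predictor.process_frame(image)
    return annotated_frame, create_prediction_html(probabilities)

def process_video(video_path, progress=gr.Progress(track_tqdm=True)):
    if video_path is None:
        return None
    cap = cv2.VideoCapture(video_path)
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    output_path = "processed_video.mp4"
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    for _ in progress.tqdm(range(frame_count), desc="Processing Video"):
        ret, frame = cap.read()
        if not ret:
            break
        # OpenCV decodes frames as BGR; the predictor expects RGB, and the
        # writer needs BGR back before encoding
        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        annotated_frame, _ = predictor.process_frame(frame_rgb)
        if annotated_frame is not None:
            out.write(cv2.cvtColor(annotated_frame, cv2.COLOR_RGB2BGR))
    cap.release()
    out.release()
    return output_path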
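Likewise, a sketch of how the Live Detection tab and its stream listener fit together after this commit, combining the layout and event-listener hunks. CSS, live_detection_stream, and the other tabs live elsewhere in app.py, and source="webcam" follows the Gradio 3.x gr.Image API used in the diff (newer Gradio releases use sources=["webcam"] instead).

# Sketch of the live-detection wiring, combining the '+' lines from the layout
# and event-listener hunks. CSS, live_detection_stream, and the remaining tabs
# are defined elsewhere in app.py.
import gradio as gr

with gr.Blocks(css=CSS, theme=gr.themes.Base()) as demo:
    with gr.TabItem("Live Detection"):
        with gr.Row(equal_height=True):
            with gr.Column(scale=3):
                # Webcam input streams frames into the callback
                live_feed = gr.Image(source="webcam", streaming=True, type="numpy",
                                     label="Live Feed", height=550)
                # Annotated frames come back in a separate, read-only image
                live_output = gr.Image(type="numpy", label="Annotated Feed",
                                       interactive=False, height=550)
            with gr.Column(scale=2, elem_id="predictions-column"):
                gr.Markdown("### Emotion Probabilities")
                live_predictions = gr.HTML()

    # Each webcam frame is routed through live_detection_stream, which returns
    # the annotated frame and the probability HTML for the two outputs
    live_feed.stream(
        fn=live_detection_stream,
        inputs=live_feed,
        outputs=[live_output, live_predictions],
    )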