Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -78,8 +78,9 @@ def process_frame(frame, selected_model):
|
|
| 78 |
ax.set_title('Top 5 Emotions')
|
| 79 |
ax.invert_yaxis() # Invert y-axis to have the highest probability at the top
|
| 80 |
|
| 81 |
-
# Adjust x-axis labels
|
| 82 |
-
ax.
|
|
|
|
| 83 |
|
| 84 |
# Ensure all labels are fully visible
|
| 85 |
plt.tight_layout()
|
|
@@ -126,15 +127,15 @@ with gr.Blocks() as app:
|
|
| 126 |
model_dropdown_video = gr.Dropdown(choices=["ViT-B/32", "ViT-B/16", "ViT-L/14"], label="Model", value="ViT-B/16")
|
| 127 |
gr.Markdown("Upload a video to detect faces and recognize emotions.")
|
| 128 |
video_input = gr.Video()
|
| 129 |
-
frame_slider = gr.Slider(minimum=0, maximum=100, step=1, label="Frame Index", value=0)
|
| 130 |
output_frame = gr.Image(label="Processed Frame")
|
|
|
|
| 131 |
output_graph = gr.Image(label="Results Graph")
|
| 132 |
|
| 133 |
def update_slider_and_process(video):
|
| 134 |
cap = cv2.VideoCapture(video)
|
| 135 |
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
|
| 136 |
cap.release()
|
| 137 |
-
processed_frame, graph = process_video(video, "ViT-
|
| 138 |
return gr.update(maximum=total_frames-1), processed_frame, graph
|
| 139 |
|
| 140 |
def update_frame(video, model, frame_idx):
|
|
|
|
| 78 |
ax.set_title('Top 5 Emotions')
|
| 79 |
ax.invert_yaxis() # Invert y-axis to have the highest probability at the top
|
| 80 |
|
| 81 |
+
# Adjust x-axis labels to show only 3 decimal places
|
| 82 |
+
ax.set_xticks(ax.get_xticks())
|
| 83 |
+
ax.set_xticklabels([f'{x:.3f}' for x in ax.get_xticks()], rotation=0, ha='center')
|
| 84 |
|
| 85 |
# Ensure all labels are fully visible
|
| 86 |
plt.tight_layout()
|
|
|
|
| 127 |
model_dropdown_video = gr.Dropdown(choices=["ViT-B/32", "ViT-B/16", "ViT-L/14"], label="Model", value="ViT-B/16")
|
| 128 |
gr.Markdown("Upload a video to detect faces and recognize emotions.")
|
| 129 |
video_input = gr.Video()
|
|
|
|
| 130 |
output_frame = gr.Image(label="Processed Frame")
|
| 131 |
+
frame_slider = gr.Slider(minimum=0, maximum=100, step=1, label="Frame Index", value=0)
|
| 132 |
output_graph = gr.Image(label="Results Graph")
|
| 133 |
|
| 134 |
def update_slider_and_process(video):
    """Prepare the UI after a video upload.

    Reads the uploaded video's total frame count, then processes frame 0
    with the default model so something is shown immediately.

    Returns:
        A 3-tuple of (slider update capped at the last frame index,
        processed first frame image, results graph image).
    """
    # Open the file just long enough to query its frame count.
    capture = cv2.VideoCapture(video)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    capture.release()

    # Run the pipeline on the first frame with the default model so the
    # outputs are populated before the user touches the slider.
    first_frame, results_graph = process_video(video, "ViT-B/16", 0)

    slider_update = gr.update(maximum=frame_count - 1)
    return slider_update, first_frame, results_graph
|
| 140 |
|
| 141 |
def update_frame(video, model, frame_idx):
|