# Hugging Face Space app (page-header residue removed; the Space was showing "Runtime error")
| import gradio as gr | |
| import mlflow | |
| import os | |
| import time | |
| import tempfile | |
| from ultralytics import YOLO | |
| from PIL import Image | |
| import cv2 | |
| import numpy as np | |
# ==============================
# MLflow Configuration using Secrets
# ==============================
# Read DagsHub/MLflow credentials from the Space's secret store (Settings -> Secrets).
tracking_uri = os.getenv("MLFLOW_TRACKING_URI")
username = os.getenv("MLFLOW_TRACKING_USERNAME")
password = os.getenv("MLFLOW_TRACKING_PASSWORD")

# Fail fast with a clear message if any secret is missing.
# NOTE(review): the original message contained a mojibake glyph ("β") from a
# bad paste; restored to plain ASCII.
if not all([tracking_uri, username, password]):
    raise ValueError(
        "MLflow Secrets are not configured! "
        "Go to Space Settings -> Secrets and verify the names"
    )

# Re-export so the MLflow client picks the values up for authenticated requests.
os.environ["MLFLOW_TRACKING_URI"] = tracking_uri
os.environ["MLFLOW_TRACKING_USERNAME"] = username
os.environ["MLFLOW_TRACKING_PASSWORD"] = password

mlflow.set_experiment("YOLOv12s_Inference_Logs")
print("MLflow configured successfully using secrets!")

# ==============================
# Load Model
# ==============================
# YOLOv12s weights expected next to this script in the Space repo.
model = YOLO("Yolo12s.pt")
# ==============================
# Inference with Full MLflow Tracking
# ==============================
def _infer_image(media_path):
    # Purpose: single-image detection, artifact logging, annotated output.
    img = Image.open(media_path).convert("RGB")
    results = model(np.array(img))[0]
    annotated = results.plot()  # annotated frame as a BGR ndarray
    output_img = Image.fromarray(annotated[..., ::-1])  # BGR -> RGB for PIL

    # Log input/output images as MLflow artifacts from a throwaway directory.
    with tempfile.TemporaryDirectory() as tmpdir:
        in_path = os.path.join(tmpdir, "input.jpg")
        out_path = os.path.join(tmpdir, "output.jpg")
        img.save(in_path)
        output_img.save(out_path)
        mlflow.log_artifact(in_path, artifact_path="input")
        mlflow.log_artifact(out_path, artifact_path="output")

    detections = len(results.boxes) if results.boxes is not None else 0
    mlflow.log_metric("detections_count", detections)
    return img, output_img, None, None, f"β **Detection Complete!**\n\nπ **Objects Detected:** {detections}"


def _infer_video(media_path):
    # Purpose: frame-by-frame detection, annotated re-encode, metric logging.
    cap = cv2.VideoCapture(media_path)
    if not cap.isOpened():
        # BUG FIX: original never checked whether the video opened.
        return None, None, None, None, "β οΈ Could not open the uploaded video"

    fps = cap.get(cv2.CAP_PROP_FPS) or 30
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # BUG FIX: original wrote a fixed "output_video.mp4" in the CWD, so two
    # concurrent requests would clobber each other's output.
    output_video = os.path.join(tempfile.mkdtemp(), "output_video.mp4")
    writer = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc(*"mp4v"), fps, (w, h))

    frame_count = 0
    total_detections = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            results = model(frame)[0]
            writer.write(results.plot())
            frame_count += 1
            total_detections += len(results.boxes) if results.boxes is not None else 0
    finally:
        # Release capture/writer even if inference raises mid-stream.
        cap.release()
        writer.release()

    mlflow.log_artifact(media_path, artifact_path="input_video")
    mlflow.log_artifact(output_video, artifact_path="output_video")

    # BUG FIX: the original summary message divided total_detections/frame_count
    # unguarded and raised ZeroDivisionError on an empty/unreadable video.
    avg = total_detections / frame_count if frame_count > 0 else 0.0
    mlflow.log_metric("frames_processed", frame_count)
    mlflow.log_metric("total_detections", total_detections)
    mlflow.log_metric("avg_detections_per_frame", avg)

    result_message = (
        f"β **Video Processing Complete!**\n\nπΉ **Frames Processed:** {frame_count}"
        f"\nπ **Total Detections:** {total_detections}\nπ **Average per Frame:** {avg:.2f}"
    )
    return None, None, media_path, output_video, result_message


def run_inference(media_file, media_type):
    """Run YOLO detection on an uploaded image or video, logging the run to MLflow.

    Args:
        media_file: value from gr.File -- either a tempfile-like object with a
            ``.name`` attribute or (in newer Gradio versions) a plain path str.
        media_type: "Image" or "Video", from the radio selector.

    Returns:
        5-tuple ``(orig_img, detected_img, orig_video_path, detected_video_path,
        status_markdown)``; the slots not relevant to the media type are None.
    """
    if media_file is None:
        return None, None, None, None, "β οΈ Please upload a file first"

    # Accept both the tempfile wrapper and a bare path string.
    media_path = getattr(media_file, "name", media_file)

    with mlflow.start_run(run_name=f"Inference_{int(time.time())}"):
        mlflow.log_param("media_type", media_type)
        mlflow.log_param("model", "YOLOv12s")
        mlflow.log_param("timestamp", time.strftime("%Y-%m-%d %H:%M:%S"))

        if media_type == "Image":
            return _infer_image(media_path)
        return _infer_video(media_path)
# ==============================
# Modern Aesthetic UI
# ==============================
# Custom stylesheet for the Gradio page: Inter font, purple gradient chrome,
# card-style containers, hover effects, themed scrollbar, and a fade-in
# animation. This is a runtime string consumed by Gradio -- content unchanged.
css = """
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@400;600;700&display=swap');
* {
font-family: 'Inter', sans-serif;
}
body {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
}
.gradio-container {
max-width: 1400px !important;
margin: 40px auto !important;
background: rgba(255, 255, 255, 0.95) !important;
border-radius: 24px !important;
box-shadow: 0 20px 60px rgba(0, 0, 0, 0.3) !important;
padding: 0 !important;
overflow: hidden !important;
}
/* Header Styling */
.header-container {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
padding: 48px 40px;
text-align: center;
border-radius: 24px 24px 0 0;
}
.header-container h1 {
color: white !important;
font-size: 2.8em !important;
font-weight: 700 !important;
margin: 0 0 12px 0 !important;
text-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
}
.header-container p {
color: rgba(255, 255, 255, 0.95) !important;
font-size: 1.1em !important;
margin: 0 !important;
}
/* Main Content Area */
.main-content {
padding: 48px 40px;
}
/* File Upload Area */
.file-upload-area {
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
border-radius: 16px;
padding: 32px;
border: 2px dashed #667eea;
transition: all 0.3s ease;
}
.file-upload-area:hover {
border-color: #764ba2;
transform: translateY(-2px);
box-shadow: 0 8px 16px rgba(102, 126, 234, 0.2);
}
/* Buttons */
button.primary {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%) !important;
border: none !important;
color: white !important;
font-weight: 600 !important;
font-size: 1.1em !important;
padding: 16px 48px !important;
border-radius: 12px !important;
cursor: pointer !important;
transition: all 0.3s ease !important;
box-shadow: 0 4px 12px rgba(102, 126, 234, 0.4) !important;
}
button.primary:hover {
transform: translateY(-2px) !important;
box-shadow: 0 8px 20px rgba(102, 126, 234, 0.6) !important;
}
/* Radio Buttons */
.radio-group label {
background: white;
padding: 12px 24px;
border-radius: 8px;
border: 2px solid #e5e7eb;
cursor: pointer;
transition: all 0.3s ease;
}
.radio-group label:hover {
border-color: #667eea;
background: #f5f7fa;
}
/* Output Areas */
.output-image, .output-video {
border-radius: 16px;
overflow: hidden;
box-shadow: 0 8px 24px rgba(0, 0, 0, 0.1);
background: white;
}
/* Info Box */
.info-box {
background: linear-gradient(135deg, #f5f7fa 0%, #c3cfe2 100%);
border-radius: 12px;
padding: 24px;
border-left: 4px solid #667eea;
}
/* Custom Scrollbar */
::-webkit-scrollbar {
width: 8px;
}
::-webkit-scrollbar-track {
background: #f1f1f1;
}
::-webkit-scrollbar-thumb {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: #764ba2;
}
/* Animations */
@keyframes fadeIn {
from {
opacity: 0;
transform: translateY(20px);
}
to {
opacity: 1;
transform: translateY(0);
}
}
.animate-in {
animation: fadeIn 0.6s ease-out;
}
"""
# BUG FIX: in released Gradio versions (3.x-5.x) `css` and `theme` are
# arguments to the gr.Blocks() constructor, not to launch(); passing them to
# launch() raises TypeError at startup -- the likely cause of the Space's
# "Runtime error". NOTE(review): confirm the pinned Gradio version; the
# constructor form below is the documented API.
with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
    # Header banner (static HTML, styled by the css string above)
    gr.HTML("""
<div class="header-container animate-in">
<h1>π YOLOv12s Vehicle Detection</h1>
<p>Detect vehicles in Egyptian streets with state-of-the-art AI β’ All inferences logged to DagsHub MLflow</p>
</div>
""")
    with gr.Row(elem_classes="main-content"):
        # Left column: upload controls
        with gr.Column(scale=1):
            gr.Markdown("### π Upload Media")
            media = gr.File(
                label="Drop your image or video here",
                file_types=[".jpg", ".jpeg", ".png", ".mp4", ".avi"],
                elem_classes="file-upload-area",
            )
            gr.Markdown("### π― Media Type")
            media_type = gr.Radio(
                ["Image", "Video"],
                label="Select type",
                value="Image",
                elem_classes="radio-group",
            )
            btn = gr.Button("π Run Detection", variant="primary", size="lg", elem_classes="primary")
            gr.Markdown("""
---
### π Features
- Real-time vehicle detection
- Support for images & videos
- Auto-logging to DagsHub
- Detailed metrics tracking
""")
        # Right column: detection outputs
        with gr.Column(scale=2):
            gr.Markdown("### π¬ Detection Results")
            with gr.Tabs():
                with gr.Tab("πΈ Image Results"):
                    with gr.Row():
                        img_original = gr.Image(label="Original Image", height=400, elem_classes="output-image")
                        img_detected = gr.Image(label="Detected Objects", height=400, elem_classes="output-image")
                with gr.Tab("π₯ Video Results"):
                    with gr.Row():
                        vid_original = gr.Video(label="Original Video", height=400, elem_classes="output-video")
                        vid_detected = gr.Video(label="Detected Objects", height=400, elem_classes="output-video")
            gr.Markdown("### π Run Information")
            info = gr.Markdown(
                "**Ready to detect...** Upload a file and click 'Run Detection' to start!",
                elem_classes="info-box",
            )
    # Wire the button to the inference function; output order must match the
    # 5-tuple returned by run_inference.
    btn.click(
        fn=run_inference,
        inputs=[media, media_type],
        outputs=[img_original, img_detected, vid_original, vid_detected, info],
    )
    # Footer
    gr.HTML("""
<div style="text-align: center; padding: 32px; color: #6b7280;">
<p>Powered by YOLOv12s β’ MLflow Tracking β’ DagsHub Integration</p>
</div>
""")

demo.launch()