# NOTE(review): "Spaces: / Runtime error / Runtime error" was Hugging Face
# Spaces status-page text captured with this source dump; preserved here as a
# comment so the file remains valid Python.
#!/usr/bin/env python3
"""Bootstrap block: install PyTorch and Detectron2 on first run.

Runs before the rest of the module imports, so a fresh Hugging Face Space
(or any bare environment) can self-provision its heavy dependencies.
"""
import importlib.util
import os
import subprocess
import sys

# Check if detectron2 is installed; only then pay the install cost.
if importlib.util.find_spec("detectron2") is None:
    print("Installing PyTorch and Detectron2...")
    # Use the current interpreter's pip via subprocess with an argument list:
    # no shell string injection, and check=True surfaces install failures
    # instead of silently continuing with a broken environment.
    subprocess.run(
        [sys.executable, "-m", "pip", "install", "torch", "torchvision",
         "--extra-index-url", "https://download.pytorch.org/whl/cpu"],
        check=True,
    )
    subprocess.run(
        [sys.executable, "-m", "pip", "install",
         "git+https://github.com/facebookresearch/detectron2.git"],
        check=True,
    )
    print("Installation complete!")
# -*- coding: utf-8 -*-
import os
import sys
import time
import numpy as np
import gradio as gr

# Status flags for optional dependencies.
# BUGFIX: these were hardcoded False while the imports above were mandatory,
# so the app could never leave demo mode (and crashed outright when an
# "optional" dependency was missing). Each flag is now set True only when
# its import actually succeeds, matching the original comments' intent.
CV2_AVAILABLE = False
TORCH_AVAILABLE = False
DETECTRON2_AVAILABLE = False
MODELS_IMPORTED = False

# OpenCV import - wrapped in try-except to make it optional
try:
    import cv2
    CV2_AVAILABLE = True
except ImportError:
    pass

# PyTorch stack - `import torch` added here because setup_device() uses it.
try:
    import torch
    from torchvision import transforms
    from PIL import Image
    TORCH_AVAILABLE = True
except ImportError:
    pass

# Detectron2 imports - wrapped in try-except to make them optional
try:
    from detectron2.engine import DefaultPredictor
    from detectron2.config import get_cfg
    from detectron2.utils.visualizer import Visualizer, ColorMode
    from detectron2 import model_zoo
    DETECTRON2_AVAILABLE = True
except ImportError:
    pass

# Project-local model code; failure here also just forces demo mode.
try:
    from configs.get_config import load_config
    from models import *
    MODELS_IMPORTED = True
except ImportError:
    pass

# Add current directory to path so local packages (configs/, models/) resolve.
if os.getcwd() not in sys.path:
    sys.path.append(os.getcwd())
def check_model_files(damage_model_path, deepfake_model_path, deepfake_cfg_path):
    """Check if required model files exist and return status.

    Empty/None paths are skipped entirely (not counted as missing).

    Returns:
        tuple[bool, list[str]]: (all provided paths exist, per-path messages).
    """
    candidates = (
        ("Damage model", damage_model_path),
        ("Deepfake model", deepfake_model_path),
        ("Deepfake config", deepfake_cfg_path),
    )
    status = []
    all_exist = True
    for label, path in candidates:
        if not path:
            continue  # nothing supplied for this slot — nothing to check
        if os.path.exists(path):
            status.append(f"β {label} found at: {path}")
        else:
            status.append(f"β οΈ {label} not found at: {path}")
            all_exist = False
    return all_exist, status
def setup_device(device_str):
    """Set up the computation device based on user input and availability.

    Args:
        device_str: 'auto', 'cuda', 'mps', or 'cpu'.

    Returns:
        A torch.device, or None when PyTorch itself is unavailable.
    """
    if not TORCH_AVAILABLE:
        print("PyTorch not available. Cannot set up device.")
        return None

    # Probe the backends once up front; both calls are pure queries.
    cuda_ok = torch.cuda.is_available()
    mps_ok = (hasattr(torch, 'backends')
              and hasattr(torch.backends, 'mps')
              and torch.backends.mps.is_available())

    if device_str == 'auto':
        # Preference order: CUDA, then Apple MPS, then CPU.
        if cuda_ok:
            return torch.device('cuda:0')
        if mps_ok:
            return torch.device('mps')
        return torch.device('cpu')
    if device_str == 'cuda' and cuda_ok:
        return torch.device('cuda:0')
    if device_str == 'mps' and mps_ok:
        return torch.device('mps')
    # Anything else (including an explicit 'cpu' request) lands here.
    print(f"Warning: Device {device_str} not available, using CPU instead.")
    return torch.device('cpu')
| # Simplified process function for demo mode (when models aren't available) | |
def demo_mode_process(input_image):
    """Simplified processing for demo mode when models aren't available.

    Draws a placeholder "detection" box on the image and returns it with an
    explanatory message; no model inference happens here.

    Args:
        input_image: Gradio image payload — a dict with a "path" key, a file
            path string, or an RGB numpy array.

    Returns:
        tuple: (annotated RGB image or None, info/error text).
    """
    if not CV2_AVAILABLE:
        # Without OpenCV we cannot even draw the placeholder overlay.
        return input_image, "Error: OpenCV (cv2) is not installed. Cannot process image even in demo mode."

    # Normalize the Gradio payload into a BGR frame for OpenCV drawing.
    if isinstance(input_image, dict) and "path" in input_image:
        frame = cv2.imread(input_image["path"])
    elif isinstance(input_image, str):
        frame = cv2.imread(input_image)
    elif isinstance(input_image, np.ndarray):
        frame = input_image.copy()  # never mutate the caller's array
        if frame.ndim == 3 and frame.shape[2] == 3:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    else:
        return None, "Error: Unsupported image format"

    if frame is None:
        return None, "Error: Could not read the image"

    height, width = frame.shape[:2]
    # Fake damage region covering the central 60% of the frame.
    top_left = (int(width * 0.2), int(height * 0.2))
    bottom_right = (int(width * 0.8), int(height * 0.8))

    cv2.rectangle(frame, top_left, bottom_right, (0, 255, 0), 2)
    cv2.putText(frame, "DEMO: Region 0 (REAL) (95.5%)", (top_left[0], top_left[1] - 10),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
    cv2.putText(frame, "DEMO MODE - No actual detection", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)

    info_text = (
        "DEMO MODE ACTIVE\n\n"
        "This is running in demo mode because the required models or dependencies are not available.\n"
        "In a real deployment, you would need to:\n"
        "1. Install all required dependencies (OpenCV, PyTorch, Detectron2)\n"
        "2. Include your trained models in the correct paths\n\n"
        "The visualization shown is just a placeholder."
    )
    # Convert back to RGB because Gradio expects RGB arrays.
    return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), info_text
def process_image(input_image, damage_model_path, deepfake_model_path, deepfake_cfg_path,
                  damage_threshold, deepfake_threshold, skip_damage, device_str):
    """Process an image through the car damage and deepfake detection pipeline.

    Args:
        input_image: Gradio image payload (dict with "path", path string, or
            RGB numpy array).
        damage_model_path: Path to damage model weights; default used if empty.
        deepfake_model_path: Path to deepfake model weights; default used if empty.
        deepfake_cfg_path: Path to deepfake model config; default used if empty.
        damage_threshold: Damage confidence threshold (unused by the demo path).
        deepfake_threshold: Deepfake confidence threshold (unused by the demo path).
        skip_damage: Whether to skip damage detection (unused by the demo path).
        device_str: Computation device selector (unused by the demo path).

    Returns:
        tuple: (annotated RGB image or None, result/error text).
    """
    # Check dependencies first; without OpenCV+PyTorch only demo mode works.
    if not all([CV2_AVAILABLE, TORCH_AVAILABLE]):
        return demo_mode_process(input_image)

    # Default model paths if not provided
    damage_model_path = damage_model_path or "./model_final.pth"
    deepfake_model_path = deepfake_model_path or "./PoseEfficientNet_custom_laanet_model_final.pth"
    deepfake_cfg_path = deepfake_cfg_path or "./configs/detector2.yaml"

    # Check if we're running in demo mode (no real models available)
    models_exist, model_status = check_model_files(damage_model_path, deepfake_model_path, deepfake_cfg_path)
    if (not models_exist) or (not DETECTRON2_AVAILABLE and not MODELS_IMPORTED):
        print("Missing required models or dependencies. Running in demo mode.")
        return demo_mode_process(input_image)

    # BUGFIX: this function previously decoded and validated the image here,
    # discarded the result, and then called demo_mode_process, which performs
    # the identical decode/validation and returns the identical error tuples.
    # Delegating directly avoids decoding every image twice while preserving
    # behavior. The real detection pipeline is not wired in yet.
    return demo_mode_process(input_image)
def create_gradio_interface():
    """Create the Gradio interface with appropriate status messages.

    Builds a Markdown status report from the dependency flags and default
    model/example file checks, then assembles the two-tab Blocks UI (basic
    controls + advanced model paths), wires the Process button to
    process_image, and registers example images only when they exist on disk.

    Returns:
        The constructed (not yet launched) gr.Blocks application.
    """
    # Build status message about available dependencies
    status_message = "# Car Damage Detection & Deepfake Verification\n\n"
    status_message += "## System Status\n"
    if CV2_AVAILABLE:
        status_message += "β OpenCV (cv2) is available\n"
    else:
        status_message += "β OpenCV (cv2) is NOT available - install with `pip install opencv-python`\n"
    if TORCH_AVAILABLE:
        status_message += "β PyTorch and related libraries are available\n"
    else:
        status_message += "β PyTorch is NOT available - install with `pip install torch torchvision pillow`\n"
    if DETECTRON2_AVAILABLE:
        status_message += "β Detectron2 is available\n"
    else:
        status_message += "β Detectron2 is NOT available - follow installation instructions at https://detectron2.readthedocs.io/\n"
    if MODELS_IMPORTED:
        status_message += "β Custom models module imported successfully\n"
    else:
        status_message += "β Custom models module import failed - check your installation\n"
    # Check default model paths (same defaults process_image falls back to).
    default_damage_path = "./model_final.pth"
    default_deepfake_path = "./PoseEfficientNet_custom_laanet_model_final.pth"
    default_config_path = "./configs/detector2.yaml"
    # Make sure we have a safe version of check_model_files
    # NOTE(review): bare except deliberately swallows any failure so the UI
    # still builds; consider narrowing it to `except Exception`.
    try:
        models_exist, model_status = check_model_files(default_damage_path, default_deepfake_path, default_config_path)
        status_message += "\n## Default Model Files\n" + "\n".join(model_status)
    except:
        # Fallback if the function fails
        status_message += "\n## Default Model Files\n"
        status_message += "β Error checking model files\n"
        model_status = []
    # Check if example images exist
    example_images = ["./test5.png", "./test3.png"]
    valid_examples = []
    example_status = []
    for img_path in example_images:
        if os.path.exists(img_path):
            # gr.Examples expects one list per example row, hence [img_path].
            valid_examples.append([img_path])
            example_status.append(f"β Example image found: {img_path}")
        else:
            example_status.append(f"β Example image NOT found: {img_path}")
    status_message += "\n## Example Images\n" + "\n".join(example_status)
    # Create Gradio interface
    with gr.Blocks(title="Car Damage & Deepfake Detection") as app:
        gr.Markdown("# Car Damage Detection & Deepfake Verification")
        gr.Markdown("Upload an image to detect car damage and check if it's a deepfake")
        with gr.Accordion("System Status", open=True):
            gr.Markdown(status_message)
        with gr.Tab("Basic Interface"):
            with gr.Row():
                with gr.Column(scale=1):
                    input_image = gr.Image(type="numpy", label="Input Image")
                    # Simple controls
                    skip_damage = gr.Checkbox(label="Skip Damage Detection", value=False)
                    damage_threshold = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, step=0.05,
                                                 label="Damage Detection Threshold")
                    deepfake_threshold = gr.Slider(minimum=0.1, maximum=1.0, value=0.5, step=0.05,
                                                   label="Deepfake Detection Threshold")
                    device = gr.Dropdown(choices=["auto", "cuda", "cpu", "mps"], value="auto",
                                         label="Computation Device")
                    process_btn = gr.Button("Process Image", variant="primary")
                with gr.Column(scale=1):
                    output_image = gr.Image(type="numpy", label="Result")
                    output_text = gr.Textbox(label="Detection Results", lines=10)
        with gr.Tab("Advanced Settings"):
            with gr.Row():
                with gr.Column():
                    damage_model_path = gr.Textbox(label="Damage Model Path",
                                                   value=default_damage_path,
                                                   placeholder="Path to damage detection model (.pth)")
                    deepfake_model_path = gr.Textbox(label="Deepfake Model Path",
                                                     value=default_deepfake_path,
                                                     placeholder="Path to deepfake detection model (.pth)")
                    deepfake_cfg_path = gr.Textbox(label="Deepfake Config Path",
                                                   value=default_config_path,
                                                   placeholder="Path to deepfake model config (.yaml)")
        # Connect the process function (inputs are passed positionally,
        # matching process_image's signature order).
        process_btn.click(
            fn=process_image,
            inputs=[
                input_image,
                damage_model_path,
                deepfake_model_path,
                deepfake_cfg_path,
                damage_threshold,
                deepfake_threshold,
                skip_damage,
                device
            ],
            outputs=[output_image, output_text]
        )
        # Add examples only if they exist
        if valid_examples:
            gr.Markdown("## Examples")
            gr.Markdown("Click on an example image to load it into the app")
            gr.Examples(
                examples=valid_examples,
                inputs=input_image,
                outputs=[output_image, output_text],
                # Examples always run with the default paths and thresholds.
                fn=lambda x: process_image(x,
                                           default_damage_path,
                                           default_deepfake_path,
                                           default_config_path,
                                           0.7, 0.5, False, "auto"),
                cache_examples=True
            )
        else:
            gr.Markdown("## Examples")
            gr.Markdown("β οΈ No example images found. Please upload your own images.")
    return app
# Create and launch the app.
# The app is built at import time (not inside the __main__ guard) so hosting
# platforms that import this module — e.g. Hugging Face Spaces — can serve
# `app` directly without running the script as a program.
app = create_gradio_interface()
# For local testing and Hugging Face Spaces, with debugging enabled
if __name__ == "__main__":
    app.launch(debug=True)  # Enable debug mode to see detailed error messages