Spaces:
Sleeping
Sleeping
import gradio as gr
import google.generativeai as genai
import os
import time
import json  # Needed to parse the model's JSON output
from dotenv import load_dotenv

# --- Configuration ---
# Pull GEMINI_API_KEY from a local .env file (no-op when absent, e.g. on Spaces).
load_dotenv()

try:
    genai.configure(api_key=os.environ["GEMINI_API_KEY"])
except KeyError:
    # Fail fast at startup when the key is missing rather than erroring per-request.
    raise gr.Error("FATAL: GEMINI_API_KEY not found. Please set it in your Hugging Face Space secrets.")
| # --- Core Function with JSON Parsing --- | |
def analyze_device_condition(video_file_path):
    """Analyze a video of a device and return structured condition data.

    Uploads the video to the Gemini Files API, waits for server-side
    processing, asks the model for a strict three-key JSON verdict, and
    parses it into three strings for the Gradio output fields.

    Args:
        video_file_path: Local filesystem path to the uploaded/recorded
            video, or a falsy value when nothing was provided.

    Returns:
        tuple[str, str, str]: (device_type, condition, reason). On any
        error the first element carries the message and the other two
        are empty strings.
    """
    if not video_file_path:
        return "Please upload video", "", ""  # Return empty strings for the other fields

    video_file = None
    try:
        # 1. Upload the file and poll until Google finishes processing it.
        print("Log: Uploading file to Google...")
        video_file = genai.upload_file(path=video_file_path)
        while video_file.state.name == "PROCESSING":
            print("Log: Waiting for video processing...")
            time.sleep(5)
            video_file = genai.get_file(video_file.name)
        if video_file.state.name == "FAILED":
            return "Error: Video processing failed.", "", ""
        print("Log: File processed successfully.")

        # 2. Prompt constrains the model to a fixed three-key JSON schema.
        prompt = """
        Analyze the provided video of a device. Respond ONLY with a valid JSON object.
        The JSON object must have the following three keys and nothing else:
        1. "device_type": A short string identifying the device (e.g., "iPhone 14 Pro", "Washing Machine", "Laptop").
        2. "condition": A single word describing its condition. Choose from: "Mint", "Excellent", "Good", "Fair", "Poor".
        3. "reason": A brief string explaining the condition rating, mentioning specific defects like "minor screen scratches", "dents on corner", or "clean".
        Example JSON output:
        {
            "device_type": "Samsung Galaxy S22",
            "condition": "Fair",
            "reason": "Visible cracks on the screen and scratches on the back panel."
        }
        """

        # 3. Call the Gemini model, requesting a JSON MIME type so the
        #    response body is directly parseable.
        model = genai.GenerativeModel(model_name="gemini-1.5-pro-latest")
        print("Log: Sending prompt and video to Gemini...")
        response = model.generate_content(
            [prompt, video_file],
            generation_config=genai.types.GenerationConfig(
                response_mime_type="application/json"
            ),
        )
        print("Log: Analysis received from Gemini.")

        # 4. Parse the JSON response into the three output fields.
        print(f"Raw model response: {response.text}")
        parsed_json = json.loads(response.text)
        device_type = parsed_json.get("device_type", "N/A")
        condition = parsed_json.get("condition", "N/A")
        reason = parsed_json.get("reason", "N/A")
        return device_type, condition, reason
    except Exception as e:
        print(f"!!!!!!!! AN ERROR OCCURRED !!!!!!!!\n{e}")
        # Return the error message in the first field and empty strings for the others
        return f"An error occurred: {e}", "", ""
    finally:
        # BUG FIX: the original deleted the remote file only on the success
        # path, leaking uploads whenever processing FAILED or an exception
        # was raised. Clean up unconditionally, best-effort.
        if video_file is not None:
            try:
                genai.delete_file(video_file.name)
                print("Log: Uploaded file deleted.")
            except Exception as cleanup_err:
                print(f"Log: Failed to delete uploaded file: {cleanup_err}")
# --- Gradio Interface with Multiple Output Fields ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown(
        """
        # 📱 Device Condition Analyzer
        Upload or record a short video of an electronic device to get a structured analysis of its condition.
        """
        # BUG FIX: the heading emoji was mojibake ("π±") from a bad encoding
        # round-trip; restored the intended 📱 character.
    )
    video_input = gr.Video(
        label="Upload or Record Video",
        sources=["upload", "webcam"],
        format="mp4",
    )
    submit_button = gr.Button("Analyze Device", variant="primary")

    # Three side-by-side textboxes, one per field of the parsed JSON.
    with gr.Row():
        device_type_output = gr.Textbox(label="Device Type")
        condition_output = gr.Textbox(label="Condition")
        reason_output = gr.Textbox(label="Reason / Details")

    # The outputs list must match the order of the function's return tuple.
    submit_button.click(
        fn=analyze_device_condition,
        inputs=video_input,
        outputs=[device_type_output, condition_output, reason_output],
        show_progress='full',
    )

demo.launch(debug=True)