File size: 3,661 Bytes
afb9dad
 
2955ae6
5a474ac
afb9dad
 
5a474ac
 
 
 
 
afb9dad
 
4f37c46
641b9cd
5a474ac
afb9dad
9c2cb20
afb9dad
5a474ac
afb9dad
 
 
2955ae6
9c2cb20
 
641b9cd
5a474ac
 
641b9cd
5a474ac
 
 
 
 
 
641b9cd
9c2cb20
641b9cd
2955ae6
641b9cd
 
9c2cb20
641b9cd
 
 
 
 
 
 
 
9c2cb20
641b9cd
5a474ac
 
 
 
9c2cb20
 
641b9cd
5a474ac
641b9cd
5a474ac
 
 
 
 
641b9cd
 
afb9dad
641b9cd
 
9c2cb20
 
 
afb9dad
9c2cb20
afb9dad
9c2cb20
 
641b9cd
9c2cb20
afb9dad
2955ae6
afb9dad
2955ae6
 
afb9dad
9c2cb20
 
 
 
 
afb9dad
 
 
9c2cb20
 
afb9dad
 
9c2cb20
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
import gradio as gr
import os
import json
import mimetypes
from dotenv import load_dotenv

# Your exact requested imports
from google import genai
from google.genai import types

# --- Configuration and Client Initialization ---
load_dotenv()

# Fail fast with a visible Gradio error when the API-key secret is absent,
# instead of crashing later on the first model call.
_api_key = os.environ.get("GEMINI_API_KEY")
if _api_key is None:
    raise gr.Error("FATAL: GEMINI_API_KEY not found. Please set it in your Hugging Face Space secrets.")
client = genai.Client(api_key=_api_key)

# --- Core Gradio Function ---

def analyze_device_condition(video_file_path):
    """Analyze a device video with Gemini and return its condition assessment.

    Args:
        video_file_path: Filesystem path to the uploaded/recorded video as
            provided by the gr.Video component (None/"" when nothing uploaded).

    Returns:
        A (device_type, condition, reason) tuple of strings — one value per
        Gradio output textbox. On any failure the first element carries the
        error message and the other two are empty strings.
    """
    if not video_file_path:
        return "Please upload video", "", ""

    try:
        # 1. Validate the file type and read the video into an inline Part.
        mime_type, _ = mimetypes.guess_type(video_file_path)
        if not mime_type or not mime_type.startswith("video"):
            raise ValueError("Unsupported file type.")

        with open(video_file_path, "rb") as video:
            video_part = types.Part(
                inline_data=types.Blob(mime_type=mime_type, data=video.read())
            )

        # 2. Prepare the prompt
        prompt = """
        Analyze the provided video. Respond ONLY with a valid JSON object with three keys:
        1. "device_type": A short string identifying the device.
        2. "condition": A single word: "Mint", "Excellent", "Good", "Fair", or "Poor".
        3. "reason": A brief string explaining the condition.
        """
        # BUG FIX: in the google-genai SDK, Part.from_text takes a
        # keyword-only `text` argument; a positional call raises TypeError.
        prompt_part = types.Part.from_text(text=prompt)

        # 3. Wrap both parts in a single Content with an explicit user role.
        contents = [
            types.Content(role="user", parts=[prompt_part, video_part])
        ]

        # 4. Model and generation settings: low temperature for consistent
        # grading, and JSON output enforced by the response MIME type.
        model_name = "gemini-2.5-flash"
        generate_content_config = types.GenerateContentConfig(
            temperature=0.2,
            response_mime_type="application/json"
        )

        # 5. Call the API with the streaming method.
        # BUG FIX: the SDK parameter is `config=`, not `generation_config=`;
        # the original keyword raised TypeError on every call.
        print(f"Log: Sending request to model: {model_name}...")
        stream = client.models.generate_content_stream(
            model=f"models/{model_name}",
            contents=contents,
            config=generate_content_config,
        )

        # 6. Accumulate the streamed chunks into one JSON string.
        # chunk.text can be None on metadata-only chunks, so coalesce to "".
        response_text = "".join(chunk.text or "" for chunk in stream)

        # 7. Parse the final JSON response.
        parsed_json = json.loads(response_text.strip())
        device_type = parsed_json.get("device_type", "N/A")
        condition = parsed_json.get("condition", "N/A")
        reason = parsed_json.get("reason", "N/A")

        return device_type, condition, reason

    except Exception as e:
        # Broad catch is deliberate: each Gradio output box must receive a
        # value rather than letting an unhandled traceback escape.
        error_message = f"An error occurred: {e}"
        print(f"ERROR: {error_message}")
        return error_message, "", ""

# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# 📱 Device Condition Analyzer")

    # Input: upload a clip or record one via webcam; normalized to mp4.
    video_in = gr.Video(
        label="Upload or Record Video",
        sources=["upload", "webcam"],
        format="mp4",
    )
    analyze_btn = gr.Button("Analyze Device", variant="primary")

    # Outputs: one textbox per field of the model's JSON answer.
    with gr.Row():
        out_device = gr.Textbox(label="Device Type")
        out_condition = gr.Textbox(label="Condition")
        out_reason = gr.Textbox(label="Reason / Details")

    analyze_btn.click(
        fn=analyze_device_condition,
        inputs=video_in,
        outputs=[out_device, out_condition, out_reason],
        show_progress='full',
    )

demo.launch(debug=True)