# KIKERP_Demo / app.py
# Hugging Face Space demo (author: vivek9chavan).
import gradio as gr
import os
import json
import mimetypes
from dotenv import load_dotenv
# Your exact requested imports
from google import genai
from google.genai import types
# --- Configuration and Client Initialization ---
# Load GEMINI_API_KEY from a local .env file when present (no-op on Spaces,
# where the key comes from the Space secrets instead).
load_dotenv()

_api_key = os.environ.get("GEMINI_API_KEY")
if _api_key is None:
    # Fail fast at import time: without a key every request would fail anyway.
    raise gr.Error("FATAL: GEMINI_API_KEY not found. Please set it in your Hugging Face Space secrets.")
# Module-level Gemini client shared by all request handlers.
client = genai.Client(api_key=_api_key)
# --- Core Gradio Function ---
def analyze_device_condition(video_file_path):
    """Assess a device's condition from an uploaded video via Gemini.

    Args:
        video_file_path: Path to the uploaded/recorded video file, or a falsy
            value when the user submitted without providing one.

    Returns:
        A 3-tuple of strings ``(device_type, condition, reason)``. On failure
        the first element carries a human-readable error message and the other
        two are empty strings, keeping the three Gradio outputs consistent.
    """
    if not video_file_path:
        return "Please upload video", "", ""
    try:
        # 1. Validate and read the video file. Inline upload needs a video/*
        #    MIME type so the model knows how to decode the raw bytes.
        mime_type, _ = mimetypes.guess_type(video_file_path)
        if not mime_type or not mime_type.startswith("video"):
            raise ValueError("Unsupported file type.")
        with open(video_file_path, "rb") as video:
            video_part = types.Part(
                inline_data=types.Blob(mime_type=mime_type, data=video.read())
            )
        # 2. Ask for a strict JSON object so the reply can be parsed below.
        prompt = """
Analyze the provided video. Respond ONLY with a valid JSON object with three keys:
1. "device_type": A short string identifying the device.
2. "condition": A single word: "Mint", "Excellent", "Good", "Fair", or "Poor".
3. "reason": A brief string explaining the condition.
"""
        # FIX: google-genai's Part.from_text takes `text` as a keyword-only
        # argument; the original positional call raises TypeError.
        prompt_part = types.Part.from_text(text=prompt)
        # 3. Wrap both parts in a single Content entry, as the SDK expects.
        contents = [
            types.Content(parts=[prompt_part, video_part])
        ]
        # 4. Low temperature + JSON MIME type steer the model toward a
        #    deterministic, machine-parsable reply.
        model_name = "gemini-2.5-flash"
        generate_content_config = types.GenerateContentConfig(
            temperature=0.2,
            response_mime_type="application/json"
        )
        # 5. Stream the response. FIX: the keyword is `config=` in the
        #    google-genai SDK — `generation_config=` is the legacy
        #    google-generativeai name and raises TypeError here.
        print(f"Log: Sending request to model: {model_name}...")
        stream = client.models.generate_content_stream(
            model=f"models/{model_name}",
            contents=contents,
            config=generate_content_config,
        )
        # 6. Concatenate streamed chunks into the full JSON string.
        #    FIX: chunk.text can be None on non-text chunks; coalesce to ""
        #    so str.join does not raise TypeError.
        response_text = "".join(chunk.text or "" for chunk in stream)
        # 7. Parse the final JSON, defaulting missing keys to "N/A".
        parsed_json = json.loads(response_text)
        device_type = parsed_json.get("device_type", "N/A")
        condition = parsed_json.get("condition", "N/A")
        reason = parsed_json.get("reason", "N/A")
        return device_type, condition, reason
    except Exception as e:
        # Surface any failure in the first output box instead of crashing
        # the Gradio handler.
        error_message = f"An error occurred: {e}"
        print(f"ERROR: {error_message}")
        return error_message, "", ""
# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # FIX: the heading emoji was mojibake ("πŸ“±" — the UTF-8 bytes of the
    # mobile-phone emoji decoded as cp1252); restore the intended 📱.
    gr.Markdown("# 📱 Device Condition Analyzer")
    # Single video input: users may upload a file or record via webcam.
    video_input = gr.Video(label="Upload or Record Video", sources=["upload", "webcam"], format="mp4")
    submit_button = gr.Button("Analyze Device", variant="primary")
    # Three result fields laid out side by side, matching the handler's
    # (device_type, condition, reason) return tuple.
    with gr.Row():
        device_type_output = gr.Textbox(label="Device Type")
        condition_output = gr.Textbox(label="Condition")
        reason_output = gr.Textbox(label="Reason / Details")
    # Wire the button to the analysis function; outputs map 1:1 to the
    # three strings it returns.
    submit_button.click(
        fn=analyze_device_condition,
        inputs=video_input,
        outputs=[device_type_output, condition_output, reason_output],
        show_progress='full'
    )
demo.launch(debug=True)