KIKERP_Demo / app.py
vivek9chavan's picture
Update app.py
7135735 verified
raw
history blame
4.04 kB
# Third-party imports
import gradio as gr
import os
import json
import time  # needed to poll while the uploaded video is processed
from dotenv import load_dotenv

# Google Gen AI SDK
from google import genai
from google.genai import types

# --- Configuration and client initialization ---
# Load a local .env (no-op on Spaces, where secrets come from the environment).
load_dotenv()
try:
    client = genai.Client(api_key=os.environ["GEMINI_API_KEY"])
except KeyError:
    # Fail loudly at startup rather than on the first request.
    raise gr.Error("FATAL: GEMINI_API_KEY not found. Please set it in your Hugging Face Space secrets.")
# --- Core Gradio Function ---
def analyze_device_condition(video_file_path):
    """Upload a device video to the Gemini Files API and return its assessed condition.

    Uploads the file, polls until it leaves the PROCESSING state (video files
    cannot be referenced in a request until ACTIVE), then asks the model for a
    strict-JSON assessment.

    Args:
        video_file_path: Local path to the video supplied by Gradio, or a
            falsy value (None / "") when nothing was uploaded.

    Returns:
        A 3-tuple of strings ``(device_type, condition, reason)``. On any
        failure the first element carries the error message and the other
        two are empty strings, so the UI never crashes.
    """
    if not video_file_path:
        return "Please upload video", "", ""

    uploaded_file = None
    try:
        # 1. Upload the file to Google's Files API.
        print(f"Log: Uploading file: {video_file_path}...")
        uploaded_file = client.files.upload(file=video_file_path)
        print(f"Log: File upload initiated. File name: {uploaded_file.name}, State: {uploaded_file.state.name}")

        # 2. Poll until the file is no longer PROCESSING; a request that
        #    references a non-ACTIVE file would be rejected.
        while uploaded_file.state.name == "PROCESSING":
            print("Log: File is processing, waiting 5 seconds...")
            time.sleep(5)
            # Re-fetch to get the latest processing state.
            uploaded_file = client.files.get(name=uploaded_file.name)
            print(f"Log: Current file state: {uploaded_file.state.name}")

        # Anything other than ACTIVE at this point (e.g. FAILED) is fatal.
        if uploaded_file.state.name != "ACTIVE":
            raise Exception(f"File processing failed. Final state: {uploaded_file.state.name}")
        print("Log: File is now ACTIVE and ready for use.")

        # 3. Prompt: demand a JSON-only reply with three known keys so the
        #    response can be parsed mechanically below.
        prompt = """
Analyze the provided video. Respond ONLY with a valid JSON object with three keys:
1. "device_type": A short string identifying the device.
2. "condition": A single word: "Mint", "Excellent", "Good", "Fair", or "Poor".
3. "reason": A brief string explaining the condition.
"""

        # 4. Low temperature + JSON MIME type keep the output deterministic
        #    and machine-parseable.
        model_name = "gemini-2.5-flash"
        generate_content_config = types.GenerateContentConfig(
            temperature=0.2,
            response_mime_type="application/json",
        )

        # 5. Call the API with the now-active file.
        contents = [uploaded_file, prompt]
        print(f"Log: Sending request to model: {model_name}...")
        response = client.models.generate_content(
            model=f"models/{model_name}",
            contents=contents,
            config=generate_content_config,
        )

        # 6. Parse the JSON response; missing keys degrade to "N/A" instead
        #    of raising.
        parsed_json = json.loads(response.text)
        device_type = parsed_json.get("device_type", "N/A")
        condition = parsed_json.get("condition", "N/A")
        reason = parsed_json.get("reason", "N/A")
        return device_type, condition, reason
    except Exception as e:
        # UI boundary: surface the error as text rather than crashing Gradio.
        error_message = f"An error occurred: {e}"
        print(f"ERROR: {error_message}")
        return error_message, "", ""
    finally:
        # 7. Cleanup: delete the remote file. Guarded so a failed delete
        #    cannot mask the real return value or exception from the try
        #    block above (an unguarded raise in `finally` would replace it).
        if uploaded_file:
            print(f"Log: Deleting uploaded file: {uploaded_file.name}")
            try:
                client.files.delete(name=uploaded_file.name)
            except Exception as cleanup_err:
                print(f"WARNING: Failed to delete uploaded file: {cleanup_err}")
# --- Gradio Interface ---
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    # Title string repaired: the source contained mojibake ("πŸ“±"), i.e. the
    # UTF-8 phone emoji mis-decoded as cp1252.
    gr.Markdown("# 📱 Device Condition Analyzer")

    # Input: a video either uploaded or recorded from the webcam.
    video_input = gr.Video(label="Upload or Record Video", sources=["upload", "webcam"], format="mp4")
    submit_button = gr.Button("Analyze Device", variant="primary")

    # Outputs: the three fields returned by analyze_device_condition.
    with gr.Row():
        device_type_output = gr.Textbox(label="Device Type")
        condition_output = gr.Textbox(label="Condition")
        reason_output = gr.Textbox(label="Reason / Details")

    submit_button.click(
        fn=analyze_device_condition,
        inputs=video_input,
        outputs=[device_type_output, condition_output, reason_output],
        show_progress='full',
    )

demo.launch(debug=True)