# Commit 2248ec2 (adhiltortil): refactor — replace huggingface-hub secret
# retrieval with environment variable.
import os
from google import genai
from PIL import Image
import gradio as gr
def get_api_key():
    """Read the Gemini API key from the GEMINI_API_KEY environment variable.

    Returns:
        The key string, or None when the variable is not set.
    """
    return os.getenv("GEMINI_API_KEY")
def analyze_images(image1, image2):
    """Compare two images with Gemini and describe missing or misplaced items.

    Args:
        image1: RGB numpy array from the first Gradio image input, or None
            when the user left it empty.
        image2: RGB numpy array from the second Gradio image input, or None.

    Returns:
        str: the model's analysis text, or an "Error: ..." message on failure.
    """
    # Validate inputs up front: an empty gr.Image passes None, and letting it
    # reach Image.fromarray would surface a cryptic exception to the user.
    if image1 is None or image2 is None:
        return "Error: please upload both images before analyzing"

    api_key = get_api_key()
    if not api_key:
        # The key is read from the environment (see get_api_key), so report
        # that rather than the old huggingface-hub secret mechanism.
        return "Error: GEMINI_API_KEY not found in environment variables"

    try:
        # Client/chat creation can fail too (bad key, network issues), so it
        # belongs inside the try block instead of before it.
        client = genai.Client(api_key=api_key)
        chat = client.chats.create(model="gemini-2.0-flash")
        img1 = Image.fromarray(image1)
        img2 = Image.fromarray(image2)
        response = chat.send_message([
            img1,
            img2,
            "Spot the difference between 2 given images. The angles of the images will be different. Your task is to spot the missing or misplaced items"
        ])
        return response.text
    except Exception as e:
        return f"Error: {str(e)}"
def clear_inputs():
    """Reset the UI: clear both image inputs and empty the results textbox."""
    cleared_images = (None, None)
    return (*cleared_images, "")
# Build the Gradio UI. Component creation order inside the Blocks context
# determines the rendered layout, so statement order here is significant.
with gr.Blocks(theme=('hmb/amethyst')) as demo:
    gr.Markdown("## Scene Continuity Checker")
    # Image inputs side by side in a row; type="numpy" hands the callback
    # raw arrays (or None when an input is left empty).
    with gr.Row():
        image1_input = gr.Image(
            label="First Image",
            height=150,
            width=150,
            type="numpy"
        )
        image2_input = gr.Image(
            label="Second Image",
            height=150,
            width=150,
            type="numpy"
        )
    # Multi-line textbox that receives the model's analysis (or an error string).
    output_text = gr.Textbox(
        label="Analysis Results",
        lines=4,
        scale=2
    )
    # Buttons stacked vertically (removed gr.Row())
    analyze_button = gr.Button("Analyze Differences", variant="primary")
    clear_button = gr.Button("Reset", variant="secondary")
    # Wire the buttons: analyze feeds both images to analyze_images; reset
    # blanks both image inputs and the output textbox via clear_inputs.
    analyze_button.click(
        fn=analyze_images,
        inputs=[image1_input, image2_input],
        outputs=output_text
    )
    clear_button.click(
        fn=clear_inputs,
        inputs=[],
        outputs=[image1_input, image2_input, output_text]
    )
# Launch the app only when run as a script (HF Spaces executes this file directly).
if __name__ == "__main__":
    demo.launch()