|
|
import os |
|
|
import logging |
|
|
from io import BytesIO |
|
|
from PIL import Image |
|
|
import gradio as gr |
|
|
from google import genai |
|
|
from google.genai import types |
|
|
|
|
|
|
|
|
# Log to a local file so the console stays clean for Gradio's own output.
_LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, filename='app.log', format=_LOG_FORMAT)
|
|
|
|
|
|
|
|
# Gemini API key read from the environment. os.environ.get returns None
# when the variable is unset; in that case the client below is still
# constructed with api_key=None — presumably requests would then be
# rejected by the service. NOTE(review): confirm the desired failure
# mode when the key is missing.
GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")

# Module-level Gemini client shared by every request in this app.
client = genai.Client(api_key=GEMINI_API_KEY)
|
|
|
|
|
|
|
|
def explain_image(image: Image.Image) -> str:
    """Ask Gemini to describe an uploaded image in Traditional Chinese.

    Args:
        image: PIL image from the Gradio upload widget. Gradio passes
            None when the button is clicked without an upload.

    Returns:
        The model's textual explanation of the image.
    """
    # Gradio delivers None if the user never uploaded an image; the
    # original code would crash on `None.save(...)`.
    if image is None:
        return "請先上傳圖片。"

    # Serialize the PIL image to PNG bytes for the API payload.
    buffered = BytesIO()
    image.save(buffered, format="PNG")
    image_data = buffered.getvalue()

    # BUG FIX: the google-genai SDK exposes `types.Part`, not
    # `types.ContentPart` — the original raised AttributeError on every
    # call. Binary parts are built with Part.from_bytes and text parts
    # with Part.from_text.
    contents = [
        types.Part.from_bytes(data=image_data, mime_type="image/png"),
        types.Part.from_text(text="請用繁體中文說明這張圖片的內容。"),
    ]

    response = client.models.generate_content(
        model="gemini-1.5-flash",
        contents=contents,
        config=types.GenerateContentConfig(response_modalities=["TEXT"]),
    )

    # `response.text` concatenates the text parts of the first candidate —
    # equivalent to candidates[0].content.parts[0].text for a single-part
    # reply, but tolerant of multi-part responses.
    explanation = response.text
    logging.info("圖片說明成功取得。")
    return explanation
|
|
|
|
|
|
|
|
# Build the Gradio UI: one image input, a trigger button, one text output.
with gr.Blocks() as demo:
    gr.Markdown("## 🧠 Gemini 圖片解釋器(圖 ➜ 文)")
    uploaded_image = gr.Image(type="pil", label="上傳圖片")
    run_button = gr.Button("解釋圖片")
    result_box = gr.Textbox(label="圖片說明", lines=5)

    # Wire the button: image in, explanation text out.
    run_button.click(fn=explain_image, inputs=uploaded_image, outputs=result_box)
|
|
|
|
|
# Start the Gradio dev server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch()
|
|
|