Spaces:
Running
Running
from __future__ import annotations

import io
import json
import logging
import os
from pathlib import Path
from typing import List, Optional

import gradio as gr
from PIL import Image

from llm_clients import GEMINI_3_VISION, OPENAI_GPT5, OPENAI_GPT5_MINI
from pipeline import DEFAULT_USER_PROMPT, process_images
from settings import load_settings
# Basic INFO-level logging so progress/warnings show up in the Spaces container logs.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger("app")
| def _load_images(files: List[gr.File], downscale: bool) -> List[bytes]: | |
| images: List[bytes] = [] | |
| for f in files or []: | |
| data = Path(f.name).read_bytes() | |
| if downscale: | |
| try: | |
| img = Image.open(io.BytesIO(data)).convert("RGB") | |
| img.thumbnail((1024, 1024)) | |
| buf = io.BytesIO() | |
| img.save(buf, format="PNG") | |
| data = buf.getvalue() | |
| except Exception as exc: # noqa: BLE001 | |
| LOGGER.warning("Downscale failed for %s: %s; using original", f.name, exc) | |
| images.append(data) | |
| return images | |
def make_interface():
    """Build the Gradio Blocks UI for the garment micro-trend detector.

    Loads settings once at construction time and validates API keys up front,
    so a misconfigured Space fails fast at startup rather than on the first
    request. Returns the assembled ``gr.Blocks`` app (not launched).
    """
    settings = load_settings()
    settings.require_api_keys()

    def _infer(files, model, creativity, downscale_images):
        # Run the pipeline on the uploads and return (JSON text, markdown summary).
        # NOTE(review): `creativity` is not used here -- the slider is informational
        # only (see its `info` text below); confirm before wiring it into the call.
        images = _load_images(files, downscale_images)
        if not images:
            raise gr.Error("Please upload at least one image.")
        try:
            result = process_images(
                images,
                model,
                settings,
                system_prompt_path=None,
                user_prompt=DEFAULT_USER_PROMPT,
            )
        except Exception as exc:  # noqa: BLE001
            # Log the full traceback server-side, surface a readable error in the UI.
            LOGGER.exception("Inference failed")
            raise gr.Error(str(exc))
        trends = result["trends"]
        bullets = result["summary"]
        md = "\n\n".join(f"- {b}" for b in bullets) if bullets else "No summary available."
        trends_text = json.dumps(trends, indent=2)
        return trends_text, md

    def _on_files_change(files):
        """Update preview and clear outputs when files are removed."""
        if not files:
            # All files removed: empty the gallery and blank both output panes.
            return [], "", ""
        # Files present: refresh the gallery, leave existing outputs untouched.
        return files, gr.update(), gr.update()

    # Gemini can be hidden via env var (e.g. when no Google key is configured).
    enable_gemini = os.environ.get("ENABLE_GEMINI", "true").lower() not in {"0", "false", "no"}
    with gr.Blocks(title="Garment Micro-Trend Detector") as demo:
        gr.Markdown(
            "Upload garment image(s), pick a model, and get structured micro-trend JSON plus a bullet summary."
        )
        with gr.Row():
            image_input = gr.Files(file_count="multiple", label="Garment images")
            with gr.Column():
                model_choices = [OPENAI_GPT5, OPENAI_GPT5_MINI]
                if enable_gemini:
                    model_choices.append(GEMINI_3_VISION)
                # Default to the configured OpenAI model only if it is an offered choice.
                default_model = settings.openai_model if settings.openai_model in model_choices else OPENAI_GPT5_MINI
                model_dd = gr.Dropdown(
                    choices=model_choices,
                    value=default_model,
                    label="Model",
                    allow_custom_value=True,  # allow custom OpenAI model overrides like gpt-5.1
                )
                creativity = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    step=0.1,
                    value=0.2,
                    label="Creativity (temperature hint)",
                    info="Not all models use this directly; for now it is informational.",
                )
                downscale_chk = gr.Checkbox(value=True, label="Downscale images to 1024px for speed/cost")
                run_btn = gr.Button("Analyze", variant="primary")
        preview = gr.Gallery(
            label="Preview",
            show_label=True,
            object_fit="contain",  # preserve aspect ratio
            height="auto",
        )
        json_out = gr.Textbox(label="Micro-trend JSON", lines=20, interactive=False)
        summary_md = gr.Markdown(label="Summary")
        # Keep the preview gallery in sync with uploads; skip the queue for snappy UI updates.
        image_input.change(_on_files_change, inputs=image_input, outputs=[preview, json_out, summary_md], queue=False)
        # Inference goes through the queue since model calls can run long.
        run_btn.click(
            _infer,
            inputs=[image_input, model_dd, creativity, downscale_chk],
            outputs=[json_out, summary_md],
            queue=True,
        )
    return demo
if __name__ == "__main__":
    app = make_interface()
    # Enable the request queue so long-running inference calls do not time out.
    app.queue()
    app.launch(
        server_name="0.0.0.0",  # bind all interfaces; required inside the Spaces container
        server_port=int(os.environ.get("PORT", 7860)),
        share=False,  # Spaces provides its own public URL; share=True not supported there
        show_api=False,  # avoid schema generation that can crash on HF
    )