File size: 4,634 Bytes
2948ced
 
 
917c876
2948ced
c4512d1
2948ced
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
917c876
 
2948ced
 
 
 
e429a4e
2948ced
 
f2f28df
 
2948ced
 
 
 
 
 
 
 
f2f28df
 
 
2948ced
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
e429a4e
2948ced
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c4512d1
 
 
917c876
e204259
c4512d1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
from __future__ import annotations

import io
import json
import logging
import os
from pathlib import Path
from typing import List

import gradio as gr
from PIL import Image

from llm_clients import GEMINI_3_VISION, OPENAI_GPT5, OPENAI_GPT5_MINI
from pipeline import DEFAULT_USER_PROMPT, process_images
from settings import load_settings

# Configure root logging once at import time so INFO-level messages from this
# module (and libraries using the root handler) are visible on the console.
logging.basicConfig(level=logging.INFO)
LOGGER = logging.getLogger("app")  # module-level logger shared by the helpers below


def _load_images(files: List[gr.File], downscale: bool) -> List[bytes]:
    images: List[bytes] = []
    for f in files or []:
        data = Path(f.name).read_bytes()
        if downscale:
            try:
                img = Image.open(io.BytesIO(data)).convert("RGB")
                img.thumbnail((1024, 1024))
                buf = io.BytesIO()
                img.save(buf, format="PNG")
                data = buf.getvalue()
            except Exception as exc:  # noqa: BLE001
                LOGGER.warning("Downscale failed for %s: %s; using original", f.name, exc)
        images.append(data)
    return images


def make_interface():
    """Build and return the Gradio Blocks app for garment micro-trend analysis.

    Loads settings once at construction time (raises early if API keys are
    missing) and wires up: a multi-file image input, model/creativity/downscale
    controls, an Analyze button, and JSON + markdown output panes.

    Returns:
        The constructed ``gr.Blocks`` demo (not yet launched).
    """
    settings = load_settings()
    # Fail fast: surface missing API keys at startup rather than on first click.
    settings.require_api_keys()

    def _infer(files, model, creativity, downscale_images):
        # NOTE(review): `creativity` is accepted but never forwarded to
        # process_images — matches the slider's "informational" label below.
        images = _load_images(files, downscale_images)
        if not images:
            raise gr.Error("Please upload at least one image.")

        try:
            result = process_images(
                images,
                model,
                settings,
                system_prompt_path=None,
                user_prompt=DEFAULT_USER_PROMPT,
            )
        except Exception as exc:  # noqa: BLE001
            # Log full traceback server-side; show only the message to the user.
            LOGGER.exception("Inference failed")
            raise gr.Error(str(exc))

        # `result` is expected to carry "trends" (structured data) and
        # "summary" (list of bullet strings) — contract of process_images.
        trends = result["trends"]
        bullets = result["summary"]
        md = "\n\n".join(f"- {b}" for b in bullets) if bullets else "No summary available."
        trends_text = json.dumps(trends, indent=2)
        return trends_text, md

    def _on_files_change(files):
        """Update preview and clear outputs when files are removed."""
        if not files:
            # All files removed: empty the gallery and both output panes.
            return [], "", ""
        # Files present: mirror them into the preview, leave outputs as-is.
        return files, gr.update(), gr.update()

    # Gemini is opt-out via env var; anything other than 0/false/no enables it.
    enable_gemini = os.environ.get("ENABLE_GEMINI", "true").lower() not in {"0", "false", "no"}

    with gr.Blocks(title="Garment Micro-Trend Detector") as demo:
        gr.Markdown(
            "Upload garment image(s), pick a model, and get structured micro-trend JSON plus a bullet summary."
        )

        with gr.Row():
            image_input = gr.Files(file_count="multiple", label="Garment images")
            with gr.Column():
                model_choices = [OPENAI_GPT5, OPENAI_GPT5_MINI]
                if enable_gemini:
                    model_choices.append(GEMINI_3_VISION)
                # Prefer the configured OpenAI model when it is an offered choice;
                # otherwise fall back to the cheaper mini variant.
                default_model = settings.openai_model if settings.openai_model in model_choices else OPENAI_GPT5_MINI
                model_dd = gr.Dropdown(
                    choices=model_choices,
                    value=default_model,
                    label="Model",
                    allow_custom_value=True,  # allow custom OpenAI model overrides like gpt-5.1
                )
                creativity = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    step=0.1,
                    value=0.2,
                    label="Creativity (temperature hint)",
                    info="Not all models use this directly; for now it is informational.",
                )
                downscale_chk = gr.Checkbox(value=True, label="Downscale images to 1024px for speed/cost")
                run_btn = gr.Button("Analyze", variant="primary")

        preview = gr.Gallery(
            label="Preview",
            show_label=True,
            object_fit="contain",  # preserve aspect ratio
            height="auto",
        )
        json_out = gr.Textbox(label="Micro-trend JSON", lines=20, interactive=False)
        summary_md = gr.Markdown(label="Summary")

        # queue=False: preview/clearing is cheap UI work that shouldn't wait
        # behind queued inference jobs.
        image_input.change(_on_files_change, inputs=image_input, outputs=[preview, json_out, summary_md], queue=False)

        run_btn.click(
            _infer,
            inputs=[image_input, model_dd, creativity, downscale_chk],
            outputs=[json_out, summary_md],
            queue=True,
        )

    return demo


if __name__ == "__main__":
    # Build the interface, enable request queuing, and serve it. Binding to
    # all interfaces makes the app reachable inside containers / HF Spaces.
    demo = make_interface()
    demo.queue()
    port = int(os.environ.get("PORT", 7860))
    demo.launch(
        server_name="0.0.0.0",
        server_port=port,
        share=False,  # Spaces provides its own public URL; share=True not supported there
        show_api=False,  # avoid schema generation that can crash on HF
    )