File size: 3,089 Bytes
156fd1b
 
 
 
 
 
 
 
2ce7d1f
156fd1b
 
2ce7d1f
 
156fd1b
 
 
2ce7d1f
156fd1b
 
 
2ce7d1f
 
156fd1b
2ce7d1f
 
156fd1b
2ce7d1f
 
 
156fd1b
 
2ce7d1f
156fd1b
 
2ce7d1f
156fd1b
 
 
 
 
 
 
 
 
 
 
 
2ce7d1f
156fd1b
 
 
 
 
 
2ce7d1f
 
 
 
 
 
 
156fd1b
 
2ce7d1f
 
 
 
 
 
 
 
156fd1b
2ce7d1f
156fd1b
2ce7d1f
 
 
 
 
 
156fd1b
2ce7d1f
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
import gradio as gr
import pandas as pd
import numpy as np
from sentence_transformers import SentenceTransformer
from datasets import load_dataset
from sklearn.metrics.pairwise import cosine_similarity
from PIL import Image

# 1. Resource loading (startup): runs once at import time, before the UI starts.
print("โณ Loading Model and Data...")
# CLIP model that embeds both text and images into the same vector space.
model = SentenceTransformer('clip-ViT-B-32')

# Load the pre-computed inventory metadata saved earlier.
# NOTE(review): assumes the parquet has an 'embedding' column (array per row)
# and a 'cluster' column used later for result captions — confirm the
# file that produced ven_inventory.parquet.
df = pd.read_parquet("ven_inventory.parquet")
# Stack per-row embedding arrays into one (num_items, dim) matrix for
# vectorized cosine-similarity against queries.
inventory_embeddings = np.stack(df['embedding'].values)

# Load the catalog images from the dataset.
# NOTE(review): the first 5050 rows must line up index-for-index with the
# rows of ven_inventory.parquet, since results are looked up by position.
dataset = load_dataset("detection-datasets/fashionpedia", split='train')
subset = dataset.select(range(5050))

# Updated recommendation function — receives all UI inputs and embeds
# whichever one the selected mode points at.
def recommend(text_query, image_query, input_mode):
    """Return the top inventory matches for a text or image query.

    Parameters
    ----------
    text_query : str
        Free-text description; used when ``input_mode == "Text"``.
    image_query : numpy.ndarray | PIL.Image.Image | None
        Uploaded image; used when ``input_mode == "Image"``.
    input_mode : str
        ``"Text"`` or ``"Image"`` — selects which input to embed.

    Returns
    -------
    list[tuple] | None
        Up to 3 ``(image, caption)`` pairs for the Gallery, or ``None``
        when the selected input is empty.
    """
    if input_mode == "Text":
        if not text_query:
            return None
        query_emb = model.encode([text_query])
    else:
        if image_query is None:
            return None
        # gr.Image may deliver either a numpy array or a PIL image.
        # The original called Image.fromarray unconditionally, which
        # raises if a PIL image arrives — convert only when needed.
        if isinstance(image_query, Image.Image):
            img = image_query.convert("RGB")
        else:
            img = Image.fromarray(image_query).convert("RGB")
        query_emb = model.encode([img])

    # L2-normalize per row (keepdims keeps this correct for batched
    # queries too); epsilon floor avoids division by zero on a
    # degenerate all-zero embedding.
    norms = np.linalg.norm(query_emb, axis=1, keepdims=True)
    query_emb = query_emb / np.maximum(norms, 1e-12)

    # Cosine similarity of the query against every inventory item.
    scores = cosine_similarity(query_emb, inventory_embeddings)[0]
    # Clamp k so a tiny inventory (< 3 items) doesn't break the slice.
    k = min(3, len(scores))
    top_indices = np.argsort(scores)[::-1][:k]

    results = []
    for idx in top_indices:
        actual_idx = int(idx)
        results.append((
            subset[actual_idx]['image'], 
            f"Match Score: {scores[actual_idx]:.2%} | Cluster: {df.iloc[actual_idx]['cluster']}"
        ))
    return results

# 2. Build the user interface (UI).
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# ๐ŸŒฟ Ven Community - Fashion Recommender")
    gr.Markdown("Search Ven's inventory by text or image.")

    with gr.Row():
        with gr.Column():
            input_mode = gr.Radio(["Text", "Image"], label="Input Type", value="Text")

            # Two query widgets; only the one matching the radio
            # selection is visible at any time.
            text_input = gr.Textbox(label="Description", placeholder="e.g., White sneakers", visible=True)
            image_input = gr.Image(label="Upload Image", visible=False)

            search_btn = gr.Button("Find Similar Items", variant="primary")

        with gr.Column():
            output_gallery = gr.Gallery(label="Results", columns=3)

    def update_visibility(mode):
        """Show the widget for the chosen mode and hide the other."""
        show_text = mode == "Text"
        return gr.update(visible=show_text), gr.update(visible=not show_text)

    # Toggle widget visibility whenever the radio selection changes.
    input_mode.change(update_visibility, inputs=input_mode, outputs=[text_input, image_input])

    # Wire every input component into the recommender — the function
    # itself decides which one to use based on the selected mode.
    search_btn.click(
        fn=recommend,
        inputs=[text_input, image_input, input_mode],
        outputs=output_gallery,
    )

demo.launch()