# Hugging Face Spaces status banner ("Spaces: Sleeping") captured during
# export; not part of the program.
# -*- coding: utf-8 -*-
"""FinalProject.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1_wYfP0IRdb9fpc2zvbg8IqdXGx1dTo7X
"""

# NOTE: `!pip install ...` is IPython shell magic and a SyntaxError in a plain
# .py file. Keep it commented out here; run it in a notebook cell or install
# the packages from a terminal instead.
# !pip install datasets transformers torch torchvision faiss-cpu gradio

from datasets import load_dataset
from PIL import Image
from IPython.display import display

# Load the DeepFashion dataset (train split) from the Hugging Face Hub.
dataset = load_dataset("lirus18/deepfashion", split="train")

# Sanity check: render the first image inline.
image = dataset[0]['image']
display(image)
from transformers import CLIPProcessor, CLIPModel
import torch

# Load the pretrained CLIP model and its input preprocessor; both are used
# below to embed images into CLIP's joint image/text feature space.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Use GPU if available; all inputs below are moved to this same device.
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)
import numpy as np

# Embed the first N dataset images with CLIP and keep the vectors in memory
# for nearest-neighbour search.
N = 500  # subset size; raise to 1000+ later for better coverage

image_vectors = []
image_indices = []

for idx in range(N):
    rgb = dataset[idx]['image'].convert("RGB")
    batch = processor(images=rgb, return_tensors="pt").to(device)
    with torch.no_grad():
        feats = model.get_image_features(**batch)
    image_vectors.append(feats.cpu().numpy().squeeze())
    image_indices.append(idx)  # remember the dataset row for later retrieval

# Stack into a single (N, embed_dim) matrix for vectorised similarity.
image_vectors = np.array(image_vectors)
| from sklearn.metrics.pairwise import cosine_similarity | |
def find_similar(user_image, top_k=3, exclude_index=None):
    """Return the top_k dataset images most similar to user_image.

    Similarity is cosine similarity between CLIP image embeddings.
    exclude_index, when given, is a position in image_vectors whose score is
    forced to -1 so the query image never matches itself.
    """
    # Embed the query image exactly like the dataset images were embedded.
    rgb = user_image.convert("RGB")
    batch = processor(images=rgb, return_tensors="pt").to(device)
    with torch.no_grad():
        query_vec = model.get_image_features(**batch).cpu().numpy()

    # One similarity score per pre-computed dataset embedding.
    scores = cosine_similarity(query_vec, image_vectors)[0]

    # Optionally knock out the query image itself.
    if exclude_index is not None:
        scores[exclude_index] = -1  # force low similarity

    # Indices of the top_k highest scores, best first.
    ranked = scores.argsort()[-top_k:][::-1]
    return [dataset[image_indices[pos]]['image'] for pos in ranked]
from IPython.display import display

# Quick retrieval demo: pick one dataset image as the query, show it, then
# show its nearest neighbours (the query itself is excluded from results).
query_index = 10
query_image = dataset[query_index]['image']
display(query_image)

for match in find_similar(query_image, exclude_index=query_index):
    display(match)
from diffusers import StableDiffusionImg2ImgPipeline
import torch

# Load Stable Diffusion v1.5 for image-to-image generation (large download on
# first run). Use fp16 weights on GPU; fall back to fp32 on CPU, where fp16
# inference is poorly supported.
pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
).to(device)
def generate_outfit_from_image(input_image):
    """Run img2img Stable Diffusion on input_image; return one generated outfit image."""
    guidance_prompt = "fashion outfit design inspired by the clothing item"
    # 512x512 is SD v1.5's native resolution (note: this ignores aspect ratio).
    resized = input_image.resize((512, 512))
    result = pipe(prompt=guidance_prompt, image=resized, strength=0.7, guidance_scale=7.5)
    return result.images[0]
from PIL import ImageChops

def recommend_from_upload(uploaded_image):
    """Gradio handler: return [input, 3 similar dataset images, 1 generated outfit].

    Steps:
      1. Scan the embedded subset for a pixel-identical copy of the upload so
         an exact duplicate can be excluded from the similarity results.
      2. Retrieve the top-3 most similar dataset images via CLIP embeddings.
      3. Generate one new outfit with Stable Diffusion img2img.
    """
    uploaded_image = uploaded_image.convert("RGB")

    # Step 1: find an exact duplicate, if any.
    closest_idx = None
    for i, row in enumerate(image_indices):
        candidate = dataset[row]['image'].convert("RGB")
        # BUGFIX: ImageChops.difference raises ValueError when the two images
        # differ in size, which crashes on almost every real upload. Images of
        # a different size can never be pixel-identical, so skip them.
        if candidate.size != uploaded_image.size:
            continue
        if ImageChops.difference(candidate, uploaded_image).getbbox() is None:
            closest_idx = i
            break

    # Step 2: top-3 similar images, excluding the exact duplicate if found.
    similar_imgs = find_similar(uploaded_image, top_k=3, exclude_index=closest_idx)

    # Step 3: generate one new outfit from the upload.
    generated_img = generate_outfit_from_image(uploaded_image)

    # Order matches the five Gradio output components.
    return [uploaded_image] + similar_imgs + [generated_img]
import gradio as gr
import os

# Example images offered below the upload widget (paths are Colab-local;
# they must exist at these locations for the examples to work).
example_images = [
    ["/content/fashion_examples/new1.jpg"],
    ["/content/fashion_examples/newnew.jpg"],
    ["/content/fashion_examples/newoutfit.jpg"],
]

# Build the interface: one image in, five images out. The five output
# components match the 5-element list returned by recommend_from_upload
# (input echo, 3 similar items, 1 generated outfit), in that order.
demo = gr.Interface(
    fn=recommend_from_upload,
    inputs=gr.Image(type="pil", label="Upload a clothing item"),
    outputs=[
        gr.Image(label="Your Input"),
        gr.Image(label="Similar Item 1"),
        gr.Image(label="Similar Item 2"),
        gr.Image(label="Similar Item 3"),
        gr.Image(label="Generated New Outfit"),
    ],
    title="👗 Fashion Outfit Recommender",
    description="Upload your own image *or* click an example to get 3 similar items + 1 AI-generated outfit.",
    examples=example_images
)

# Start the Gradio server (blocking call in a script).
demo.launch()