# FashionDesign / app.py
# (repository header residue preserved as a comment: branch "wracell",
#  commit c93638f — these lines were raw text and would break parsing)
import base64
import os
from io import BytesIO

import cv2
import numpy as np
import streamlit as st
from google.generativeai import GenerativeModel, configure
from PIL import Image
from segment_anything import SamPredictor, sam_model_registry
# Configure the Gemini API.
# SECURITY: an API key was committed here in plain text. Prefer the
# GEMINI_API_KEY environment variable; the legacy literal is kept only as
# a backward-compatible fallback. The committed key is compromised and
# must be rotated.
configure(api_key=os.environ.get("GEMINI_API_KEY", "AIzaSyBawh403z5cyyQzFhQo14y7oUQw6nr8mIg"))
model = GenerativeModel("gemini-2.0-flash")
# Load SAM model with ViT-Base
@st.cache_resource
def load_sam_model():
    """Load the Segment Anything ViT-Base checkpoint and wrap it in a predictor.

    Cached with ``st.cache_resource`` so the heavyweight checkpoint file
    ("sam_vit_b.pth") is read only once per server process — without the
    cache, Streamlit re-executes this script (and reloaded the model) on
    every user interaction.

    Returns:
        SamPredictor: predictor ready for ``set_image`` / ``predict`` calls.
    """
    sam = sam_model_registry["vit_b"](checkpoint="sam_vit_b.pth")
    return SamPredictor(sam)
# Preprocess image using OpenCV (edge detection)
def preprocess_image(image):
    """Run Canny edge detection on a PIL image and return the edge map.

    Args:
        image: input PIL.Image in any mode; converted to RGB first.

    Returns:
        PIL.Image: single-channel edge image from ``cv2.Canny``.
    """
    rgb = np.array(image.convert("RGB"))
    grayscale = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    edge_map = cv2.Canny(grayscale, 100, 200)
    return Image.fromarray(edge_map)
# Segment garment using SAM
def segment_garment(image, predictor):
    """Cut out the garment region of *image* using SAM point prompts.

    Three positive point prompts are placed along the vertical centerline
    (upper torso, center, lower torso) to bias the predicted mask toward
    the clothing worn by the subject.

    Args:
        image: PIL image of a person/garment.
        predictor: an initialized ``SamPredictor``.

    Returns:
        PIL.Image: original pixels where the mask is set, black elsewhere.
    """
    rgb = np.array(image.convert("RGB"))
    predictor.set_image(rgb)

    h, w = rgb.shape[:2]
    # Positive prompts down the vertical centerline of the photo.
    prompts = np.array([
        [w // 2, h // 3],      # upper torso
        [w // 2, h // 2],      # center
        [w // 2, 2 * h // 3],  # lower torso
    ])
    labels = np.array([1, 1, 1])  # 1 == foreground prompt

    # Single best mask for the combined prompts.
    masks, _, _ = predictor.predict(
        point_coords=prompts,
        point_labels=labels,
        multimask_output=False,
    )

    # Scale the boolean mask to 0/255 and (defensively) resize to the
    # image dimensions; SAM typically already returns it at full size.
    mask_u8 = cv2.resize(masks[0].astype(np.uint8) * 255, (w, h), interpolation=cv2.INTER_NEAREST)
    mask_3ch = np.stack([mask_u8, mask_u8, mask_u8], axis=-1)

    # Keep original pixels inside the mask, zero out the background.
    cutout = np.where(mask_3ch > 0, rgb, 0)
    return Image.fromarray(cutout)
# AI garment analysis
def analyze_garment(image, style_pref=None, feedback=None, generate_variations=False):
    """Ask Gemini to analyze a garment image and suggest styling ideas.

    Args:
        image: PIL image to analyze (sent as base64-encoded inline PNG).
        style_pref: optional free-text style preference from the user.
        feedback: optional user feedback on a previous analysis.
        generate_variations: when True, also request 3 design variations.

    Returns:
        str: the model's text response, or "Analysis failed." on an
        empty response object.
    """
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    b64_png = base64.b64encode(buffer.getvalue()).decode("utf-8")

    # Optional prompt fragments, appended only when the caller supplied them.
    extras = ""
    if style_pref:
        extras += f"\nStyle preference: {style_pref}"
    if feedback:
        extras += f"\nUser feedback: {feedback}"
    if generate_variations:
        extras += "\nGenerate 3 creative variations based on this mixed or hybrid style."

    base_text = (
        "You are a professional fashion consultant. "
        "Analyze the uploaded garment image and describe its style, fabric, and design elements. "
        "Then, suggest complementary pieces and occasions to wear it. "
        "The user might provide unique or combined style preferences such as 'elegant streetwear' or 'casual luxury'. "
        "In such cases, blend both styles and explain how to balance or merge them."
    )

    prompt = {
        "parts": [
            {"text": base_text + extras},
            {"inline_data": {"mime_type": "image/png", "data": b64_png}},
        ]
    }

    response = model.generate_content(prompt)
    return response.text if response else "Analysis failed."
# Load the SAM predictor at module import time so the UI code below can use it.
# NOTE(review): Streamlit re-executes this whole script on each interaction,
# so this presumably reloads the checkpoint on every rerun unless
# load_sam_model caches internally — confirm and consider st.cache_resource.
sam_predictor = load_sam_model()
# ---------------- Streamlit UI ----------------
st.title("👗 AI Fashion Analysis & Design Studio")
st.markdown("""
### 📌 How to Use this App:
1. Upload a fashion image or sketch.
2. Preprocess to see edge detection and garment segmentation.
3. Provide your style preferences or feedback.
4. Analyze and generate new fashion ideas using AI!
""")

uploaded_file = st.file_uploader("📤 Upload a fashion image (PNG, JPG, JPEG)", type=["jpg", "png", "jpeg"])

if uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="🖼️ Uploaded Image", use_container_width=True)

    # Session state setup: persist derived images and the latest AI answer
    # across Streamlit reruns so one button click doesn't wipe earlier results.
    if "processed_image" not in st.session_state:
        st.session_state.processed_image = None
    if "segmented_image" not in st.session_state:
        st.session_state.segmented_image = None
    if "last_analysis_result" not in st.session_state:
        st.session_state.last_analysis_result = None

    if st.button("⚙️ Preprocess Image"):
        st.session_state.processed_image = preprocess_image(image)
        st.session_state.segmented_image = segment_garment(image, sam_predictor)

    if st.session_state.processed_image:
        st.image(st.session_state.processed_image, caption="🧠 Edge Detection", use_container_width=True)
    if st.session_state.segmented_image:
        st.image(st.session_state.segmented_image, caption="✂️ Segmented Garment", use_container_width=True)

    # 🧑‍🤝‍🧑 Human-AI Interaction
    st.markdown("### 🧠 Customize Your Style & Get AI Suggestions")
    style_pref = st.text_input("🔖 Describe your style preference (e.g., elegant, streetwear, minimalist)")
    generate_variations = st.checkbox("🎨 Generate design variations")

    if st.button("🤖 Analyze & Collaborate with AI"):
        # BUG FIX: the checkbox value was read but never forwarded, so
        # "Generate design variations" had no effect on the first analysis.
        result = analyze_garment(image, style_pref, generate_variations=generate_variations)
        st.session_state.last_analysis_result = result
        st.success(result)

        # Fabric suggestion extraction (optional basic parsing of the reply).
        st.markdown("### 🧵 Suggested Fabrics / Materials:")
        for line in result.splitlines():
            if "fabric" in line.lower() or "material" in line.lower():
                st.info(line)

    # Feedback is offered only AFTER at least one analysis has run.
    if st.session_state.last_analysis_result:
        st.markdown("### 🗣️ Give Feedback to Improve AI Suggestions")
        feedback = st.text_input("What would you like to adjust or improve?", key="feedback_input")
        if st.button("🔁 Refine Based on Feedback"):
            updated_result = analyze_garment(image, style_pref, feedback, generate_variations)
            st.session_state.last_analysis_result = updated_result
            st.success(updated_result)