Spaces:
Sleeping
Sleeping
File size: 6,024 Bytes
7ef7b02 caef22e 7ef7b02 caef22e 7ef7b02 caef22e 05527ba caef22e 7ef7b02 caef22e 05527ba 7ef7b02 caef22e c93638f caef22e 7ef7b02 c93638f caef22e c93638f 05527ba caef22e c93638f 7ef7b02 c93638f 7ef7b02 caef22e 05527ba 7ef7b02 caef22e 7ef7b02 05527ba d26f75e 05527ba 7ef7b02 05527ba d26f75e 05527ba 7ef7b02 05527ba 7ef7b02 b86d733 caef22e 7ef7b02 05527ba caef22e 05527ba b86d733 7ef7b02 05527ba 7ef7b02 05527ba 7ef7b02 05527ba 7ef7b02 05527ba caef22e d26f75e caef22e 05527ba caef22e 05527ba caef22e 05527ba d26f75e 05527ba d26f75e |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 |
import base64
import os
from io import BytesIO

import cv2
import numpy as np
import streamlit as st
from google.generativeai import configure, GenerativeModel
from PIL import Image
from segment_anything import sam_model_registry, SamPredictor
# --- Gemini API configuration ---
# SECURITY: the original file hard-coded a literal API key here. A key
# committed to source control is public and must be revoked/rotated.
# The key is now read from the GEMINI_API_KEY environment variable.
configure(api_key=os.environ.get("GEMINI_API_KEY", ""))
model = GenerativeModel("gemini-2.0-flash")
# Load SAM model with ViT-Base
@st.cache_resource
def load_sam_model(checkpoint: str = "sam_vit_b.pth"):
    """Build a SamPredictor backed by the SAM ViT-B checkpoint.

    Decorated with ``st.cache_resource`` so the heavy model is loaded once
    per server process instead of on every Streamlit rerun (the script
    re-executes top-to-bottom on each user interaction).

    Args:
        checkpoint: Path to the SAM ViT-B weights file.

    Returns:
        A ``SamPredictor`` ready for ``set_image()`` / ``predict()`` calls.
    """
    sam = sam_model_registry["vit_b"](checkpoint=checkpoint)
    return SamPredictor(sam)
# Preprocess image using OpenCV (Edge Detection & Background Removal)
def preprocess_image(image, low_threshold: int = 100, high_threshold: int = 200):
    """Return a Canny edge map of *image* as a PIL image.

    The Canny hysteresis thresholds were previously hard-coded (100/200);
    they are now defaulted parameters, so existing callers are unaffected
    while new callers can tune edge sensitivity.

    Args:
        image: PIL image (any mode; converted to RGB first).
        low_threshold: Lower hysteresis threshold for ``cv2.Canny``.
        high_threshold: Upper hysteresis threshold for ``cv2.Canny``.

    Returns:
        Single-channel PIL ``Image`` containing the detected edges.
    """
    rgb = np.array(image.convert("RGB"))
    gray = cv2.cvtColor(rgb, cv2.COLOR_RGB2GRAY)
    edges = cv2.Canny(gray, low_threshold, high_threshold)
    return Image.fromarray(edges)
# Segment garment using SAM
def segment_garment(image, predictor):
    """Segment the garment region of *image* with SAM point prompts.

    Three positive point prompts are placed along the vertical midline
    (upper torso, center, lower torso) to bias SAM toward selecting the
    worn garment rather than the background.

    Args:
        image: PIL image of a person/garment.
        predictor: A ``SamPredictor`` whose model will process the image.

    Returns:
        PIL ``Image`` with non-garment pixels zeroed out (black background).
    """
    image_np = np.array(image.convert("RGB"))
    predictor.set_image(image_np)

    height, width, _ = image_np.shape
    input_points = np.array([
        [width // 2, height // 3],      # upper torso
        [width // 2, height // 2],      # center
        [width // 2, 2 * height // 3],  # lower torso
    ])
    input_labels = np.array([1, 1, 1])  # all positive prompts

    # multimask_output=False yields a single best mask; the scores and
    # logits outputs are not used here, so they are discarded.
    masks, _, _ = predictor.predict(
        point_coords=input_points,
        point_labels=input_labels,
        multimask_output=False,
    )

    # NOTE(review): SAM masks are presumably already at the input
    # resolution, making this resize a defensive no-op — kept as-is
    # to preserve behavior; confirm before removing.
    mask = masks[0]
    mask_resized = cv2.resize(mask.astype(np.uint8) * 255, (width, height), interpolation=cv2.INTER_NEAREST)
    mask_rgb = np.stack([mask_resized] * 3, axis=-1)

    # Keep original pixels inside the mask, zero everywhere else.
    segmented = np.where(mask_rgb > 0, image_np, 0)
    return Image.fromarray(segmented)
# AI garment analysis
def analyze_garment(image, style_pref=None, feedback=None, generate_variations=False):
    """Ask Gemini to analyze a garment image and suggest styling ideas.

    The image is PNG-encoded and sent inline (base64) alongside a fashion
    consultant prompt, optionally extended with the user's style
    preference, feedback, and a request for design variations.

    Args:
        image: PIL image of the garment to analyze.
        style_pref: Optional free-text style preference from the user.
        feedback: Optional user feedback on a previous analysis.
        generate_variations: When True, asks for 3 creative variations.

    Returns:
        The model's text response, or "Analysis failed." on an empty response.
    """
    buffer = BytesIO()
    image.save(buffer, format="PNG")
    encoded_image = base64.b64encode(buffer.getvalue()).decode("utf-8")

    # Optional prompt suffixes, appended in a fixed order.
    extras = []
    if style_pref:
        extras.append(f"\nStyle preference: {style_pref}")
    if feedback:
        extras.append(f"\nUser feedback: {feedback}")
    if generate_variations:
        extras.append("\nGenerate 3 creative variations based on this mixed or hybrid style.")

    base_text = (
        "You are a professional fashion consultant. "
        "Analyze the uploaded garment image and describe its style, fabric, and design elements. "
        "Then, suggest complementary pieces and occasions to wear it. "
        "The user might provide unique or combined style preferences such as 'elegant streetwear' or 'casual luxury'. "
        "In such cases, blend both styles and explain how to balance or merge them."
    )

    prompt = {
        "parts": [
            {"text": base_text + "".join(extras)},
            {"inline_data": {"mime_type": "image/png", "data": encoded_image}},
        ]
    }

    response = model.generate_content(prompt)
    return response.text if response else "Analysis failed."
# Load SAM model (module scope: built when the script process starts).
sam_predictor = load_sam_model()

# ---------------- Streamlit UI ----------------
st.title("π AI Fashion Analysis & Design Studio")
st.markdown("""
### π How to Use this App:
1. Upload a fashion image or sketch.
2. Preprocess to see edge detection and garment segmentation.
3. Provide your style preferences or feedback.
4. Analyze and generate new fashion ideas using AI!
""")

uploaded_file = st.file_uploader("π€ Upload a fashion image (PNG, JPG, JPEG)", type=["jpg", "png", "jpeg"])

if uploaded_file:
    image = Image.open(uploaded_file)
    st.image(image, caption="πΌοΈ Uploaded Image", use_container_width=True)

    # Session state setup so results survive Streamlit reruns.
    if "processed_image" not in st.session_state:
        st.session_state.processed_image = None
    if "segmented_image" not in st.session_state:
        st.session_state.segmented_image = None
    if "last_analysis_result" not in st.session_state:
        st.session_state.last_analysis_result = None

    if st.button("βοΈ Preprocess Image"):
        st.session_state.processed_image = preprocess_image(image)
        st.session_state.segmented_image = segment_garment(image, sam_predictor)

    if st.session_state.processed_image:
        st.image(st.session_state.processed_image, caption="π§ Edge Detection", use_container_width=True)
    if st.session_state.segmented_image:
        st.image(st.session_state.segmented_image, caption="βοΈ Segmented Garment", use_container_width=True)

    # π§βπ€βπ§ Human-AI Interaction
    st.markdown("### π§ Customize Your Style & Get AI Suggestions")
    style_pref = st.text_input("π Describe your style preference (e.g., elegant, streetwear, minimalist)")
    generate_variations = st.checkbox("π¨ Generate design variations")

    if st.button("π€ Analyze & Collaborate with AI"):
        # BUG FIX: the checkbox value was collected but never forwarded,
        # so variation requests were silently dropped on first analysis.
        result = analyze_garment(image, style_pref, generate_variations=generate_variations)
        st.session_state.last_analysis_result = result
        st.success(result)

        # Fabric suggestion extraction (Optional basic parsing)
        st.markdown("### π§΅ Suggested Fabrics / Materials:")
        for line in result.splitlines():
            if "fabric" in line.lower() or "material" in line.lower():
                st.info(line)

    # Feedback AFTER analysis
    if st.session_state.last_analysis_result:
        st.markdown("### π£οΈ Give Feedback to Improve AI Suggestions")
        feedback = st.text_input("What would you like to adjust or improve?", key="feedback_input")
        if st.button("π Refine Based on Feedback"):
            updated_result = analyze_garment(image, style_pref, feedback, generate_variations)
            st.session_state.last_analysis_result = updated_result
            st.success(updated_result)