aikanava committed on
Commit
0e7d289
·
verified ·
1 Parent(s): 55de009

Upload 3 files

Browse files
Files changed (3) hide show
  1. accessories.py +33 -0
  2. app.py +69 -0
  3. requirements.txt +9 -0
accessories.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ def recommend_accessories(style_label):
2
+ style_label = style_label.lower()
3
+
4
+ # Define style groups and their recommendations
5
+ style_groups = {
6
+ "suit": [
7
+ "tie", "cufflinks", "polished dress shoes", "pocket square", "leather belt", "stylish watch"
8
+ ],
9
+ "casual": [
10
+ "trendy sneakers", "denim jackets", "baseball cap"
11
+ ],
12
+ "formal": [
13
+ "elegant watches", "leather belts", "cufflinks"
14
+ ],
15
+ "bohemian": [
16
+ "layered necklaces", "floppy hats", "fringe bags"
17
+ ],
18
+ "sporty": [
19
+ "wristbands", "running shoes", "sweat-wicking fabrics"
20
+ ],
21
+ "vintage": [
22
+ "pearl earrings", "brooches", "classic handbags"
23
+ ]
24
+ }
25
+
26
+ # Try to find the best matching group based on keywords in the style_label
27
+ for group, accessories in style_groups.items():
28
+ if group in style_label:
29
+ rec = ", ".join(accessories)
30
+ return f"Try these accessories to complement your {group} style: {rec}."
31
+
32
+ # Default recommendation if no group matched
33
+ return "Try some trendy accessories like sunglasses and a stylish belt."
app.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from PIL import Image
3
+ import torch
4
+ from transformers import ViTFeatureExtractor, ViTForImageClassification
5
+ from accessories import recommend_accessories
6
+
7
+ # Load ViT Model for style classification
8
+ def load_model():
9
+ feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
10
+ model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
11
+ return feature_extractor, model
12
+
13
+ extractor, model = load_model()
14
+
15
+ def analyze_style(image):
16
+ if image is None:
17
+ return "Please upload an image.", None, None
18
+
19
+ inputs = extractor(images=image, return_tensors="pt")
20
+ with torch.no_grad():
21
+ outputs = model(**inputs)
22
+ predicted_class = outputs.logits.argmax(-1).item()
23
+ style_name = model.config.id2label[predicted_class]
24
+
25
+ style_label = style_name.lower()
26
+ rec = recommend_accessories(style_label)
27
+
28
+ return f"**Predicted Style Class:** {style_name}", rec, image
29
+
30
+ title = "StyleCraft: AI-Enhanced Fashion Designer"
31
+
32
+ description = """
33
+ **StyleCraft** helps fashion enthusiasts and designers analyze garment styles and get accessory & fabric recommendations. Upload a photo or sketch, and let AI do the magic!
34
+
35
+ **How to use:**
36
+ 1. Upload a clear image or sketch of a garment.
37
+ 2. View the predicted style.
38
+ 3. See recommended accessories and fabrics to enhance your design.
39
+ """
40
+
41
+ with gr.Blocks() as demo:
42
+ gr.Markdown(f"# {title}")
43
+ gr.Markdown(description)
44
+
45
+ with gr.Row():
46
+ # Remove paste and webcam by using image upload only:
47
+ image_input = gr.Image(label="Upload a garment image or sketch", type="pil",
48
+ interactive=True, source="upload")
49
+
50
+ with gr.Column():
51
+ style_output = gr.Markdown(label="Style Analysis")
52
+ rec_output = gr.Markdown(label="💍 Accessory & Fabric Recommendation")
53
+ clear_btn = gr.Button("Clear")
54
+
55
+ analyze_button = gr.Button("Analyze Style")
56
+
57
+ analyze_button.click(
58
+ fn=analyze_style,
59
+ inputs=image_input,
60
+ outputs=[style_output, rec_output, image_input],
61
+ )
62
+
63
+ clear_btn.click(
64
+ fn=lambda: ("", "", None),
65
+ inputs=None,
66
+ outputs=[style_output, rec_output, image_input]
67
+ )
68
+
69
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ gradio
2
+ transformers
3
+ torch
4
+ Pillow
5
+ opencv-python
6
+ scikit-learn
7
+ matplotlib
8
+ numpy
9
+ requests