sahadev10 commited on
Commit
5d4f45d
·
verified ·
1 Parent(s): ee6b687

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +116 -14
app.py CHANGED
@@ -1,3 +1,71 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
  import torch
3
  import numpy as np
@@ -5,6 +73,9 @@ from PIL import Image
5
  import os
6
  import legacy
7
  import torch_utils
 
 
 
8
 
9
  # Load the pre-trained StyleGAN model
10
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
@@ -42,23 +113,54 @@ def mix_styles(image1_path, image2_path, styles_to_mix):
42
 
43
  def style_mixing_interface(image1, image2, mix_value):
44
  if image1 is None or image2 is None:
45
- return None
 
46
  selected_layers = list(range(mix_value + 1))
47
- return mix_styles(image1, image2, selected_layers)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
48
 
49
  # Gradio UI
50
- iface = gr.Interface(
51
- fn=style_mixing_interface,
52
- inputs=[
53
- gr.Image(label="First Clothing Image", type="filepath"),
54
- gr.Image(label="Second Clothing Image", type="filepath"),
55
- gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)
56
- ],
57
- outputs=gr.Image(label="Mixed Clothing Design"),
58
- live=True,
59
- title="Style Mixing for Clothing Design",
60
- description="Upload two projected images and choose how many early layers to mix."
61
- )
 
 
 
 
 
 
 
62
 
 
63
 
64
  iface.launch()
 
1
+ # import gradio as gr
2
+ # import torch
3
+ # import numpy as np
4
+ # from PIL import Image
5
+ # import os
6
+ # import legacy
7
+ # import torch_utils
8
+
9
+ # # Load the pre-trained StyleGAN model
10
+ # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
11
+ # model_path = 'dress_model.pkl' # Place your .pkl in the same directory or update path
12
+
13
+ # # Load StyleGAN Generator
14
+ # with open(model_path, 'rb') as f:
15
+ # G = legacy.load_network_pkl(f)['G_ema'].to(device)
16
+
17
+ # def mix_styles(image1_path, image2_path, styles_to_mix):
18
+ # # Extract image names (without extensions)
19
+ # image1_name = os.path.splitext(os.path.basename(image1_path))[0]
20
+ # image2_name = os.path.splitext(os.path.basename(image2_path))[0]
21
+
22
+ # # Load latent vectors from .npz
23
+ # latent_vector_1 = np.load(os.path.join("projection_results", image1_name, "projected_w.npz"))['w']
24
+ # latent_vector_2 = np.load(os.path.join("projection_results", image2_name, "projected_w.npz"))['w']
25
+
26
+ # # Convert to torch tensors
27
+ # latent_1_tensor = torch.from_numpy(latent_vector_1).to(device)
28
+ # latent_2_tensor = torch.from_numpy(latent_vector_2).to(device)
29
+
30
+ # # Mix layers
31
+ # mixed_latent = latent_1_tensor.clone()
32
+ # mixed_latent[:, styles_to_mix] = latent_2_tensor[:, styles_to_mix]
33
+
34
+ # # Generate image
35
+ # with torch.no_grad():
36
+ # image = G.synthesis(mixed_latent, noise_mode='const')
37
+
38
+ # # Convert to image
39
+ # image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
40
+ # mixed_image = Image.fromarray(image[0], 'RGB')
41
+ # return mixed_image
42
+
43
+ # def style_mixing_interface(image1, image2, mix_value):
44
+ # if image1 is None or image2 is None:
45
+ # return None
46
+ # selected_layers = list(range(mix_value + 1))
47
+ # return mix_styles(image1, image2, selected_layers)
48
+
49
+ # # Gradio UI
50
+ # iface = gr.Interface(
51
+ # fn=style_mixing_interface,
52
+ # inputs=[
53
+ # gr.Image(label="First Clothing Image", type="filepath"),
54
+ # gr.Image(label="Second Clothing Image", type="filepath"),
55
+ # gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)
56
+ # ],
57
+ # outputs=gr.Image(label="Mixed Clothing Design"),
58
+ # live=True,
59
+ # title="Style Mixing for Clothing Design",
60
+ # description="Upload two projected images and choose how many early layers to mix."
61
+ # )
62
+
63
+
64
+ # iface.launch()
65
+
66
+
67
+
68
+
69
  import gradio as gr
70
  import torch
71
  import numpy as np
 
73
  import os
74
  import legacy
75
  import torch_utils
76
+ import requests
77
+ import io
78
+ import base64
79
 
80
  # Load the pre-trained StyleGAN model
81
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
113
 
114
def style_mixing_interface(image1, image2, mix_value):
    """Blend the styles of two projected images for the Gradio UI.

    Parameters
    ----------
    image1, image2 : str or None
        File paths of the two projected clothing images (``None`` when the
        user has not uploaded one yet).
    mix_value : int
        Highest early layer index to take from the second image; layers
        0..mix_value inclusive are mixed.

    Returns
    -------
    tuple
        ``(PIL image, base64 PNG string)`` on success, or ``(None, None)``
        when either input image is missing.
    """
    # Guard: nothing to do until both images are present.
    if image1 is None or image2 is None:
        return None, None

    layers_to_mix = list(range(mix_value + 1))
    blended = mix_styles(image1, image2, layers_to_mix)

    # Serialize the PIL result to a base64-encoded PNG so the hidden
    # textbox can carry it to the backend-save handler.
    png_buffer = io.BytesIO()
    blended.save(png_buffer, format="PNG")
    encoded = base64.b64encode(png_buffer.getvalue()).decode("utf-8")

    return blended, encoded
128
+
129
def send_to_backend(base64_img):
    """POST a base64-encoded PNG to the Node backend for persistence.

    Parameters
    ----------
    base64_img : str
        Base64-encoded PNG produced by ``style_mixing_interface`` (carried
        via the hidden textbox in the UI).

    Returns
    -------
    str
        Human-readable status message for display in the UI.
    """
    # Guard: the hidden textbox is empty until a mix has been generated;
    # the original posted {"image": ""} to the backend in that case.
    if not base64_img:
        return "⚠️ No image to save — generate a mixed design first."
    try:
        response = requests.post(
            "http://localhost:3000/customisation/save",  # Change if using different port/route
            json={"image": base64_img},
            timeout=10,
        )
    except requests.exceptions.RequestException as e:
        # Network-level failure: connection refused, timeout, DNS, etc.
        # (narrowed from a blanket `except Exception`).
        return f"⚠️ Error: {str(e)}"
    if response.status_code == 200:
        return "✅ Saved to database!"
    return f"❌ Failed to save: {response.status_code} - {response.text}"
142
 
143
# Gradio UI: two image uploads + a layer-count slider drive the mixer; a
# hidden textbox carries the base64 payload to the backend-save button.
with gr.Blocks(title="Style Mixing for Clothing Design") as iface:
    gr.Markdown(
        "## Style Mixing for Clothing Design\n"
        "Upload two projected clothing images and select how many early layers to mix."
    )

    with gr.Row():
        image1_input = gr.Image(label="First Clothing Image", type="filepath")
        image2_input = gr.Image(label="Second Clothing Image", type="filepath")

    mix_slider = gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)

    output_image = gr.Image(label="Mixed Clothing Design")
    base64_output = gr.Textbox(visible=False)  # hidden carrier for the base64 PNG

    download_button = gr.Button("Download & Save to Database")
    save_status = gr.Textbox(label="Save Status", interactive=False)

    # BUG FIX: the original wired only mix_slider.change, so uploading or
    # replacing an image never refreshed the output until the slider moved.
    # Re-mix whenever ANY of the three inputs changes. The redundant
    # mix_and_return passthrough wrapper is dropped — style_mixing_interface
    # is called directly.
    mix_inputs = [image1_input, image2_input, mix_slider]
    mix_outputs = [output_image, base64_output]
    for component in mix_inputs:
        component.change(style_mixing_interface, inputs=mix_inputs, outputs=mix_outputs)

    download_button.click(fn=send_to_backend, inputs=[base64_output], outputs=[save_status])

iface.launch()