# import gradio as gr
# import torch
# import numpy as np
# from PIL import Image
# import os
# import legacy
# import torch_utils
# # Load the pre-trained StyleGAN model
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model_path = 'dress_model.pkl' # Place your .pkl in the same directory or update path
# # Load StyleGAN Generator
# with open(model_path, 'rb') as f:
# G = legacy.load_network_pkl(f)['G_ema'].to(device)
# def mix_styles(image1_path, image2_path, styles_to_mix):
# # Extract image names (without extensions)
# image1_name = os.path.splitext(os.path.basename(image1_path))[0]
# image2_name = os.path.splitext(os.path.basename(image2_path))[0]
# # Load latent vectors from .npz
# latent_vector_1 = np.load(os.path.join("projection_results", image1_name, "projected_w.npz"))['w']
# latent_vector_2 = np.load(os.path.join("projection_results", image2_name, "projected_w.npz"))['w']
# # Convert to torch tensors
# latent_1_tensor = torch.from_numpy(latent_vector_1).to(device)
# latent_2_tensor = torch.from_numpy(latent_vector_2).to(device)
# # Mix layers
# mixed_latent = latent_1_tensor.clone()
# mixed_latent[:, styles_to_mix] = latent_2_tensor[:, styles_to_mix]
# # Generate image
# with torch.no_grad():
# image = G.synthesis(mixed_latent, noise_mode='const')
# # Convert to image
# image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
# mixed_image = Image.fromarray(image[0], 'RGB')
# return mixed_image
# def style_mixing_interface(image1, image2, mix_value):
# if image1 is None or image2 is None:
# return None
# selected_layers = list(range(mix_value + 1))
# return mix_styles(image1, image2, selected_layers)
# # Gradio UI
# iface = gr.Interface(
# fn=style_mixing_interface,
# inputs=[
# gr.Image(label="First Clothing Image", type="filepath"),
# gr.Image(label="Second Clothing Image", type="filepath"),
# gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)
# ],
# outputs=gr.Image(label="Mixed Clothing Design"),
# live=True,
# title="Style Mixing for Clothing Design",
# description="Upload two projected images and choose how many early layers to mix."
# )
# iface.launch()
import gradio as gr
import torch
import numpy as np
from PIL import Image
import os
import legacy       # NVIDIA StyleGAN repo-local module — presumably vendored next to this script; verify
import torch_utils  # imported so the StyleGAN pickle can resolve its classes on unpickle — TODO confirm
import requests
import io
import warnings
# Suppress deprecated torch warnings
# NOTE(review): this blanket filter silences ALL warnings, not just torch deprecations.
warnings.filterwarnings("ignore")
# --- Load the pre-trained StyleGAN model ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_path = 'dress_model.pkl'  # fine-tuned generator pickle, expected next to this script
with open(model_path, 'rb') as f:
    # 'G_ema' is the exponential-moving-average generator inside the StyleGAN pickle.
    G = legacy.load_network_pkl(f)['G_ema'].to(device)
def mix_styles(image1_path, image2_path, styles_to_mix):
    """Blend two projected garments by swapping StyleGAN W+ layers.

    Parameters
    ----------
    image1_path, image2_path : str
        Paths to the uploaded images. Only the base filename (without
        extension) is used, to locate a pre-computed latent at
        ``projection_results/<name>/projected_w.npz`` — the images are
        assumed to have been projected beforehand (TODO confirm).
    styles_to_mix : list[int]
        Indices of the W+ layers to copy from the second latent into
        the first.

    Returns
    -------
    PIL.Image.Image
        The synthesized mixed design.
    """
    image1_name = os.path.splitext(os.path.basename(image1_path))[0]
    image2_name = os.path.splitext(os.path.basename(image2_path))[0]

    # np.load on an .npz returns an open NpzFile; close it deterministically
    # with a context manager instead of leaking the file handle.
    with np.load(os.path.join("projection_results", image1_name, "projected_w.npz")) as npz1:
        latent_vector_1 = npz1['w']
    with np.load(os.path.join("projection_results", image2_name, "projected_w.npz")) as npz2:
        latent_vector_2 = npz2['w']

    latent_1_tensor = torch.from_numpy(latent_vector_1).to(device)
    latent_2_tensor = torch.from_numpy(latent_vector_2).to(device)

    # Start from image 1's latent and overwrite the selected layers with
    # image 2's — the classic StyleGAN style-mixing operation.
    mixed_latent = latent_1_tensor.clone()
    mixed_latent[:, styles_to_mix] = latent_2_tensor[:, styles_to_mix]

    with torch.no_grad():
        image = G.synthesis(mixed_latent, noise_mode='const')

    # NCHW float in roughly [-1, 1] -> HWC uint8 for PIL.
    image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
    return Image.fromarray(image[0], 'RGB')
def style_mixing_interface(image1, image2, mix_value):
    """Gradio callback: mix the first ``mix_value + 1`` W+ layers of two designs.

    Parameters
    ----------
    image1, image2 : str | None
        Filepaths from the two image inputs; ``None`` when not yet uploaded.
    mix_value : int | float
        Slider value N — layers 0..N are taken from the second image.

    Returns
    -------
    tuple
        ``(PIL image, in-memory PNG buffer)``, or ``(None, None)`` when
        either input image is missing.
    """
    if image1 is None or image2 is None:
        return None, None
    # Slider values may arrive as floats; range() requires an int.
    selected_layers = list(range(int(mix_value) + 1))
    mixed_img = mix_styles(image1, image2, selected_layers)
    # Keep a PNG copy in memory so the save button can upload the exact
    # result without re-running synthesis.
    buffer = io.BytesIO()
    mixed_img.save(buffer, format="PNG")
    buffer.seek(0)
    return mixed_img, buffer
def send_to_backend(image_buffer, user_id):
    """Upload the generated PNG to the backend for *user_id*.

    Parameters
    ----------
    image_buffer : io.BytesIO | None
        In-memory PNG produced by ``style_mixing_interface``.
    user_id : str
        Taken from the page's query string; empty when absent.

    Returns
    -------
    str
        Human-readable status message for the UI textbox.
    """
    if not user_id:
        return "❌ user_id not found in URL."
    try:
        files = {'file': ('generated_image.png', image_buffer, 'image/png')}
        # NOTE(review): hardcoded ngrok tunnel — these URLs expire; move to config/env.
        url = f"https://361d-103-40-74-78.ngrok-free.app/customisation/upload/{user_id}"
        # Without a timeout, requests.post can block forever and freeze the UI.
        response = requests.post(url, files=files, timeout=30)
        if response.status_code == 201:
            return "✅ Image uploaded and saved to database!"
        else:
            return f"❌ Upload failed: {response.status_code} - {response.text}"
    except Exception as e:
        return f"⚠️ Error: {str(e)}"
# --- Gradio UI ---
with gr.Blocks(title="Style Mixing for Clothing Design") as iface:
    user_id_state = gr.State()  # filled from ?user_id=... on page load
    gr.Markdown("## Style Mixing for Clothing Design\nUpload two projected clothing images and mix their styles.")

    with gr.Row():
        image1_input = gr.Image(label="First Clothing Image", type="filepath")
        image2_input = gr.Image(label="Second Clothing Image", type="filepath")
    mix_slider = gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)

    with gr.Row():
        output_image = gr.Image(label="Mixed Clothing Design")
        save_button = gr.Button("Download & Save to Database")

    image_buffer = gr.State()  # PNG buffer of the latest mix, for upload
    save_status = gr.Textbox(label="Save Status", interactive=False)

    # Re-mix whenever the slider moves OR either image changes — previously
    # only the slider triggered a re-mix, so swapping an image left a stale
    # output on screen. style_mixing_interface is used directly; the old
    # mix_and_store wrapper was pure delegation.
    for event in (mix_slider.change, image1_input.change, image2_input.change):
        event(
            style_mixing_interface,
            inputs=[image1_input, image2_input, mix_slider],
            outputs=[output_image, image_buffer],
        )

    save_button.click(
        send_to_backend,
        inputs=[image_buffer, user_id_state],
        outputs=[save_status],
    )

    def on_load(request: gr.Request):
        """Read user_id from the page's query string on initial load."""
        # A gr.State output takes the raw value; gr.update() is unnecessary.
        return request.query_params.get("user_id", "")

    # Register the load handler exactly once. The previous code used
    # @iface.load as a bare decorator (which rebinds on_load to the event
    # dependency) AND then called iface.load(on_load, ...) again — a double
    # registration passing a non-callable as fn.
    iface.load(on_load, None, [user_id_state])

iface.launch()