# Stylemixing / app.py
# (Hugging Face Spaces page chrome captured by the raw export — author
#  sahadev10, commit "Update app.py" 8efbff9 verified, 6.27 kB. Kept as a
#  comment so the file remains valid Python.)
# import gradio as gr
# import torch
# import numpy as np
# from PIL import Image
# import os
# import legacy
# import torch_utils
# # Load the pre-trained StyleGAN model
# device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# model_path = 'dress_model.pkl' # Place your .pkl in the same directory or update path
# # Load StyleGAN Generator
# with open(model_path, 'rb') as f:
# G = legacy.load_network_pkl(f)['G_ema'].to(device)
# def mix_styles(image1_path, image2_path, styles_to_mix):
# # Extract image names (without extensions)
# image1_name = os.path.splitext(os.path.basename(image1_path))[0]
# image2_name = os.path.splitext(os.path.basename(image2_path))[0]
# # Load latent vectors from .npz
# latent_vector_1 = np.load(os.path.join("projection_results", image1_name, "projected_w.npz"))['w']
# latent_vector_2 = np.load(os.path.join("projection_results", image2_name, "projected_w.npz"))['w']
# # Convert to torch tensors
# latent_1_tensor = torch.from_numpy(latent_vector_1).to(device)
# latent_2_tensor = torch.from_numpy(latent_vector_2).to(device)
# # Mix layers
# mixed_latent = latent_1_tensor.clone()
# mixed_latent[:, styles_to_mix] = latent_2_tensor[:, styles_to_mix]
# # Generate image
# with torch.no_grad():
# image = G.synthesis(mixed_latent, noise_mode='const')
# # Convert to image
# image = (image.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()
# mixed_image = Image.fromarray(image[0], 'RGB')
# return mixed_image
# def style_mixing_interface(image1, image2, mix_value):
# if image1 is None or image2 is None:
# return None
# selected_layers = list(range(mix_value + 1))
# return mix_styles(image1, image2, selected_layers)
# # Gradio UI
# iface = gr.Interface(
# fn=style_mixing_interface,
# inputs=[
# gr.Image(label="First Clothing Image", type="filepath"),
# gr.Image(label="Second Clothing Image", type="filepath"),
# gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)
# ],
# outputs=gr.Image(label="Mixed Clothing Design"),
# live=True,
# title="Style Mixing for Clothing Design",
# description="Upload two projected images and choose how many early layers to mix."
# )
# iface.launch()
import gradio as gr
import torch
import numpy as np
from PIL import Image
import os
import legacy
import torch_utils
import requests
import io
import warnings
# Suppress deprecated torch warnings
# NOTE(review): this silences *every* warning process-wide, not just torch's
# deprecation warnings — confirm that is intended.
warnings.filterwarnings("ignore")

# --- Load the pre-trained StyleGAN model ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model_path = 'dress_model.pkl'  # pickled StyleGAN network, expected next to this script
# `legacy` is the NVIDIA StyleGAN repo's loader; 'G_ema' is the
# exponential-moving-average generator used for inference.
with open(model_path, 'rb') as f:
    G = legacy.load_network_pkl(f)['G_ema'].to(device)
def mix_styles(image1_path, image2_path, styles_to_mix):
    """Blend two previously-projected garments in the generator's W+ space.

    Looks up each image's projected latents under
    ``projection_results/<image stem>/projected_w.npz``, copies the layers
    listed in ``styles_to_mix`` from the second latent into the first, and
    synthesizes the result with the global generator ``G``.

    Args:
        image1_path: Path of the base image (only its stem is used).
        image2_path: Path of the style-donor image (only its stem is used).
        styles_to_mix: Iterable of W+ layer indices to take from image 2.

    Returns:
        PIL.Image.Image: The synthesized mixed design.
    """
    stem_a = os.path.splitext(os.path.basename(image1_path))[0]
    stem_b = os.path.splitext(os.path.basename(image2_path))[0]

    # Latents were saved by the projection step as an 'w' array in an .npz.
    w_a = np.load(os.path.join("projection_results", stem_a, "projected_w.npz"))['w']
    w_b = np.load(os.path.join("projection_results", stem_b, "projected_w.npz"))['w']

    base = torch.from_numpy(w_a).to(device)
    donor = torch.from_numpy(w_b).to(device)

    # Start from the base latent and overwrite the selected layers.
    blended = base.clone()
    blended[:, styles_to_mix] = donor[:, styles_to_mix]

    with torch.no_grad():
        raw = G.synthesis(blended, noise_mode='const')

    # Map [-1, 1] float output to uint8 RGB (standard StyleGAN postprocessing).
    pixels = (raw.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255)
    pixels = pixels.to(torch.uint8).cpu().numpy()
    return Image.fromarray(pixels[0], 'RGB')
def style_mixing_interface(image1, image2, mix_value):
    """Gradio adapter: mix the first ``mix_value + 1`` layers of two images.

    Args:
        image1: Filepath of the base image, or None if not uploaded yet.
        image2: Filepath of the donor image, or None if not uploaded yet.
        mix_value: Highest layer index to mix (layers 0..mix_value inclusive).

    Returns:
        (PIL.Image.Image, io.BytesIO): the mixed image and its PNG bytes,
        or (None, None) when either input is missing.
    """
    if image1 is None or image2 is None:
        return None, None

    layers = list(range(mix_value + 1))
    mixed = mix_styles(image1, image2, layers)

    # Keep a PNG copy in memory so the upload handler can send it later.
    png_buf = io.BytesIO()
    mixed.save(png_buf, format="PNG")
    png_buf.seek(0)
    return mixed, png_buf
def send_to_backend(image_buffer, user_id):
    """Upload the generated PNG to the customisation backend for *user_id*.

    Args:
        image_buffer: io.BytesIO holding the PNG produced by the mixer, or
            None if no image has been generated yet.
        user_id: Backend user id parsed from the page URL; empty/None means
            the page was opened without a ``user_id`` query parameter.

    Returns:
        str: Human-readable status line shown in the "Save Status" textbox.
        Never raises — all failures are reported as a status string.
    """
    if not user_id:
        return "❌ user_id not found in URL."
    # Guard against clicking "Save" before any image has been mixed.
    if image_buffer is None:
        return "❌ No generated image to upload — mix styles first."
    try:
        # Rewind so a second click re-sends the full PNG, not zero bytes.
        image_buffer.seek(0)
        files = {'file': ('generated_image.png', image_buffer, 'image/png')}
        url = f"https://361d-103-40-74-78.ngrok-free.app/customisation/upload/{user_id}"
        # timeout keeps a dead/ngrok-expired backend from hanging the UI.
        response = requests.post(url, files=files, timeout=30)
        if response.status_code == 201:
            return "✅ Image uploaded and saved to database!"
        return f"❌ Upload failed: {response.status_code} - {response.text}"
    except Exception as e:
        return f"⚠️ Error: {str(e)}"
# --- Gradio UI ---
# --- Gradio UI ---
# NOTE(review): indentation was lost in this paste; row membership of the
# slider/status widgets below is inferred from the original layout intent.
with gr.Blocks(title="Style Mixing for Clothing Design") as iface:
    # Holds the user_id parsed from the page URL (filled in by the load hook).
    user_id_state = gr.State()

    gr.Markdown("## Style Mixing for Clothing Design\nUpload two projected clothing images and mix their styles.")

    with gr.Row():
        garment_a = gr.Image(label="First Clothing Image", type="filepath")
        garment_b = gr.Image(label="Second Clothing Image", type="filepath")
    layer_slider = gr.Slider(label="Style Mixing Strength (Layers 0 to N)", minimum=0, maximum=9, step=1, value=5)

    with gr.Row():
        mixed_preview = gr.Image(label="Mixed Clothing Design")
        upload_btn = gr.Button("Download & Save to Database")

    png_state = gr.State()  # in-memory PNG buffer for the upload handler
    upload_status = gr.Textbox(label="Save Status", interactive=False)

    # Re-mix whenever the slider moves; the handler returns both the preview
    # image and the PNG bytes to stash for a later upload.
    layer_slider.change(
        style_mixing_interface,
        inputs=[garment_a, garment_b, layer_slider],
        outputs=[mixed_preview, png_state]
    )
    upload_btn.click(
        send_to_backend,
        inputs=[png_state, user_id_state],
        outputs=[upload_status]
    )
def on_load(request: gr.Request):
    """Read ``?user_id=...`` from the page URL so uploads can be attributed.

    Returns the raw id string ("" when absent); Gradio writes it into
    ``user_id_state``. Registered exactly once below — the original used
    ``@iface.load`` as a bare decorator AND called ``iface.load(on_load, ...)``,
    which registered the handler twice and passed the decorator's Dependency
    return value (not a function) on the second call. A ``gr.State`` output
    also takes the plain value, not ``gr.update(...)``.
    """
    return request.query_params.get("user_id", "")

# Populate user_id_state from the URL query string when the page first loads.
iface.load(on_load, None, [user_id_state])

iface.launch()