# uhdessai's picture
# Update app.py
# 0f040e8 verified
# import copy
# import numpy as np
# import PIL.Image
# import torch
# import torch.nn.functional as F
# import pickle
# import numpy as np
# import functools
# import matplotlib.pyplot as plt
# import imageio
# import random
# import torchvision.models as models
# from sklearn.decomposition import PCA
# from matplotlib.patches import Rectangle
# import math
# from PIL import Image
# import os
# import time
# import gradio as gr
# import numpy as np
# import dnnlib
# import legacy
# import torch_utils
# import os
# import time
# import copy
# import torch
# import pickle
# import random
# import imageio
# import numpy as np
# import functools
# import matplotlib.pyplot as plt
# from PIL import Image
# from sklearn.decomposition import PCA
# import torch.nn.functional as F
# import torchvision.models as models
# import gradio as gr
# # Load pretrained StyleGAN model
# model_path = r"dressmodel.pkl"
# with open(model_path, "rb") as f:
# G = pickle.load(f)["G_ema"].eval().to("cpu") # moved model to CPU
# # Generate latent vector from seed
# def generateZ_from_seed(seed, G, device='cpu'): # default device is now 'cpu'
# return torch.from_numpy(np.random.RandomState(seed).randn(1, G.z_dim)).to(device)
# # Generate image from latent vector z
# def generate_image_from_z(z, G, truncation_psi=1, noise_mode='none'):
# G.forward = functools.partial(G.forward, force_fp32=True)
# w = G.mapping(z, None, truncation_psi=truncation_psi)
# img = G.synthesis(w, noise_mode=noise_mode, force_fp32=True)
# img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
# return np.expand_dims(w.cpu().numpy()[0, 0], 0), img[0].cpu().numpy()
# # Generate image from w
# def generate_image_from_w(w, G, noise_mode='none'):
# img = G.synthesis(w, noise_mode=noise_mode, force_fp32=True)
# img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
# return img[0].cpu().numpy()
# # Generate w from z
# def generate_w(z, G, truncation_psi=1):
# return G.mapping(z, None, truncation_psi=truncation_psi)
# # Apply PCA directions to w vector
# def E(vectors, vs, rs, ls, w, device='cpu'):
# for v, l, r in zip(vs, ls, rs):
# w[0, l] += vectors[v] * r
# return torch.from_numpy(w).float().to(device)
# # Compute PCA over w latent space
# seed = 0
# z = generateZ_from_seed(seed, G)
# w, _ = generate_image_from_z(z, G)
# N = 100
# for seed in np.arange(1, N):
# z = generateZ_from_seed(seed, G)
# w_seed, _ = generate_image_from_z(z, G)
# w = np.concatenate((w, w_seed), axis=0)
# pca = PCA()
# pca.fit(w)
# vectors = pca.components_
# variances = pca.explained_variance_ratio_
# # Visualize modified image
# def visualize_magnitude_img_inn(G, vectors, direction_ind, layers, magnitudes, seed):
# z = generateZ_from_seed(seed, G)
# w_origin = generate_w(z, G)
# w_modify = E(vectors, direction_ind, magnitudes, layers, w_origin.cpu().numpy())
# img_modify = generate_image_from_w(w_modify, G)
# os.makedirs("result/pca", exist_ok=True)
# timestamp = time.strftime("%Y%m%d-%H%M%S")
# save_path = f"result/pca/pca_direction_in_{direction_ind[0]}_{layers[0][0]}_{timestamp}.png"
# if isinstance(img_modify, np.ndarray):
# img_modify = Image.fromarray(img_modify)
# img_modify.save(save_path)
# return img_modify
# # Attribute options for manipulation
# options = {
# "Sleeve Length": {"direction_ind": [0], "layers": [np.arange(0, 4)]},
# "Neck": {"direction_ind": [2], "layers": [np.arange(0, 7)]},
# "Color": {"direction_ind": [6], "layers": [np.arange(10, 14)]}
# }
# # # Gradio UI callback
# # def generate_image(option, magnitude, seed):
# # selected = options[option]
# # return visualize_magnitude_img_inn(G, vectors, selected["direction_ind"], selected["layers"], [magnitude], seed)
# # # Launch Gradio interface
# # interface = gr.Interface(
# # fn=generate_image,
# # inputs=[
# # gr.Radio(list(options.keys()), value="Sleeve Length", label="Choose Attribute"),
# # gr.Slider(-7, 10, step=0.5, value=0, label="Magnitude"),
# # gr.Number(value=5, label="Seed")
# # ],
# # outputs=gr.Image(type="pil"),
# # title="Clothing Style Manipulation"
# # )
# # interface.launch(debug=True)
# with gr.Blocks() as interface:
# gr.Markdown("## 🎨 StyleGAN Latent Space Manipulation")
# attr = gr.Radio(choices=list(options.keys()), value="Sleeve Length", label="Choose Attribute")
# mag = gr.Slider(-7, 10, step=0.5, value=0, label="Magnitude")
# output_img = gr.Image(type="pil", label="Generated Image")
# # Hidden state to track the seed
# seed_state = gr.State(value=random.randint(5, 20))
# gen_btn = gr.Button("Update")
# update_seed_btn = gr.Button("click here to change design and click on update")
# # Generates image using current seed state
# def generate_image(option, magnitude, seed):
# selected = options[option]
# return visualize_magnitude_img_inn(G, vectors, selected["direction_ind"], selected["layers"], [magnitude], seed)
# # Randomizes seed and updates the state
# def randomize_seed():
# return random.randint(3, 500)
# # Use hidden seed for image generation
# gen_btn.click(fn=generate_image, inputs=[attr, mag, seed_state], outputs=output_img)
# # Update hidden seed state
# update_seed_btn.click(fn=randomize_seed, outputs=seed_state)
# interface.launch(debug=True)
import copy
import numpy as np
import PIL.Image
import torch
import torch.nn.functional as F
import pickle
import numpy as np
import functools
import matplotlib.pyplot as plt
import imageio
import random
import torchvision.models as models
from sklearn.decomposition import PCA
from matplotlib.patches import Rectangle
import math
from PIL import Image
import os
import time
import gradio as gr
import numpy as np
import dnnlib
import legacy
import torch_utils
import os
import time
import copy
import torch
import pickle
import random
import imageio
import numpy as np
import functools
import matplotlib.pyplot as plt
from PIL import Image
from sklearn.decomposition import PCA
import torch.nn.functional as F
import torchvision.models as models
import gradio as gr
# Load pretrained StyleGAN model from a local pickle and keep it on CPU.
# NOTE(review): pickle.load executes arbitrary code from the file -- only
# safe because dressmodel.pkl ships with this app; never point this at an
# untrusted path.
model_path = r"dressmodel.pkl"
with open(model_path, "rb") as f:
    # "G_ema" is the exponential-moving-average generator; eval() disables
    # train-time behavior (dropout/batch-norm updates).
    G = pickle.load(f)["G_ema"].eval().to("cpu")  # moved model to CPU
# Generate latent vector from seed
def generateZ_from_seed(seed, G, device='cpu'):  # default device is now 'cpu'
    """Build a deterministic (1, z_dim) latent tensor for the given seed.

    The same seed always yields the same z, so designs are reproducible.
    """
    rng = np.random.RandomState(seed)
    latent = rng.randn(1, G.z_dim)
    return torch.from_numpy(latent).to(device)
# Generate image from latent vector z
def generate_image_from_z(z, G, truncation_psi=1, noise_mode='none'):
    """Map a latent z to (w_row, image).

    Returns a (1, w_dim) numpy copy of the first w layer and an HWC uint8
    numpy image in [0, 255].

    FIX: the old code rebound G.forward to functools.partial(G.forward,
    force_fp32=True) on every call, re-wrapping the previous partial each
    time (unbounded nesting on the shared model object), and G.forward is
    never invoked here -- mapping/synthesis are already called with
    force_fp32 directly. The rebinding has been removed.
    """
    w = G.mapping(z, None, truncation_psi=truncation_psi)
    img = G.synthesis(w, noise_mode=noise_mode, force_fp32=True)
    # NCHW float in roughly [-1, 1] -> NHWC uint8 in [0, 255].
    img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
    return np.expand_dims(w.cpu().numpy()[0, 0], 0), img[0].cpu().numpy()
# Generate image from w
def generate_image_from_w(w, G, noise_mode='none'):
    """Synthesize one image from a w latent; returns an HWC uint8 numpy array."""
    raw = G.synthesis(w, noise_mode=noise_mode, force_fp32=True)
    # NCHW float in roughly [-1, 1] -> NHWC uint8 in [0, 255].
    scaled = raw.permute(0, 2, 3, 1) * 127.5 + 128
    quantized = scaled.clamp(0, 255).to(torch.uint8)
    return quantized[0].cpu().numpy()
# Generate w from z
def generate_w(z, G, truncation_psi=1):
    """Run only the mapping network: z -> w (no image synthesis)."""
    mapped = G.mapping(z, None, truncation_psi=truncation_psi)
    return mapped
# Apply PCA directions to w vector
def E(vectors, vs, rs, ls, w, device='cpu'):
    """Shift selected layers of a w latent along chosen PCA directions.

    vectors   -- PCA basis, one direction per row.
    vs/rs/ls  -- parallel lists: direction index, magnitude, layer indices.
    w         -- numpy array of shape (1, num_layers, w_dim); modified in
                 place, then returned as a float32 torch tensor on `device`.
    """
    for direction, layer_idx, magnitude in zip(vs, ls, rs):
        w[0, layer_idx] += vectors[direction] * magnitude
    return torch.from_numpy(w).float().to(device)
# Compute PCA over the w latent space: sample w vectors for N seeds and fit
# PCA; the principal components become the editing directions used below.
# FIX: the old loop called np.concatenate inside the loop, copying the whole
# growing array every iteration (O(N^2)); now all rows are collected in a
# list and concatenated once.
N = 100
w_rows = []
for seed in range(N):
    z = generateZ_from_seed(seed, G)
    w_row, _ = generate_image_from_z(z, G)
    w_rows.append(w_row)
w = np.concatenate(w_rows, axis=0)
pca = PCA()
pca.fit(w)
vectors = pca.components_              # editing directions, one per row
variances = pca.explained_variance_ratio_
# Visualize modified image
def visualize_magnitude_img_inn(G, vectors, direction_ind, layers, magnitudes, seed):
    """Render the design for `seed` shifted along the chosen PCA direction(s),
    save a timestamped copy under result/pca/, and return it as a PIL image."""
    latent = generateZ_from_seed(seed, G)
    base_w = generate_w(latent, G)
    edited_w = E(vectors, direction_ind, magnitudes, layers, base_w.cpu().numpy())
    edited_img = generate_image_from_w(edited_w, G)
    # Persist each result so successive edits can be compared later.
    os.makedirs("result/pca", exist_ok=True)
    stamp = time.strftime("%Y%m%d-%H%M%S")
    out_path = f"result/pca/pca_direction_in_{direction_ind[0]}_{layers[0][0]}_{stamp}.png"
    if isinstance(edited_img, np.ndarray):
        edited_img = Image.fromarray(edited_img)
    edited_img.save(out_path)
    return edited_img
# Attribute options for manipulation: each UI label maps to the PCA direction
# index to move along ("direction_ind") and the w layers that shift is applied
# to ("layers").
# NOTE(review): these direction/layer pairings were presumably chosen by
# inspecting this model's PCA directions -- confirm if the model changes.
options = {
    "Sleeve Length": {"direction_ind": [0], "layers": [np.arange(0, 4)]},
    "Neck": {"direction_ind": [2], "layers": [np.arange(0, 7)]},
    "Color": {"direction_ind": [6], "layers": [np.arange(10, 14)]}
}
import io
import requests
import random
from PIL import Image
import gradio as gr
# Your existing function to save image to backend (adapt URLs and user_id as needed)
def send_to_backend(image_buffer, user_id):
    """Upload the generated PNG (held in a BytesIO) to the backend for `user_id`.

    Returns a human-readable status string for display in the UI; network
    and HTTP failures are reported in the string rather than raised.
    """
    if not user_id:
        return "❌ user_id not found."
    if image_buffer is None:
        return "⚠️ No image generated. Please generate an image first."
    try:
        file_bytes = image_buffer.getvalue()
        files = {'file': ('generated_image.png', file_bytes, 'image/png')}
        # FIX: the URL literal previously began with a stray leading space,
        # which makes requests reject it as an invalid URL; removed.
        url = f"https://68be601de1e4.ngrok-free.app/customisation/upload/{user_id}"  # <-- change this to your API endpoint
        response = requests.post(url, files=files)
        if response.status_code == 201:
            return "✅ Image uploaded and saved to database!"
        else:
            return f"❌ Upload failed: {response.status_code} - {response.text}"
    except Exception as e:
        return f"⚠️ Error: {str(e)}"
# Wrap your existing visualize function to produce image + buffer
def generate_image_with_buffer(option, magnitude, seed):
    """Render the edited image for the chosen attribute and also return it
    as an in-memory PNG buffer so it can be uploaded to the backend later."""
    cfg = options[option]
    pil_img = visualize_magnitude_img_inn(
        G, vectors, cfg["direction_ind"], cfg["layers"], [magnitude], seed
    )
    # Keep a PNG copy in memory for the save/upload flow.
    png_buffer = io.BytesIO()
    pil_img.save(png_buffer, format="PNG")
    png_buffer.seek(0)
    return pil_img, png_buffer
# Build the Gradio UI. The attribute radio + magnitude slider drive the
# StyleGAN edit; a hidden seed state picks the base design; a save button
# uploads the current image to the backend for the user_id taken from the
# page URL's query string.
with gr.Blocks() as interface:
    gr.Markdown("## 🎨 StyleGAN Latent Space Manipulation")
    attr = gr.Radio(choices=list(options.keys()), value="Sleeve Length", label="Choose Attribute")
    mag = gr.Slider(-7, 10, step=0.5, value=0, label="Magnitude")
    output_img = gr.Image(type="pil", label="Generated Image")
    seed_state = gr.State(value=5)  # hidden per-session seed for the base design
    image_buffer_state = gr.State(value=None)  # To store generated image buffer
    user_id_state = gr.State(value="")  # <-- Added: to store user_id
    gen_btn = gr.Button("Update")
    update_seed_btn = gr.Button("Click to change design and then click Update")
    save_btn = gr.Button("💾 Save Image")
    save_status = gr.Textbox(label="Save Status", interactive=False)
    # On page load: get user_id from query param and store in state.
    # NOTE(review): assumes the app is opened as ...?user_id=<id>; an empty
    # string is stored when the param is absent -- confirm with the caller.
    @interface.load(inputs=None, outputs=[user_id_state])
    def get_user_id(request: gr.Request):
        return request.query_params.get("user_id", "")
    # Generate image and update buffer (buffer is kept for later upload).
    def generate_and_store_buffer(option, magnitude, seed):
        img, buffer = generate_image_with_buffer(option, magnitude, seed)
        return img, buffer
    # Generate random seed (user must click Update afterwards to re-render).
    def randomize_seed():
        return random.randint(3, 25)
    # Save current image buffer to backend using user_id from URL param.
    def save_current_image(buffer, user_id):
        return send_to_backend(buffer, user_id)
    gen_btn.click(fn=generate_and_store_buffer, inputs=[attr, mag, seed_state], outputs=[output_img, image_buffer_state])
    update_seed_btn.click(fn=randomize_seed, outputs=seed_state)
    # Use user_id_state here, not seed_state!
    save_btn.click(fn=save_current_image, inputs=[image_buffer_state, user_id_state], outputs=save_status)
interface.launch(debug=True)