BitokenPlus committed on
Commit
061b0a1
·
verified ·
1 Parent(s): f5a5165

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +40 -64
app.py CHANGED
@@ -1,70 +1,46 @@
1
  import gradio as gr
 
2
  from PIL import Image
3
- from diffusers import AutoPipelineForInpainting, AutoencoderKL
4
- import torch
5
- from SegBody import segment_body # Import the segmentation function
6
-
7
# Select the compute device: GPU when available, otherwise CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Half precision on GPU (smaller, faster); full precision on CPU, where
# fp16 inference is not reliably supported.
dtype = torch.float16 if device == "cuda" else torch.float32

# The "fp16-fix" VAE avoids NaN/black-image artifacts when decoding SDXL
# latents in half precision.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix",
    torch_dtype=dtype,
)

# BUG FIX: the old CPU branch requested variant="fp16" while asking for
# float32 weights (its own comment even said "Use fp32 for CPU"). The fp16
# weight variant is only requested when running on GPU.
pipeline = AutoPipelineForInpainting.from_pretrained(
    "diffusers/stable-diffusion-xl-1.0-inpainting-0.1",
    vae=vae,
    torch_dtype=dtype,
    use_safetensors=True,
    **({"variant": "fp16"} if device == "cuda" else {}),
).to(device)
30
# Inference function: garment try-on via SDXL inpainting.
def inpaint(person_image, garment_image, prompt):
    """Inpaint the garment onto the segmented body region of a person image.

    Args:
        person_image: PIL image of the person (any mode/size; converted to
            RGB and resized to 512x512).
        garment_image: PIL image of the garment, used as IP-Adapter guidance.
        prompt: text prompt steering the inpainting.

    Returns:
        The first generated PIL image.
    """
    # The SDXL inpainting pipeline works on fixed-size RGB inputs.
    person_image = person_image.convert("RGB").resize((512, 512))
    garment_image = garment_image.convert("RGB").resize((512, 512))

    # Only the mask is needed here; the segmentation preview is discarded
    # (the old code bound it to an unused `seg_image` variable).
    # face=False keeps the face outside the inpainted region.
    _, mask_image = segment_body(person_image, face=False)

    # Ensure the mask matches the 512x512 working resolution.
    mask_image = mask_image.resize((512, 512))

    results = pipeline(
        prompt=prompt,
        negative_prompt="ugly, bad quality, bad anatomy",
        image=person_image,
        mask_image=mask_image,           # inpaint only the masked body region
        ip_adapter_image=garment_image,  # garment conditioning via IP-Adapter
        strength=0.99,
        guidance_scale=8.0,
        num_inference_steps=100,
    )
    return results.images[0]
55
-
56
# Wire the inpainting function into a simple Gradio UI.
demo = gr.Interface(
    fn=inpaint,
    inputs=[
        gr.Image(type="pil", label="Person Image"),
        gr.Image(type="pil", label="Garment Image"),
        gr.Textbox(label="Prompt", placeholder="Enter the prompt for the model"),
    ],
    outputs=gr.Image(type="pil"),
    title="Stable Diffusion Inpainting with Segmentation",
    description="Inpainting model for seamless garment transfer on segmented body image using Stable Diffusion XL.",
    # BUG FIX: the old code passed server_timeout=100 here, but gr.Interface
    # has no such parameter and raises TypeError at construction; removed.
)

demo.launch(share=True)  # share=True exposes a temporary public link
 
 
 
 
 
1
  import gradio as gr
2
+ import requests
3
  from PIL import Image
4
+ import io
5
+
6
# URL of the TryOnGAN model (or any other Hugging Face model).
# NOTE(review): this is the Space's web-page URL, not an inference API
# endpoint — POSTing files here likely returns HTML, not an image. Confirm
# the Space's actual API route (e.g. via gradio_client) before relying on it.
MODEL_URL = "https://huggingface.co/spaces/akhaliq/TryOnGAN"
8
+
9
# Function that processes the images by calling the remote Hugging Face model.
def try_on_clothes(photo, clothing):
    """POST the mannequin photo and garment to MODEL_URL, return the result.

    Args:
        photo: PIL image of the mannequin/person.
        clothing: PIL image of the garment to try on.

    Returns:
        A PIL image decoded from the response on success, or an error string
        describing the failure (preserved from the original behavior, though
        a gr.Image output cannot render a string).
    """

    def _to_jpeg_bytes(img):
        # BUG FIX: `requests` expects bytes or a file-like object in `files=`;
        # a raw PIL Image is neither, so serialize it to JPEG first.
        buf = io.BytesIO()
        img.convert("RGB").save(buf, format="JPEG")
        buf.seek(0)
        return buf

    files = {
        'photo': ('photo.jpg', _to_jpeg_bytes(photo), 'image/jpeg'),
        'clothing': ('clothing.jpg', _to_jpeg_bytes(clothing), 'image/jpeg')
    }

    try:
        # NOTE(review): MODEL_URL points at a Space's web page, not a
        # documented inference endpoint — verify the actual API route.
        # timeout added so a hung request fails instead of blocking forever.
        response = requests.post(MODEL_URL, files=files, timeout=60)

        if response.status_code == 200:
            # Assumes the response body is the generated image bytes.
            return Image.open(io.BytesIO(response.content))
        return "Error al procesar las imágenes, intente nuevamente."
    except Exception as e:
        # Surface any network/decoding failure as a user-readable string.
        return f"Error: {str(e)}"
29
+
30
# Build the Gradio interface.
# NOTE(review): try_on_clothes returns an error *string* on failure, which a
# gr.Image output cannot render — consider raising gr.Error there instead.
iface = gr.Interface(
    fn=try_on_clothes,
    inputs=[
        gr.Image(type="pil", label="Foto del Maniquí"),
        gr.Image(type="pil", label="Prenda a Probar")
    ],
    outputs=gr.Image(type="pil", label="Resultado del Probador Virtual"),
    title="Probador Virtual AI",
    description="Sube una foto de tu maniquí y una prenda para probarla en tiempo real con TryOnGAN."
)

# Launch the interface only when executed as a script.
if __name__ == "__main__":
    iface.launch()