psurmreqmer committed on
Commit
bc2c982
·
1 Parent(s): cbc9a1f
Files changed (1) hide show
  1. app6.py +6 -6
app6.py CHANGED
@@ -2,15 +2,15 @@ import gradio as gr
2
  import torch
3
  from PIL import Image
4
  from diffusers import DiffusionPipeline
5
- from transformers import pipline
6
 
7
- modeloObtenerTextoImagen = pipeline ("image-to-text", model = "Salesforce/blip-image-captioning-base")
8
- modeloGenerarImagen = DiffusionPipeline.from_pretrained("sd-legacy/stable-diffusion.v1-5", torch_dtype=torch.float32)
9
 
10
  def obtenerDescripcion(imagen):
11
  resultadoModeloTI = modeloObtenerTextoImagen(Image.fromarray(imagen))
12
- print(f'La frase que se ha obtenido de la images es {resultadoModeloTI}')
13
  return modeloGenerarImagen(resultadoModeloTI[0]['generated_text']).images[0]
14
 
15
- demo = gr.Interface(fn=obtenerDescripcion, input="image", outputs="image")
16
- demo.launch(share=Trade)
 
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import pipeline

# Captioning model: produces a short text description of an input image.
modeloObtenerTextoImagen = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
# Diffusion model: renders a brand-new image from a text prompt.
modeloGenerarImagen = DiffusionPipeline.from_pretrained("sd-legacy/stable-diffusion-v1-5", torch_dtype=torch.float32)


def obtenerDescripcion(imagen):
    """Caption the uploaded image, then synthesize a new image from that caption.

    Parameters
    ----------
    imagen :
        Pixel array handed over by the Gradio "image" input component
        (assumed RGB ndarray — that is what `Image.fromarray` expects here).

    Returns
    -------
    PIL.Image.Image
        The first image generated by the diffusion pipeline for the caption.
    """
    foto = Image.fromarray(imagen)
    resultadoModeloTI = modeloObtenerTextoImagen(foto)
    print(f'La frase que se ha obtenido de la imagen es {resultadoModeloTI}')
    # The captioning pipeline returns a list of dicts; take the first caption.
    frase = resultadoModeloTI[0]['generated_text']
    return modeloGenerarImagen(frase).images[0]


# Minimal web UI: one image in, one image out, publicly shared link.
demo = gr.Interface(fn=obtenerDescripcion, inputs="image", outputs="image")
demo.launch(share=True)