Spaces:
No application file
No application file
File size: 716 Bytes
84c734d |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 |
import gradio as gr
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import pipeline  # fixed typo: "pipline" -> "pipeline" (was an ImportError)

# BLIP captioning model: takes an image and produces a short text description.
modeloObtenerTextoImagen = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")

# Stable Diffusion v1.5: generates an image from a text prompt.
# Fixed repo id: "stable-diffusion.v1-5" -> "stable-diffusion-v1-5"
# (the Hub id uses a hyphen; the dotted form fails to resolve).
# float32 keeps the pipeline CPU-compatible.
modeloGenerarImagen = DiffusionPipeline.from_pretrained(
    "sd-legacy/stable-diffusion-v1-5", torch_dtype=torch.float32
)
def obtenerDescripcion(imagen):
    """Caption the input image with BLIP, then generate a new image from that caption.

    Args:
        imagen: image as delivered by the Gradio "image" input — assumed to be a
            numpy array (H, W, C) suitable for ``Image.fromarray``; confirm against
            the Interface configuration.

    Returns:
        The first image produced by the Stable Diffusion pipeline for the caption.
    """
    # The image-to-text pipeline returns a list of dicts: [{"generated_text": "..."}].
    resultadoModeloTI = modeloObtenerTextoImagen(Image.fromarray(imagen))
    # Fixed typo in the log message: "la images" -> "la imagen".
    print(f'La frase que se ha obtenido de la imagen es {resultadoModeloTI}')
    # Feed the caption back into the text-to-image model; .images is a list, take the first.
    return modeloGenerarImagen(resultadoModeloTI[0]['generated_text']).images[0]
# Gradio UI: one image input, one image output, wired to obtenerDescripcion.
# Fixed keyword "input" -> "inputs" (gr.Interface has no "input" parameter;
# the original raised a TypeError at startup).
demo = gr.Interface(fn=obtenerDescripcion, inputs="image", outputs="image")
# Fixed "share=Trade" (NameError) -> share=True to request a public share link.
demo.launch(share=True)