Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -2,6 +2,18 @@ import gradio as gr
|
|
| 2 |
from transformers import pipeline
|
| 3 |
import torch
|
| 4 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Load the model that converts an image to text.
# NOTE(review): this is an image-*classification* pipeline, so the "text" it
# produces is a class label, not a caption — confirm this is intended (an
# image-to-text / captioning pipeline may have been meant).
image_to_text_model = pipeline("image-classification")
| 7 |
|
|
@@ -11,13 +23,14 @@ text_to_audio_model = pipeline("text-to-speech")
|
|
# Handler for the Gradio interface.
def image_to_audio(input_image):
    """Classify the image, then synthesize speech for the top label."""
    # Top predicted class label from the image-classification pipeline.
    predicted_label = image_to_text_model(input_image)[0]['label']
    # Synthesize audio for that label with the text-to-speech pipeline.
    speech = text_to_audio_model(predicted_label)[0]['audio']
    return speech
|
| 20 |
|
|
|
|
| 21 |
# Interfaz Gradio
|
| 22 |
iface = gr.Interface(
|
| 23 |
fn=image_to_audio,
|
|
@@ -29,7 +42,4 @@ iface = gr.Interface(
|
|
| 29 |
)
|
| 30 |
|
# Run the Gradio interface.
# NOTE: removed stale commented-out code that set up an earlier
# speech-to-text demo; it referenced the deprecated `gr.inputs` API and was
# dead weight.
iface.launch()
|
|
|
|
| 2 |
from transformers import pipeline
|
| 3 |
import torch
|
| 4 |
|
# Define two models: one image-to-text and one text-to-audio. The output of
# the first model (the generated text) is fed into the input of the 2nd
# model (text to audio).
|
def transform(example_batch):
    """Turn a batch of PIL images into model-ready pixel values.

    NOTE(review): `feature_extractor` is not defined anywhere in this file —
    confirm it is created elsewhere before this function is called.
    """
    # Convert every image in the batch to RGB and extract pixel values.
    rgb_images = [img.convert("RGB") for img in example_batch['image']]
    batch_inputs = feature_extractor(rgb_images, return_tensors='pt')
    # Carry the labels through alongside the pixel values.
    batch_inputs['labels'] = example_batch['labels']
    return batch_inputs
|
| 16 |
+
|
# Load the model that converts an image to text.
# NOTE(review): this is an image-*classification* pipeline, so the "text" it
# produces is a class label, not a caption — confirm this is intended.
image_to_text_model = pipeline("image-classification")
|
| 19 |
|
|
|
|
# Handler for the Gradio interface: image -> predicted label -> speech.
def image_to_audio(input_image):
    """Classify the image and return synthesized speech for the top label.

    BUG FIX: the label string was previously wrapped in transform(), but
    transform() expects a batch dict with 'image' and 'labels' keys, so
    passing a plain string label would crash at runtime (TypeError/KeyError).
    The label is now fed directly to the text-to-speech pipeline, matching
    the behavior of the original working version.
    """
    # Top predicted class label from the image-classification pipeline.
    text_output = image_to_text_model(input_image)[0]['label']
    # Generate audio from the text.
    audio_output = text_to_audio_model(text_output)[0]['audio']
    return audio_output
|
| 32 |
|
| 33 |
+
|
| 34 |
# Interfaz Gradio
|
| 35 |
iface = gr.Interface(
|
| 36 |
fn=image_to_audio,
|
|
|
|
| 42 |
)
|
| 43 |
|
# Run the Gradio interface (blocks and serves the web UI).
iface.launch()
|
|
|
|
|
|
|
|
|