SandraCLV committed on
Commit
a435eea
1 Parent(s): 4e43c85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +59 -30
app.py CHANGED
@@ -1,28 +1,34 @@
1
  import gradio as gr
2
- from transformers import AutoProcessor, BlipForConditionalGeneration, AutoModelForCausalLM, AutoImageProcessor, VisionEncoderDecoderModel, AutoTokenizer
3
- import io
4
- import base64
 
5
 
6
  # from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, Blip2ForConditionalGeneration, VisionEncoderDecoderModel
7
- import torch
8
  import open_clip
9
- import openai
10
-
11
- from huggingface_hub import hf_hub_download
12
 
 
 
 
 
 
 
 
13
  # Carga el modelo de clasificación de imagen a texto
14
  blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
15
  blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
16
 
17
- # Carga el modelo de texto a voz
18
- openai.api_key = 'sk-[REDACTED]'
19
- model_id = "base"
20
- #model_version = "2022-01-01"
21
- whisper = openai.Model(model_id=model_id)
22
 
23
  device = "cuda" if torch.cuda.is_available() else "cpu"
24
  blip_model_large.to(device)
25
 
 
 
26
  def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
27
  inputs = processor(images=image, return_tensors="pt").to(device)
28
 
@@ -49,32 +55,55 @@ def generate_caption_coca(model, transform, image):
49
  def generate_captions(image):
50
 
51
  caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
52
- print(caption_blip_large)
53
- return caption_blip_large
54
 
 
55
 
 
56
  # Define la función que convierte texto en voz
57
- def text_to_speech(text):
58
- # Genera el audio utilizando el modelo Whisper
59
- response = whisper.generate(prompt=text)
60
- print(response)
61
- # Extrae el audio del resultado
62
- audio = response.choices[0].audio
63
-
64
- # Codifica el audio en base64
65
- audio_base64 = base64.b64encode(audio).decode("utf-8")
66
-
67
- # Devuelve el audio como un archivo MP3
68
- return BytesIO(base64.b64decode(audio_base64))
69
-
70
- # Define la interfaz de usuario utilizando Gradio
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
71
  inputsImg = [
72
  gr.Image(type="pil", label="Imagen"),
73
  ]
74
 
75
- outputs = [ gr.Textbox(label="Caption generated by BLIP-large") ]
 
76
  title = "Clasificación de imagen a texto y conversión de texto a voz"
77
- description = "Carga una imagen y obtén una descripción de texto de lo que contiene la imagen, así como un archivo de audio que lee el texto en voz alta."
78
  examples = []
79
 
80
  interface = gr.Interface(fn=generate_captions,
 
1
  import gradio as gr
2
+ from transformers import AutoProcessor, BlipForConditionalGeneration, AutoTokenizer,SpeechT5Processor, SpeechT5ForTextToSpeech, SpeechT5HifiGan
3
+ import librosa
4
+ import numpy as np
5
+ import torch
6
 
7
  # from transformers import AutoProcessor, AutoTokenizer, AutoImageProcessor, AutoModelForCausalLM, BlipForConditionalGeneration, Blip2ForConditionalGeneration, VisionEncoderDecoderModel
 
8
  import open_clip
 
 
 
9
 
10
+ #CONSTANTS
11
+ speaker_embeddings = {
12
+ "BDL": "spkemb/cmu_us_bdl_arctic-wav-arctic_a0009.npy",
13
+ "CLB": "spkemb/cmu_us_clb_arctic-wav-arctic_a0144.npy",
14
+ "RMS": "spkemb/cmu_us_rms_arctic-wav-arctic_b0353.npy",
15
+ "SLT": "spkemb/cmu_us_slt_arctic-wav-arctic_a0508.npy",
16
+ }
17
  # Carga el modelo de clasificación de imagen a texto
18
  blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
19
  blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
20
 
21
+ # # Carga el modelo de clasificación de texto a audio (speech)
22
+ checkpoint = "microsoft/speecht5_tts"
23
+ processor = SpeechT5Processor.from_pretrained(checkpoint)
24
+ model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint)
25
+ vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
26
 
27
  device = "cuda" if torch.cuda.is_available() else "cpu"
28
  blip_model_large.to(device)
29
 
30
+
31
+ ##### IMAGE MODEL TO TEXT, MODEL 1
32
  def generate_caption(processor, model, image, tokenizer=None, use_float_16=False):
33
  inputs = processor(images=image, return_tensors="pt").to(device)
34
 
 
55
  def generate_captions(image):
56
 
57
  caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
58
+ print('generate_captions>>>'+caption_blip_large)
59
+ return caption_blip_large,text_to_speech(caption_blip_large,"Surprise Me!")
60
 
61
+ #####END IMAGE MODEL TO TEXT
62
 
63
+ ### TEXT TO AUDIO SPEECH MODEL 2
64
  # Define la función que convierte texto en voz
65
+ def text_to_speech(text,speaker):
66
+ # Genera el audio utilizando el modelo
67
+ if len(text.strip()) == 0:
68
+ return (16000, np.zeros(0).astype(np.int16))
69
+ inputs = processor(text=text, return_tensors="pt")
70
+
71
+ # limit input length
72
+ input_ids = inputs["input_ids"]
73
+ input_ids = input_ids[..., :model.config.max_text_positions]
74
+
75
+ if speaker == "Surprise Me!":
76
+ # load one of the provided speaker embeddings at random
77
+ idx = np.random.randint(len(speaker_embeddings))
78
+ key = list(speaker_embeddings.keys())[idx]
79
+ speaker_embedding = np.load(speaker_embeddings[key])
80
+
81
+ # randomly shuffle the elements
82
+ np.random.shuffle(speaker_embedding)
83
+
84
+ # randomly flip half the values
85
+ x = (np.random.rand(512) >= 0.5) * 1.0
86
+ x[x == 0] = -1.0
87
+ speaker_embedding *= x
88
+
89
+ #speaker_embedding = np.random.rand(512).astype(np.float32) * 0.3 - 0.15
90
+ speaker_embedding = torch.tensor(speaker_embedding).unsqueeze(0)
91
+
92
+ speech = model.generate_speech(input_ids, speaker_embedding, vocoder=vocoder)
93
+
94
+ speech = (speech.numpy() * 32767).astype(np.int16)
95
+ return (16000, speech)
96
+ ### END TEXT TO AUDIO SPEECH MODEL 2
97
+
98
+ # Define la interfaz de usuario utilizando Gradio entradas y salidas
99
  inputsImg = [
100
  gr.Image(type="pil", label="Imagen"),
101
  ]
102
 
103
+ # Salidas: el texto generado y el audio
104
+ outputs = [ gr.Textbox(label="Caption generated by BLIP-large"),gr.Audio(type="numpy",label='Transcripcion')]
105
  title = "Clasificación de imagen a texto y conversión de texto a voz"
106
+ description = "Carga una imagen y obtén una descripción de texto de lo que contiene la imagen, así como un archivo de audio con la transcripción de la descripción de la imagen."
107
  examples = []
108
 
109
  interface = gr.Interface(fn=generate_captions,