Made changes for audio
app.py (changed):
@@ -1,7 +1,7 @@
 import torch
 import torch.nn as nn
 import gradio as gr
-from transformers import AutoModelForCausalLM, AutoTokenizer
+from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 
 
 class _MLPVectorProjector(nn.Module):
@@ -20,10 +20,35 @@ class _MLPVectorProjector(nn.Module):
     def forward(self, x):
         return torch.cat([mlp(x) for mlp in self.mlps], dim=-2)
 
+## Text model
+
 model_name = "microsoft/phi-2"
 
 phi2_text = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)
-
+tokenizer_text = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)
+
+## Audio model
+model_id_audio = "openai/whisper-large-v3"
+
+model_audio = AutoModelForSpeechSeq2Seq.from_pretrained(
+    model_id_audio, torch_dtype=torch_dtype, low_cpu_mem_usage=True, use_safetensors=True).to("cpu")
+
+processor_audio = AutoProcessor.from_pretrained(model_id_audio)
+
+pipe_audio = pipeline(
+    "automatic-speech-recognition",
+    model=model_audio,
+    tokenizer=processor_audio.tokenizer,
+    feature_extractor=processor_audio.feature_extractor,
+    max_new_tokens=128,
+    chunk_length_s=30,
+    batch_size=16,
+    return_timestamps=False,
+    torch_dtype=torch.float32,
+    device="cpu",
+)
+
+## image model
 
 def example_inference(input_text, count): #, image, img_qn, audio):
     pred_text = textMode(input_text, count)
@@ -33,8 +58,8 @@ def example_inference(input_text, count): #, image, img_qn, audio):
 
 def textMode(text, count):
     count = int(count)
-    inputs =
-    prediction =
+    inputs = tokenizer_text(text, return_tensors="pt", return_attention_mask=False)
+    prediction = tokenizer_text.batch_decode(
         phi2_text.generate(
             **inputs,
             max_new_tokens=count,
@@ -51,7 +76,8 @@ def imageMode(image, question):
     return "In progress"
 
 def audioMode(audio):
-
+    result = pipe_audio(audio)
+    return result["text"]
 
 
 interface_title = "TSAI-ERA-V1 - Capstone - Multimodal GPT Demo"
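For readability, a consolidated sketch of how textMode reads after this change. The hunk above cuts off at max_new_tokens, so the closing of the batch_decode/generate calls and the return statement below are assumptions, not lines shown in the diff.

def textMode(text, count):
    count = int(count)
    # Tokenize the prompt; the attention mask is skipped, matching the diff.
    inputs = tokenizer_text(text, return_tensors="pt", return_attention_mask=False)
    # Generate up to `count` new tokens and decode the batch back to text.
    prediction = tokenizer_text.batch_decode(
        phi2_text.generate(
            **inputs,
            max_new_tokens=count,
        )
    )
    # Assumed: return the first decoded string.
    return prediction[0]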
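One thing to watch in the new audio block: torch_dtype is passed to AutoModelForSpeechSeq2Seq.from_pretrained but is never defined at that point, so the module would fail with a NameError on import. A sketch of a corrected load, assuming float32 is intended since the pipeline below runs on CPU with torch_dtype=torch.float32:

# Assumed fix: use an explicit float32 dtype to match the CPU pipeline below.
model_audio = AutoModelForSpeechSeq2Seq.from_pretrained(
    model_id_audio,
    torch_dtype=torch.float32,
    low_cpu_mem_usage=True,
    use_safetensors=True,
).to("cpu")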
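audioMode passes the Gradio audio value straight to pipe_audio and returns its "text" field. A transformers automatic-speech-recognition pipeline accepts a path to an audio file (among other input formats), so a quick local check could look like the sketch below; the sample path is hypothetical.

# Hypothetical smoke test; samples/hello.wav is an assumed path, not a file in the repo.
sample_path = "samples/hello.wav"
print(pipe_audio(sample_path)["text"])  # raw pipeline output
print(audioMode(sample_path))           # same text via the app's handler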
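The Gradio wiring for the new audio path is outside this diff. A minimal sketch of one way audioMode could be exposed, assuming a standalone gr.Interface with a filepath-typed Audio input; the component names and layout are illustrative, not the app's actual UI.

# Illustrative wiring only; the real interface definition lives further down in app.py.
audio_demo = gr.Interface(
    fn=audioMode,
    inputs=gr.Audio(type="filepath", label="Speak or upload audio"),
    outputs=gr.Textbox(label="Transcription"),
    title=interface_title,
)

if __name__ == "__main__":
    audio_demo.launch()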