Spaces:
Runtime error
Runtime error
Commit ·
d1de822
1
Parent(s): efd6e56
Update app.py
Browse files
app.py
CHANGED
|
# Select the compute device once at import time: first CUDA GPU when one is
# visible, otherwise the CPU.
# NOTE(review): the rest of this file uses `whisper_model.device` rather than
# this variable — confirm `device` is still needed.
device = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")
|
| 16 |
|
| 17 |
-
def
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
| 16 |
|
| 17 |
def transcribe(audio):
    """Transcribe an audio file with Whisper and translate it to English.

    Parameters
    ----------
    audio : str
        Path to an audio file on disk (as produced by the Gradio upload
        component or the YouTube download helper).

    Returns
    -------
    tuple[str, str]
        ``(transcript, translation)`` — the transcript in the spoken
        language and its English translation.

    BUG FIX: both Gradio click handlers declare two output components
    (``[transcript_output, summary_output]``) but this function used to
    return a single string, which made Gradio fail at runtime.  It now
    returns one value per output component.
    """
    print("""
    —
    Sending audio to Whisper ...
    —
    """)

    # Load the file and pad/trim it to the fixed 30-second window the
    # Whisper encoder expects.
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)

    # Detect the spoken language; the detected language is reported to the
    # console via `transcription.language` below, so the probability map
    # returned here is not needed.
    whisper_model.detect_language(mel)

    # fp16=False keeps decoding in fp32 so it also works on CPU.
    transcript_options = whisper.DecodingOptions(task="transcribe", fp16=False)
    translate_options = whisper.DecodingOptions(task="translate", fp16=False)

    transcription = whisper.decode(whisper_model, mel, transcript_options)
    translation = whisper.decode(whisper_model, mel, translate_options)

    print("Language Spoken: " + transcription.language)
    print("Transcript: " + transcription.text)
    print("Translated: " + translation.text)

    # One value per wired Gradio output component.
    return transcription.text, translation.text
|
| 44 |
+
|
| 45 |
+
def transcribe_upload(audio):
|
| 46 |
+
return transcribe(audio)
|
| 47 |
+
|
| 48 |
+
def transcribe_yt(link):
|
| 49 |
+
yt = YouTube(link)
|
| 50 |
+
path = yt.streams.filter(only_audio=True)[0].download(filename="audio.mp3")
|
| 51 |
+
return transcribe(path)
|
| 52 |
+
|
| 53 |
+
with gr.Blocks(css = css) as demo:
|
| 54 |
+
gr.Markdown("""
|
| 55 |
+
## Multi-lingual Transcript Generator
|
| 56 |
+
""")
|
| 57 |
+
gr.HTML('''
|
| 58 |
+
<p style="margin-bottom: 10px">
|
| 59 |
+
Save Transcripts of videos as PDF with the help of Whisper, which is a general-purpose speech recognition model released by OpenAI that can perform multilingual speech recognition as well as speech translation and language identification.
|
| 60 |
+
</p>
|
| 61 |
+
''')
|
| 62 |
+
with gr.Column():
|
| 63 |
+
#gr.Markdown(""" ### Record audio """)
|
| 64 |
+
with gr.Tab("Youtube Link"):
|
| 65 |
+
yt_input = gr.Textbox(label = 'Youtube Link')
|
| 66 |
+
transcribe_audio_yt = gr.Button('Transcribe')
|
| 67 |
+
|
| 68 |
+
with gr.Tab("Upload Podcast as File"):
|
| 69 |
+
audio_input_u = gr.Audio(label = 'Upload Audio',source="upload",type="filepath")
|
| 70 |
+
transcribe_audio_u = gr.Button('Transcribe')
|
| 71 |
+
|
| 72 |
+
with gr.Row():
|
| 73 |
+
transcript_output = gr.Textbox(label="Transcription in the language spoken", lines = 20)
|
| 74 |
+
summary_output = gr.Textbox(label = "English Summary", lines = 10)
|
| 75 |
+
|
| 76 |
+
transcribe_audio_yt.click(transcribe_yt, inputs = yt_input, outputs = [transcript_output, summary_output])
|
| 77 |
+
transcribe_audio_u.click(transcribe_upload, inputs = audio_input_u, outputs = [transcript_output,summary_output])
|
| 78 |
+
gr.HTML('''
|
| 79 |
+
<div class="footer">
|
| 80 |
+
<p>Whisper Model by <a href="https://github.com/openai/whisper" style="text-decoration: underline;" target="_blank">OpenAI</a>
|
| 81 |
+
</p>
|
| 82 |
+
</div>
|
| 83 |
+
''')
|
| 84 |
+
|
| 85 |
+
demo.queue()
|
| 86 |
+
demo.launch()
|