Spaces:
Runtime error
Runtime error
Pin gradio version
Browse files
app.py
CHANGED
|
@@ -1,20 +1,12 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
import gradio as gr
|
| 3 |
import torch
|
| 4 |
-
import pydub
|
| 5 |
-
import torchaudio
|
| 6 |
-
from torchaudio.sox_effects import apply_effects_tensor
|
| 7 |
-
import numpy as np
|
| 8 |
from transformers import AutoFeatureExtractor, AutoModelForAudioXVector
|
| 9 |
|
| 10 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 11 |
|
| 12 |
-
def load_audio(file_name):
    """Load an audio file via pydub and return ``(samples, sample_rate)``.

    Samples come back as a 1-D float32 numpy array normalized to roughly
    [-1.0, 1.0) based on the source sample width (e.g. 16-bit PCM is
    divided by 2**15).

    NOTE(review): requires ``pydub`` (and ffmpeg for non-wav input) — the
    import is not visible in this chunk; confirm it exists at file top.
    NOTE(review): multi-channel audio stays interleaved in one flat array —
    confirm callers expect that.
    """
    audio = pydub.AudioSegment.from_file(file_name)
    # get_array_of_samples() yields integer PCM; convert once to float32.
    arr = np.array(audio.get_array_of_samples(), dtype=np.float32)
    # Normalize by the full-scale magnitude for this sample width:
    # 1 << (8 * width - 1) == 2**(bits - 1).
    arr = arr / (1 << (8 * audio.sample_width - 1))
    # arr is already float32 (float32 / int stays float32), so the
    # original trailing .astype(np.float32) was a redundant extra copy.
    return arr, audio.frame_rate
|
| 17 |
-
|
| 18 |
STYLE = """
|
| 19 |
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" integrity="sha256-YvdLHPgkqJ8DVUxjjnGVlMMJtNimJ6dYkowFFvp4kKs=" crossorigin="anonymous">
|
| 20 |
"""
|
|
@@ -63,12 +55,9 @@ cosine_sim = torch.nn.CosineSimilarity(dim=-1)
|
|
| 63 |
def similarity_fn(path1, path2):
|
| 64 |
if not (path1 and path2):
|
| 65 |
return '<b style="color:red">ERROR: Please record audio for *both* speakers!</b>'
|
| 66 |
-
|
| 67 |
-
wav1, sr1 = load_audio(path1)
|
| 68 |
-
|
| 69 |
-
wav1, _ = apply_effects_tensor(torch.tensor(wav1).unsqueeze(0), sr1, EFFECTS)
|
| 70 |
-
wav2, sr2 = load_audio(path2)
|
| 71 |
-
wav2, _ = apply_effects_tensor(torch.tensor(wav2).unsqueeze(0), sr2, EFFECTS)
|
| 72 |
print(wav1.shape, wav2.shape)
|
| 73 |
|
| 74 |
input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
|
|
@@ -138,4 +127,4 @@ interface = gr.Interface(
|
|
| 138 |
live=False,
|
| 139 |
examples=examples,
|
| 140 |
)
|
| 141 |
-
interface.launch(enable_queue=True)
|
|
|
|
| 1 |
import os
|
| 2 |
+
os.system("pip install gradio==2.8.0b2")
|
| 3 |
import gradio as gr
|
| 4 |
import torch
|
| 5 |
+
from torchaudio.sox_effects import apply_effects_file
|
|
|
|
|
|
|
|
|
|
| 6 |
from transformers import AutoFeatureExtractor, AutoModelForAudioXVector
|
| 7 |
|
| 8 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 9 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
STYLE = """
|
| 11 |
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" integrity="sha256-YvdLHPgkqJ8DVUxjjnGVlMMJtNimJ6dYkowFFvp4kKs=" crossorigin="anonymous">
|
| 12 |
"""
|
|
|
|
| 55 |
def similarity_fn(path1, path2):
|
| 56 |
if not (path1 and path2):
|
| 57 |
return '<b style="color:red">ERROR: Please record audio for *both* speakers!</b>'
|
| 58 |
+
|
| 59 |
+
wav1, _ = apply_effects_file(path1, EFFECTS)
|
| 60 |
+
wav2, _ = apply_effects_file(path2, EFFECTS)
|
|
|
|
|
|
|
|
|
|
| 61 |
print(wav1.shape, wav2.shape)
|
| 62 |
|
| 63 |
input1 = feature_extractor(wav1.squeeze(0), return_tensors="pt", sampling_rate=16000).input_values.to(device)
|
|
|
|
| 127 |
live=False,
|
| 128 |
examples=examples,
|
| 129 |
)
|
| 130 |
+
interface.launch(enable_queue=True)
|