Update app.py
app.py CHANGED

@@ -75,7 +75,6 @@ def synthesize_speech(text, embedding_path):
     # Synthesize each chunk
     audio_chunks = []
     for chunk in text_chunks:
-        start_time = time.time()
         out = tts.synthesizer.tts_model.inference(
             chunk,
             "ru",
@@ -85,10 +84,13 @@ def synthesize_speech(text, embedding_path):
             length_penalty=1.0,
             repetition_penalty=2.0,
         )
-        #
-
-
-
+        # Handle both tensor and numpy array outputs
+        wav = out["wav"].squeeze()
+        if isinstance(wav, torch.Tensor):
+            audio_chunks.append(wav.cpu().numpy())
+        else:
+            audio_chunks.append(wav)
+
     # Combine and save audio
     full_audio = np.concatenate(audio_chunks)
     output_path = "output.wav"
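
The change above normalizes whatever `tts.synthesizer.tts_model.inference` returns before the chunks are concatenated. A minimal sketch of that normalization in isolation, assuming an XTTS-style output where `out["wav"]` may be either a torch.Tensor or a numpy array (the helper name `to_numpy_wav` is illustrative and not part of app.py):

import numpy as np
import torch

def to_numpy_wav(wav):
    # Collapse extra dimensions, then convert to numpy if needed,
    # mirroring the isinstance check added in the diff above.
    wav = wav.squeeze()
    if isinstance(wav, torch.Tensor):
        return wav.detach().cpu().numpy()
    return np.asarray(wav)

# Usage inside the synthesis loop (sketch):
#   audio_chunks.append(to_numpy_wav(out["wav"]))
#   full_audio = np.concatenate(audio_chunks)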