Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,7 +5,8 @@ import os
|
|
| 5 |
import random
|
| 6 |
import torch
|
| 7 |
|
| 8 |
-
IS_DUPLICATE = not os.getenv('SPACE_ID', '').startswith('hexgrad/')
|
|
|
|
| 9 |
CUDA_AVAILABLE = torch.cuda.is_available()
|
| 10 |
if not IS_DUPLICATE:
|
| 11 |
import kokoro
|
|
@@ -20,7 +21,7 @@ pipelines = {lang_code: KPipeline(lang_code=lang_code, model=False) for lang_cod
|
|
| 20 |
def forward_gpu(ps, ref_s, speed):
|
| 21 |
return models[True](ps, ref_s, speed)
|
| 22 |
|
| 23 |
-
def generate_first(text, voice='
|
| 24 |
text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
|
| 25 |
pipeline = pipelines[voice[0]]
|
| 26 |
pack = pipeline.load_voice(voice)
|
|
@@ -43,16 +44,16 @@ def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
|
|
| 43 |
return None, ''
|
| 44 |
|
| 45 |
# Arena API
|
| 46 |
-
def predict(text, voice='
|
| 47 |
return generate_first(text, voice, speed, use_gpu=False)[0]
|
| 48 |
|
| 49 |
-
def tokenize_first(text, voice='
|
| 50 |
pipeline = pipelines[voice[0]]
|
| 51 |
for _, ps, _ in pipeline(text, voice):
|
| 52 |
return ps
|
| 53 |
return ''
|
| 54 |
|
| 55 |
-
def generate_all(text, voice='
|
| 56 |
text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
|
| 57 |
pipeline = pipelines[voice[0]]
|
| 58 |
pack = pipeline.load_voice(voice)
|
|
@@ -136,25 +137,26 @@ with gr.Blocks() as stream_tab:
|
|
| 136 |
gr.DuplicateButton()
|
| 137 |
|
| 138 |
BANNER_TEXT = '''
|
| 139 |
-
[***Kokoro*** **
|
| 140 |
|
| 141 |
-
|
| 142 |
'''
|
| 143 |
-
API_OPEN = os.getenv('SPACE_ID') != 'hexgrad/Kokoro-TTS'
|
|
|
|
| 144 |
API_NAME = None if API_OPEN else False
|
| 145 |
with gr.Blocks() as app:
|
| 146 |
with gr.Row():
|
| 147 |
gr.Markdown(BANNER_TEXT, container=True)
|
| 148 |
with gr.Row():
|
| 149 |
with gr.Column():
|
| 150 |
-
text = gr.Textbox(label='Input Text', info=f"
|
| 151 |
with gr.Row():
|
| 152 |
-
voice = gr.Dropdown(list(CHOICES.items()), value='
|
| 153 |
use_gpu = gr.Dropdown(
|
| 154 |
[('ZeroGPU 🚀', True), ('CPU 🐌', False)],
|
| 155 |
value=CUDA_AVAILABLE,
|
| 156 |
label='Hardware',
|
| 157 |
-
info='GPU
|
| 158 |
interactive=CUDA_AVAILABLE
|
| 159 |
)
|
| 160 |
speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='Speed')
|
|
|
|
| 5 |
import random
|
| 6 |
import torch
|
| 7 |
|
| 8 |
+
# Duplicate-Space detection is hard-disabled for this deployment: the Space
# always behaves as the original, so the kokoro import below always runs.
# Original check (kept for reference) compared SPACE_ID to the upstream org:
# IS_DUPLICATE = not os.getenv('SPACE_ID', '').startswith('hexgrad/')
IS_DUPLICATE = False
|
| 10 |
CUDA_AVAILABLE = torch.cuda.is_available()
|
| 11 |
if not IS_DUPLICATE:
|
| 12 |
import kokoro
|
|
|
|
| 21 |
def forward_gpu(ps, ref_s, speed):
|
| 22 |
return models[True](ps, ref_s, speed)
|
| 23 |
|
| 24 |
+
def generate_first(text, voice='ef_dora', speed=1, use_gpu=CUDA_AVAILABLE):
|
| 25 |
text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
|
| 26 |
pipeline = pipelines[voice[0]]
|
| 27 |
pack = pipeline.load_voice(voice)
|
|
|
|
| 44 |
return None, ''
|
| 45 |
|
| 46 |
# Arena API
|
| 47 |
+
def predict(text, voice='ef_dora', speed=1):
    """Arena API entry point: synthesize `text` on CPU and return audio only.

    Delegates to generate_first with use_gpu=False and discards the
    phoneme string that generate_first also returns.
    """
    return generate_first(text, voice, speed, use_gpu=False)[0]
|
| 49 |
|
| 50 |
+
def tokenize_first(text, voice='ef_dora'):
    """Return the phoneme string for the first segment of `text`.

    Runs the language pipeline selected by the voice's first character
    (the language code) and returns the phonemes of the first yielded
    segment, or '' when the pipeline yields nothing.
    """
    pipeline = pipelines[voice[0]]
    for _, ps, _ in pipeline(text, voice):
        # Only the first segment's phonemes are wanted.
        return ps
    return ''
|
| 55 |
|
| 56 |
+
def generate_all(text, voice='ef_dora', speed=1, use_gpu=CUDA_AVAILABLE):
|
| 57 |
text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
|
| 58 |
pipeline = pipelines[voice[0]]
|
| 59 |
pack = pipeline.load_voice(voice)
|
|
|
|
| 137 |
gr.DuplicateButton()
|
| 138 |
|
| 139 |
# Markdown banner shown at the top of the Gradio app (Spanish-language demo).
# Reconstructed: the diff-viewer gutter had spilled junk lines inside the literal.
BANNER_TEXT = '''
[***Kokoro*** **es un modelo TTS de peso abierto con 82 millones de parámetros.**](https://huggingface.co/hexgrad/Kokoro-82M)

Esta demo solo muestra voces en español, pero puedes usar el modelo directamente para acceder a otros idiomas.
'''
|
| 144 |
+
# API access is hard-closed for this deployment; the original gated it on
# whether this Space is the canonical hexgrad/Kokoro-TTS instance:
# API_OPEN = os.getenv('SPACE_ID') != 'hexgrad/Kokoro-TTS'
# Bug fix: `false` is not a Python name (NameError at import); the
# boolean literal is `False`.
API_OPEN = False
|
| 146 |
API_NAME = None if API_OPEN else False
|
| 147 |
with gr.Blocks() as app:
|
| 148 |
with gr.Row():
|
| 149 |
gr.Markdown(BANNER_TEXT, container=True)
|
| 150 |
with gr.Row():
|
| 151 |
with gr.Column():
|
| 152 |
+
text = gr.Textbox(label='Input Text', info=f"hasta ~500 caracteres por generación, o {'∞' if CHAR_LIMIT is None else CHAR_LIMIT} caracteres por stream")
|
| 153 |
with gr.Row():
|
| 154 |
+
voice = gr.Dropdown(list(CHOICES.items()), value='ef_dora', label='Voice', info='La calidad varía según el idioma')
|
| 155 |
use_gpu = gr.Dropdown(
|
| 156 |
[('ZeroGPU 🚀', True), ('CPU 🐌', False)],
|
| 157 |
value=CUDA_AVAILABLE,
|
| 158 |
label='Hardware',
|
| 159 |
+
info='La GPU es más rápida, pero está limitada',
|
| 160 |
interactive=CUDA_AVAILABLE
|
| 161 |
)
|
| 162 |
speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='Speed')
|