File size: 3,752 Bytes
0d53fd0
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
import gradio as gr
import edge_tts
import asyncio
import os

# UI display label -> Microsoft Edge TTS voice identifier ("ShortName").
# Keys appear in the dropdown; values are passed to edge_tts.Communicate
# and are also the basenames of the sample clips under example/.
SUPPORTED_VOICES = {
    'Xiaoxiao-晓晓': 'zh-CN-XiaoxiaoNeural',
    'Xiaoyi-晓伊': 'zh-CN-XiaoyiNeural',
    'Yunjian-云健': 'zh-CN-YunjianNeural',
    'Yunxi-云希': 'zh-CN-YunxiNeural',
    'Yunxia-云夏': 'zh-CN-YunxiaNeural',
    'Yunyang-云扬': 'zh-CN-YunyangNeural',
    'liaoning-Xiaobei-晓北辽宁': 'zh-CN-liaoning-XiaobeiNeural',
    'shaanxi-Xiaoni-陕西晓妮': 'zh-CN-shaanxi-XiaoniNeural'
}

async def changeVoice(voices):
    """Return the path of the pre-recorded sample clip for a voice.

    Args:
        voices: Display label selected in the dropdown (a key of
            SUPPORTED_VOICES).

    Returns:
        Absolute path to ``example/<voice_id>.wav`` next to this script.
    """
    voice_id = SUPPORTED_VOICES[voices]
    # Join path components individually instead of concatenating
    # "example/..." into one string, so separators are platform-correct.
    return os.path.join(os.path.dirname(__file__), "example", voice_id + ".wav")

async def textToSpeech(text, voices, rate, volume):
    """Synthesize ``text`` to output.mp3 with edge-tts and return its path.

    Args:
        text: Text to speak.
        voices: Display label selected in the dropdown (key of SUPPORTED_VOICES).
        rate: Speaking-rate delta in percent, -100..100.
        volume: Volume delta in percent, -100..100.

    Returns:
        Absolute path of the generated MP3.

    Raises:
        gr.Error: If the output file does not exist after saving.
    """
    # Bug fix: the original saved to a CWD-relative "output.mp3" but then
    # checked (and clearSpeech deletes) the copy next to the script — the two
    # disagree whenever CWD differs from the script directory. Use one
    # absolute path for save, check, and return.
    output_file = os.path.join(os.path.dirname(__file__), "output.mp3")
    voice = SUPPORTED_VOICES[voices]
    # "+" format flag emits a leading +/- sign, matching edge-tts's
    # expected "+N%" / "-N%" strings.
    rate_str = f"{rate:+}%"
    volume_str = f"{volume:+}%"

    communicate = edge_tts.Communicate(text, voice, rate=rate_str, volume=volume_str, proxy=None)
    await communicate.save(output_file)

    if os.path.exists(output_file):
        return output_file
    raise gr.Error("转换失败!")

async def clearSpeech():
    """Remove the generated output.mp3 (if present) and clear both UI fields.

    Returns:
        ``(None, None)`` — resets the text area and the output audio widget.
    """
    target = os.path.join(os.path.dirname(__file__), "output.mp3")
    exists = os.path.exists(target)
    if exists:
        os.remove(target)
    return None, None

async def main():
    """Build the Gradio UI, wire its events, and serve it."""
    with gr.Blocks(title="Edge TTS") as demo:
        gr.Markdown("""
        # Edge TTS
        """)
        with gr.Row():
            with gr.Column():
                text = gr.TextArea(label="文本", elem_classes="text-area")
                btn = gr.Button("生成", elem_id="submit-btn")
            with gr.Column():
                # Derive the choices from SUPPORTED_VOICES instead of
                # duplicating the label list by hand, so the dropdown can
                # never drift out of sync with the lookup table.
                voices = gr.Dropdown(choices=list(SUPPORTED_VOICES.keys()),
                                     value="Xiaoxiao-晓晓",
                                     label="发音",
                                     info="请选择发音人",
                                     interactive=True)

                example = gr.Audio(label="试听",
                                   value="example/zh-CN-XiaoxiaoNeural.wav",
                                   interactive=False,
                                   elem_classes="example")

                # Swap the sample clip whenever a different voice is picked.
                voices.change(fn=changeVoice, inputs=voices, outputs=example)
                rate = gr.Slider(-100,
                                 100,
                                 step=1,
                                 value=0,
                                 label="语速增减",
                                 info="加快或减慢语速",
                                 interactive=True)

                # Bug fix: this slider feeds textToSpeech's `volume`
                # parameter but was labeled 音调 (pitch); label it 音量.
                volume = gr.Slider(-100,
                                   100,
                                   step=1,
                                   value=0,
                                   label="音量增减",
                                   info="加大或减小音量",
                                   interactive=True)
                audio = gr.Audio(label="输出",
                                 interactive=False,
                                 elem_classes="audio")
                clear = gr.Button("清除", elem_id="clear-btn")
                btn.click(fn=textToSpeech,
                          inputs=[text, voices, rate, volume],
                          outputs=[audio])
                clear.click(fn=clearSpeech, outputs=[text, audio])

    await demo.launch()

# Bug fix: the __main__ guard was inside main() while asyncio.run(main())
# executed unconditionally — importing this module started an event loop and
# built the UI as a side effect. Guard the entry point itself instead.
if __name__ == "__main__":
    asyncio.run(main())