File size: 3,235 Bytes
df4eaf9
1547671
df4eaf9
 
 
 
 
 
 
 
10a9dd9
df4eaf9
6bac4ab
 
 
 
072773f
6bac4ab
df4eaf9
 
 
 
 
 
 
 
 
 
 
 
 
db68367
df4eaf9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b7edbd8
df4eaf9
 
 
 
 
f6ecd7d
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
from flask import Flask, jsonify, request, send_file
import sys
import re
import os
import torch
import utils
import commons
import soundfile as sf
from models import SynthesizerTrn
from text import text_to_sequence, _clean_text
from pyngrok import ngrok

# CLI arguments: argv[1] = ngrok auth token, argv[2] = torch device string
# (passed verbatim to Tensor.to() / Module.to() below — e.g. "cpu"; confirm
# accepted values against the torch build in use).
parser = sys.argv
argsAuth = parser[1]
argsDevice = parser[2]
ngrok.set_auth_token(argsAuth)
# Open an ngrok tunnel to local port 5000 (Flask's default) and remember the
# public URL; it is echoed to the console and embedded in API responses.
public_url = ngrok.connect(5000).public_url
print("Monggo: " + public_url)
# limit text and audio length in huggingface spaces
limitation = os.getenv("SYSTEM") == "spaces"
# Model hyper-parameters shared by all synthesis requests.
hps_ms = utils.get_hparams_from_file(r'config/config.json')

def get_text(text, hps, is_symbol):
    """Convert *text* into a LongTensor of symbol ids plus the cleaned text.

    When ``is_symbol`` is true the input is treated as raw symbols and no
    cleaners are applied; otherwise the cleaners configured in
    ``hps.data.text_cleaners`` run first. If the model was trained with
    blank tokens (``hps.data.add_blank``), a 0 id is interspersed between
    every symbol id.
    """
    cleaners = [] if is_symbol else hps.data.text_cleaners
    seq, cleaned = text_to_sequence(text, hps.symbols, cleaners)
    if hps.data.add_blank:
        seq = commons.intersperse(seq, 0)
    return torch.LongTensor(seq), cleaned

def generate(textnya):
    """Synthesize *textnya* with the Ayaka VITS model and write ``speech.wav``.

    The input is treated as raw symbol text (mixed ``[ZH]``/``[JA]`` tagged
    phonemes); whitespace is stripped before conversion. Output is a mono
    waveform written to ``speech.wav`` at 22050 Hz.

    Side effects: loads the checkpoint from ``models/ayaka-jp.pth`` on first
    call (cached on the function afterwards) and overwrites ``speech.wav``.
    """
    device = argsDevice
    # Build the synthesizer once and cache it on the function object; the
    # original rebuilt the network and re-read the checkpoint on every
    # request, which is pure overhead since both are constant.
    if not hasattr(generate, "_net_g"):
        net_g = SynthesizerTrn(
            len(hps_ms.symbols),
            hps_ms.data.filter_length // 2 + 1,
            hps_ms.train.segment_size // hps_ms.data.hop_length,
            n_speakers=hps_ms.data.n_speakers,
            **hps_ms.model)
        utils.load_checkpoint('models/ayaka-jp.pth', net_g, None)
        net_g.eval().to(device)
        generate._net_g = net_g
    net_g_ms = generate._net_g

    # Sampling knobs; values match the original hard-coded configuration.
    noise_scale = 0.6
    noise_scale_w = 0.668
    length_scale = 1.2
    speaker_id = 303  # Ayaka's speaker index in the multi-speaker checkpoint

    # Collapse all whitespace out of the input before symbol conversion.
    text = textnya.replace('\n', ' ').replace('\r', '').replace(" ", "")
    if limitation:
        # Length limit for hosted (HuggingFace Spaces) deployments: 100 chars
        # for plain text, tripled for symbol input (always the case here).
        # Language tags like [JA] are excluded from the count.
        # Raw string fixes the invalid "\[" escape in the original pattern.
        text_len = len(re.sub(r"\[([A-Z]{2})\]", "", textnya))
        if text_len > 300:
            # Best-effort behavior preserved: warn but still synthesize.
            print("Error: Text is too long")

    # NOTE: the original had dead branches comparing a descriptive language
    # *string* against the integers 0/1 (never true) behind an always-false
    # `if not is_symbol:` guard, plus an unused `sid = 303`; all removed.
    stn_tst, _ = get_text(text, hps_ms, True)
    with torch.no_grad():
        x_tst = stn_tst.unsqueeze(0).to(device)
        x_tst_lengths = torch.LongTensor([stn_tst.size(0)]).to(device)
        sid = torch.LongTensor([speaker_id]).to(device)
        audio = net_g_ms.infer(x_tst, x_tst_lengths, sid=sid,
                               noise_scale=noise_scale,
                               noise_scale_w=noise_scale_w,
                               length_scale=length_scale)[0][0, 0].data.cpu().float().numpy()
    sf.write('speech.wav', audio, 22050)

def create_to_symbol_fn(hps, is_symbol_input, input_text, temp_lang):
    """Return the cleaned symbol string for *input_text*, or '' when symbol
    input is disabled.

    ``temp_lang`` selects the language tag wrapping: 0 -> [ZH]...[ZH],
    1 -> [JA]...[JA], anything else leaves the text untagged. Cleaning uses
    the cleaners configured in ``hps.data.text_cleaners``.
    """
    if not is_symbol_input:
        return ''
    if temp_lang == 0:
        tagged = f'[ZH]{input_text}[ZH]'
    elif temp_lang == 1:
        tagged = f'[JA]{input_text}[JA]'
    else:
        tagged = input_text
    return _clean_text(tagged, hps.data.text_cleaners)

app = Flask(__name__)
# Landing / health-check route.
@app.route('/')
def home():
    return "WOI" 
@app.route('/ayaka/')
def ayakaTTS():
    """Synthesize the ``text`` query parameter and return the result URL.

    Writes ``speech.wav`` via :func:`generate`; the response carries the
    public ngrok URL of the ``/result`` endpoint that serves the audio.
    """
    query = request.args.get('text')
    if not query:
        # A missing/empty parameter previously flowed None into generate()
        # and crashed with a 500; reject it explicitly instead.
        return jsonify({"error": "missing 'text' query parameter"}), 400
    generate(query)
    return jsonify({"work": query, "url": public_url + "/result"})
@app.route('/result')
def resultAud():
    """Serve the most recently synthesized waveform file."""
    wav_path = "speech.wav"
    return send_file(wav_path)

if __name__ == "__main__":
    # Flask development server on its defaults (127.0.0.1:5000) — the same
    # port the ngrok tunnel opened at module import forwards to.
    app.run()