File size: 4,891 Bytes
b0c2db7
 
239a35e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b0c2db7
239a35e
b0c2db7
 
 
 
 
 
 
 
239a35e
 
b0c2db7
 
 
239a35e
 
 
b0c2db7
239a35e
 
b0c2db7
 
 
239a35e
 
b0c2db7
 
 
 
 
 
 
239a35e
b0c2db7
239a35e
a2a7890
b0c2db7
239a35e
b0c2db7
 
 
 
 
 
 
239a35e
 
 
 
a2a7890
 
356b3b9
a2a7890
b0c2db7
 
 
 
 
 
 
 
 
239a35e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b0c2db7
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
'''
sudo docker run --gpus all --runtime=nvidia --rm \
    -v /home/ubuntu/dotdemo/third_party:/third_party \
    -v /home/ubuntu/dotdemo-dev:/dotdemo-dev \
    -v /home/ubuntu/dot-demo-assets/ml-logs:/logs \
    -v /home/ubuntu/audio-inference-output:/outputDir \
    -v /home/ubuntu/dotdemo/inference_server:/app \
    --network="host" \
    -it fantasyfish677/rvc:v0 /bin/bash
pip3 install flask_cors
python3 /app/server.py 2>&1 | tee /logs/inference_server.log

export FLASK_APP=server
export FLASK_DEBUG=true
pip3 install gunicorn
gunicorn -b 0.0.0.0:8080 -w 4 --timeout=600 server:app

curl -X POST http://3.16.130.199:8081/ping

curl -X POST http://3.16.130.199:8081/available-models

curl -X POST http://3.16.130.199:8081/inference \
   -H 'Content-Type: application/json' \
   -d '{"expName":"CashMoney","audioFilePath":"radwimps.mp3", "pitchShift": 0, "saveFilePath":"radwimps_CashMoney.wav"}'

curl -X POST http://3.16.130.199:8081/delete \
   -H 'Content-Type: application/json' \
   -d '{"modelName":"drake-100"}'
'''
import json
import os
from flask import Flask, request
from logging import exception
import time
from server_utils import model_inference_single
from glob import glob
from flask_cors import CORS, cross_origin
import shutil

# Emitted once at startup so container logs confirm the heavy imports completed.
print("import successful!")

# Flask application with CORS enabled on all routes so browser clients on
# other origins can call the API.
app = Flask("inference server")
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'

@app.route("/ping", methods=['GET', 'POST'])
@cross_origin()
def healthcheck():
    """Liveness probe: report that the server is up and responding."""
    payload = {"code": 200, "message": "responding"}
    return json.dumps(payload).encode('utf-8')

@app.route("/inference", methods=['GET', 'POST'])
@cross_origin()
def inference():
    """Run single-file voice-conversion inference.

    Expects a JSON body with:
        expName       -- experiment name; weights live at
                         /third_party/RVC/weights/<expName>.pth
        audioFilePath -- input audio path, relative to /dotdemo-dev
        pitchShift    -- pitch shift value passed through to the model
        saveFilePath  -- output path, relative to /dotdemo-dev

    Returns 200 with timing info on success, 404 when the model weights or
    audio file are missing, 500 on header or processing errors.
    """
    # .get() avoids a KeyError (and an opaque 500 traceback) when the
    # Content-Type header is absent entirely.
    if request.headers.get('Content-Type') != 'application/json':
        exception("Header error")
        return json.dumps({"message":"Header error"}), 500
    try:
        content = request.get_json()
        exp_name = content['expName']
        audio_file_path = content['audioFilePath']
        pitch_shift = content['pitchShift']
        save_file_path = content['saveFilePath']

        model_path = exp_name + '.pth'
        audio_path = os.path.join('/dotdemo-dev', audio_file_path)
        if not os.path.exists('/third_party/RVC/weights/{}'.format(model_path)):
            exception("Model doesn't exist")
            return json.dumps({"message":"Model doesn't exist"}), 404
        if not os.path.exists(audio_path):
            exception("Audio file doesn't exist")
            return json.dumps({"message":"Audio file doesn't exist"}), 404

        save_path = os.path.join('/dotdemo-dev', save_file_path)
        # Ensure the destination directory exists before the model writes to it.
        save_dir = os.path.dirname(save_path)
        os.makedirs(save_dir, exist_ok=True)

        # BUG FIX: the original built "/logs<expName>.log" (missing the path
        # separator), so error logs landed outside the mounted /logs directory.
        error_log_path = '/logs/{}.log'.format(exp_name)
        # Optional retrieval index produced during training; empty string when absent.
        index_path_list = glob("/third_party/RVC/logs/{}/added_IVF*_Flat_nprobe_1_v1.index".format(exp_name))
        index_path = index_path_list[0] if index_path_list else ""

        start_time = time.time()
        model_inference_single(model_path, index_path, audio_path, save_path, error_log_path, pitch_shift)
        end_time = time.time()
        return json.dumps({"message": "Inference Completed in {} secs.".format(end_time - start_time)}), 200

    except Exception as e:
        # BUG FIX: the original logged "Training process failed" in the
        # inference handler (copy-paste from the training server).
        exception("Inference process failed")
        return json.dumps({"message":"Inference process failed due to {}".format(e)}), 500

@app.route("/available-models", methods=['GET', 'POST'])
@cross_origin()
def get_available_models():
    """Return a comma-separated list of model names (".pth" suffix stripped)."""
    weights_dir = '/third_party/RVC/weights'
    names = [entry[:-4] for entry in os.listdir(weights_dir)]
    return json.dumps({"message": ','.join(names)}), 200

@app.route("/delete", methods=['GET', 'POST'])
@cross_origin()
def delete_model():
    """Delete a model's weight file and its training-log directory.

    Expects a JSON body {"modelName": <name>}. The special "mute" model is
    protected from deletion. Returns 200 on success, 404 when the weights
    are missing, 500 on header or processing errors.
    """
    # .get() avoids a KeyError when the Content-Type header is absent.
    if request.headers.get('Content-Type') != 'application/json':
        exception("Header error")
        return json.dumps({"message":"Header error"}), 500
    try:
        content = request.get_json()
        model_name = content['modelName']
        if model_name == "mute":
            return json.dumps({"message": "Sorry, it's not allowed to delete mute directory"}), 500

        model_dir = '/third_party/RVC/weights'
        model_path = '{}/{}.pth'.format(model_dir, model_name)
        if not os.path.exists(model_path):
            exception("Model doesn't exist")
            return json.dumps({"message":"Model doesn't exist"}), 404

        os.remove(model_path)
        log_dir = '/third_party/RVC/logs/' + model_name
        if os.path.exists(log_dir):
            # BUG FIX: the original called shutil.rmtree(model_path) -- the
            # weights file already removed above -- which raised on every
            # delete that had logs and left the log directory behind.
            shutil.rmtree(log_dir)
        return json.dumps({"message": 'Model {} has been deleted'.format(model_name)}), 200
    except Exception as e:
        # BUG FIX: messages said "Training"/"Inference process failed" in the
        # delete handler (copy-paste error).
        exception("Delete process failed")
        return json.dumps({"message":"Delete process failed due to {}".format(e)}), 500

# Dev entry point: Flask's built-in server, reachable on all interfaces at
# port 8081 with the debugger/reloader on. (Production runs use gunicorn --
# see the usage docstring at the top of the file.)
if __name__ == "__main__":
     app.run(host="0.0.0.0", port=8081, debug=True)