'''
Inference server for RVC voice-conversion models.

Start the container (the mounts assume the host layout shown below):

sudo docker run --gpus all --runtime=nvidia --rm \
    -v /home/ubuntu/dotdemo/third_party:/third_party \
    -v /home/ubuntu/dotdemo-dev:/dotdemo-dev \
    -v /home/ubuntu/dot-demo-assets/ml-logs:/logs \
    -v /home/ubuntu/audio-inference-output:/outputDir \
    -v /home/ubuntu/dotdemo/inference_server:/app \
    --network="host" \
    -it fantasyfish677/rvc:v0 /bin/bash

Run the server directly (output is tee'd to the mounted /logs volume):

pip3 install flask_cors
python3 /app/server.py 2>&1 | tee /logs/inference_server.log

Or serve it with gunicorn (the FLASK_* variables enable debug mode for development):

export FLASK_APP=server
export FLASK_DEBUG=true
pip3 install gunicorn
gunicorn -b 0.0.0.0:8080 -w 4 --timeout=600 server:app

Example requests (these assume the server listens on port 8081, which is what
app.run() below uses; adjust the port if you bind gunicorn elsewhere):

curl -X POST http://3.16.130.199:8081/ping

curl -X POST http://3.16.130.199:8081/available-models

curl -X POST http://3.16.130.199:8081/inference \
    -H 'Content-Type: application/json' \
    -d '{"expName":"CashMoney","audioFilePath":"radwimps.mp3", "pitchShift": 0, "saveFilePath":"radwimps_CashMoney.wav"}'

curl -X POST http://3.16.130.199:8081/delete \
    -H 'Content-Type: application/json' \
    -d '{"modelName":"drake-100"}'
'''
import json
import os
from flask import Flask, request
import logging
import time
from server_utils import model_inference_single
from glob import glob
from flask_cors import CORS, cross_origin
import shutil
print("import successful!")
app = Flask("inference server")
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
@app.route("/ping", methods=['GET', 'POST'])
@cross_origin()
def healthcheck():
return json.dumps({"code": 200, "message": "responding"}).encode('utf-8')
@app.route("/inference", methods=['GET', 'POST'])
@cross_origin()
def inference():
if request.headers['Content-Type'] != 'application/json':
exception("Header error")
return json.dumps({"message":"Header error"}), 500
try:
content = request.get_json()
exp_name = content['expName']
audio_file_path = content['audioFilePath']
pitch_shift = content['pitchShift']
save_file_path = content['saveFilePath']
model_path = exp_name + '.pth'
audio_path = os.path.join('/dotdemo-dev', audio_file_path)
if not os.path.exists('/third_party/RVC/weights/{}'.format(model_path)):
exception("Model doesn't exist")
return json.dumps({"message":"Model doesn't exist"}), 404
if not os.path.exists(audio_path):
exception("Audio file doesn't exist")
return json.dumps({"message":"Audio file doesn't exist"}), 404
save_path = os.path.join('/dotdemo-dev', save_file_path)
save_dir = os.path.dirname(save_path)
os.makedirs(save_dir, exist_ok=True)
error_log_path = os.path.join("/logs{}.log".format(exp_name))
index_path_list = glob("/third_party/RVC/logs/{}/added_IVF*_Flat_nprobe_1_v1.index".format(exp_name))
index_path = index_path_list[0] if len(index_path_list) > 0 else ""
start_time = time.time()
model_inference_single(model_path, index_path, audio_path, save_path, error_log_path, pitch_shift)
end_time = time.time()
return json.dumps({"message": "Inference Completed in {} secs.".format(end_time - start_time)}), 200
except Exception as e:
exception("Training process failed")
return json.dumps({"message":"Inference process failed due to {}".format(e)}), 500
@app.route("/available-models", methods=['GET', 'POST'])
@cross_origin()
def get_available_models():
model_dir = '/third_party/RVC/weights'
model_list = os.listdir(model_dir)
model_list = [model_name[:-4] for model_name in model_list]
return json.dumps({"message": ','.join(model_list)}), 200
@app.route("/delete", methods=['GET', 'POST'])
@cross_origin()
def delete_model():
if request.headers['Content-Type'] != 'application/json':
exception("Header error")
return json.dumps({"message":"Header error"}), 500
try:
content = request.get_json()
model_name = content['modelName']
if model_name == "mute":
return json.dumps({"message": "Sorry, it's not allowed to delete mute directory"}), 500
model_dir = '/third_party/RVC/weights'
model_path = '{}/{}.pth'.format(model_dir, model_name)
if not os.path.exists(model_path):
exception("Model doesn't exist")
return json.dumps({"message":"Model doesn't exist"}), 404
os.remove(model_path)
log_dir = '/third_party/RVC/logs/' + model_name
if os.path.exists(log_dir):
shutil.rmtree(model_path)
return json.dumps({"message": 'Model {} has been deleted'.format(model_name)}), 200
except Exception as e:
exception("Training process failed")
return json.dumps({"message":"Inference process failed due to {}".format(e)}), 500
if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8081, debug=True)
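
# Note: app.run(debug=True) is Flask's development server; the gunicorn command
# in the docstring is the more production-appropriate way to serve this app.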