from flask import Flask, render_template, request, jsonify
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
from mistral_7b import generate_text
import torch
from inference import voice_inference

app = Flask(__name__, static_url_path='/static')

# Load the model and tokenizer
tokenizer_name = "gogamza/kobart-base-v2"
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)  # create the tokenizer
model_name = "/content/flask/eojin/checkpoint-142243"
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)  # create the model
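# Note: the checkpoint path above is a Colab-style local directory (/content/...);
# point it at wherever the fine-tuned KoBART checkpoint actually lives on your machine.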

# Try generating text with the fine-tuned model via a pipeline
nlg_pipeline = pipeline('translation_ko_to_ko', model=model, tokenizer=tokenizer)
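# A quick sketch of the pipeline's output shape (illustrative input, not from this repo):
# translation pipelines return a list of dicts, each with a 'translation_text' key, e.g.
#   nlg_pipeline("안녕하세요", max_length=60)[0]['translation_text']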

@app.route('/')
def index():
    return render_template("index.html")

@app.route('/voice/<sentence>')
def voice(sentence):
    OUTPUT_WAV_PATH = voice_inference(sentence)
    return OUTPUT_WAV_PATH
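# /voice runs TTS through voice_inference (from this repo's inference module)
# and returns the path of the generated wav file as plain text, e.g.:
#   curl http://localhost:8000/voice/%EC%95%88%EB%85%95
# (the URL-encoded input here is just an illustrative example)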

@app.route('/chatbot')
def chatbot():
    return render_template("chatbot.html")

# Route that processes user input
@app.route('/process_input/<input_text>')
def process_input(input_text):
    try:
        print("input_text", input_text, "==========================================")
        # Get the answer from the language model
        with torch.no_grad():
            answer = generate_text(input_text)
        print(answer, "=============================================")
        # Convert the answer to Jeju dialect using the text style-transfer model
        jeju_answer = nlg_pipeline(answer, max_length=60)[0]['translation_text']
        print(jeju_answer, "=============================================")
        # Return the result as JSON
        return jsonify({'answer': answer, 'jeju_answer': jeju_answer})
    except Exception as e:
        print("Exception:", str(e))
        # On exception, return the error message as JSON
        return jsonify({'error': str(e)})
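
# Example request against this route (a sketch; the input must be URL-encoded):
#   curl http://localhost:8000/process_input/%EC%95%88%EB%85%95
# A successful response looks like:
#   {"answer": "...", "jeju_answer": "..."}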

if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=8000)