File size: 1,436 Bytes
eadeab6 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer
# --- Model setup -------------------------------------------------------
# Load the fine-tuned KLUE-BERT emotion classifier and its matching tokenizer.
tokenizer = AutoTokenizer.from_pretrained("M1NJ1/klue-bert-emotion")
model = AutoModelForSequenceClassification.from_pretrained("M1NJ1/klue-bert-emotion")

# Inference only: disable dropout/batch-norm updates and place the weights
# on the GPU when one is available, otherwise stay on CPU.
if torch.cuda.is_available():
    model.eval().to("cuda")
else:
    model.eval().to("cpu")

# Placeholder label names (indices 0..43); swap in the real emotion label
# strings when they are known.
label_map = list(range(44))
def predict_multi(text, threshold=0.5):
    """Return every emotion label whose probability exceeds ``threshold``.

    Multi-label classification: each logit gets an independent sigmoid
    (not a softmax), so any number of labels can fire at once.

    Args:
        text: Input sentence to classify.
        threshold: Minimum per-label probability for a label to be
            reported. Defaults to 0.5, the original hard-coded cutoff.

    Returns:
        List of label strings (e.g. ``"๊ฐ์ 3"``) for labels above the
        threshold; empty list when nothing crosses it.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    inputs = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=64
    ).to(device)
    with torch.no_grad():  # inference only — no gradient bookkeeping
        outputs = model(**inputs)
    probs = torch.sigmoid(outputs.logits).squeeze().cpu().numpy()
    # NOTE(review): the label text below looks mojibake-encoded (extraction
    # artifact); kept byte-identical because generate_response matches on it.
    return [f"๊ฐ์ {i}" for i, p in enumerate(probs) if p > threshold]
def generate_response(emotions, mode):
    """Build a short reply string from the detected emotion labels.

    Args:
        emotions: List of emotion label strings as produced by
            ``predict_multi`` (e.g. ``'๊ฐ์ 1'``).
        mode: Reply persona — ``'T'`` (thinking-style) or ``'F'``
            (feeling-style). Any other value yields the fallback reply.

    Returns:
        A response string; a fixed fallback message when ``emotions`` is
        empty or no rule matched.

    NOTE(review): the Korean string literals appear mojibake-encoded
    (extraction artifact); they are preserved as-is since predict_multi
    emits labels in the same encoding. The originally broken (unterminated)
    literals have been rejoined into valid single strings.
    """
    if not emotions:
        return "๊ฐ์ ์ ํ์ํ์ง ๋ชปํ์ด์. ๋ค์ ๋งํด์ค ์ ์๋์?"
    response = ""
    if mode == 'T':
        if '๊ฐ์ 1' in emotions:
            response += "๋ถ์์ ์ค๋น ๋ถ์กฑ์์ ์ฌ ์ ์์ด์. ์ฐจ๊ทผํ ์ ๋ฆฌํด๋ณผ๊น์?\n"
    elif mode == 'F':
        if '๊ฐ์ 1' in emotions:
            response += "๋ถ์ํ ํ๋ฃจ์๊ฒ ๊ตฐ์... ๊ด์ฐฎ์์, ๋น์ ์ํ๊ณ ์์ด์.\n"
    # No rule fired (or unknown mode): fall back to a neutral acknowledgement.
    return response or "๋น์ ์ ๊ฐ์ ์ ๊ท ๊ธฐ์ธ์ด๊ณ ์์ด์."
|