|
|
|
|
|
import torch |
|
|
import numpy as np |
|
|
from transformers import AutoModelForSequenceClassification, AutoTokenizer |
|
|
|
|
|
|
|
|
# Fine-tuned KLUE-BERT emotion classifier pulled from the Hugging Face hub.
model = AutoModelForSequenceClassification.from_pretrained("M1NJ1/klue-bert-emotion")


tokenizer = AutoTokenizer.from_pretrained("M1NJ1/klue-bert-emotion")


# Inference-only: disable dropout/batch-norm updates and move to GPU when available.
model.eval().to("cuda" if torch.cuda.is_available() else "cpu")


# NOTE(review): 44 label indices, but this list is never referenced by the
# functions below (predict_multi builds its own index-based label strings) —
# presumably a placeholder for human-readable label names; confirm intent.
label_map = list(range(44))
|
|
|
|
|
def predict_multi(text, threshold=0.5):
    """Predict all emotion labels whose probability exceeds *threshold*.

    Multi-label classification: each logit is passed through an independent
    sigmoid (not a softmax), so several labels can fire at once.

    Args:
        text: Input sentence to classify.
        threshold: Per-label decision cutoff. Defaults to 0.5, which matches
            the original hard-coded behaviour.

    Returns:
        list[str]: One "๊ฐ์ {i}" string per label index i with
        probability strictly greater than *threshold*; empty list when
        nothing passes the cutoff.
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    # Tokenize a single sentence; max_length=64 silently truncates longer input.
    inputs = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=64
    ).to(device)
    with torch.no_grad():
        outputs = model(**inputs)
    # squeeze() drops the batch dim -> 1-D array of per-label probabilities.
    probs = torch.sigmoid(outputs.logits).squeeze().cpu().numpy()
    return [f"๊ฐ์ {i}" for i, p in enumerate(probs) if p > threshold]
|
|
|
|
|
def generate_response(emotions, mode):
    """Build a counselling-style reply from detected emotion labels.

    Args:
        emotions: Label strings as produced by predict_multi (e.g. "๊ฐ์ 1");
            may be empty.
        mode: 'T' for a solution-oriented reply, 'F' for an empathetic one
            (MBTI thinking/feeling split). Any other value falls through to
            the generic acknowledgement.

    Returns:
        str: The assembled reply, or a fallback message when *emotions* is
        empty or no rule matched.
    """
    # Nothing detected at all: ask the user to rephrase.
    if not emotions:
        return "๊ฐ์ ์ ํ์ํ์ง ๋ชปํ์ด์. ๋ค์ ๋งํด์ค ์ ์๋์?"

    response = ""
    if mode == 'T':
        if '๊ฐ์ 1' in emotions:
            # Fixed: these literals were split across physical lines in the
            # source (a syntax error); the trailing break is now an explicit \n.
            response += "๋ถ์์ ์ค๋น ๋ถ์กฑ์์ ์ฌ ์ ์์ด์. ์ฐจ๊ทผํ ์ ๋ฆฌํด๋ณผ๊น์?\n"
    elif mode == 'F':
        if '๊ฐ์ 1' in emotions:
            response += "๋ถ์ํ ํ๋ฃจ์๊ฒ ๊ตฐ์... ๊ด์ฐฎ์์, ๋น์ ์ํ๊ณ ์์ด์.\n"

    # Unknown mode, or emotions present but no matching rule.
    return response or "๋น์ ์ ๊ฐ์ ์ ๊ท ๊ธฐ์ธ์ด๊ณ ์์ด์."
|
|
|