File size: 1,436 Bytes
eadeab6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38

import torch
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# Load the fine-tuned multi-label emotion classifier and its tokenizer from the HF Hub.
model = AutoModelForSequenceClassification.from_pretrained("M1NJ1/klue-bert-emotion")
tokenizer = AutoTokenizer.from_pretrained("M1NJ1/klue-bert-emotion")
# Inference-only: eval mode, moved to GPU when available.
model.eval().to("cuda" if torch.cuda.is_available() else "cpu")

# Placeholder label indices (0..43); replace with the actual emotion label names.
label_map = list(range(44))

def predict_multi(text):
    """Run multi-label emotion inference on *text*.

    Applies a sigmoid to each logit and keeps every label whose
    probability exceeds 0.5 (multi-label, so several may match).

    Returns a list of label strings of the form "감정{index}".
    """
    device = "cuda" if torch.cuda.is_available() else "cpu"
    encoded = tokenizer(
        text, return_tensors="pt", truncation=True, padding=True, max_length=64
    ).to(device)
    with torch.no_grad():
        logits = model(**encoded).logits
        scores = torch.sigmoid(logits).squeeze().cpu().numpy()
    # Independent per-label threshold — NOT a softmax/argmax single choice.
    return [f"감정{idx}" for idx, score in enumerate(scores) if score > 0.5]

def generate_response(emotions, mode):
    """Compose a reply string from the detected emotion labels.

    Parameters
    ----------
    emotions : list[str]
        Labels produced by predict_multi (e.g. "감정1"). May be empty.
    mode : str
        'T' for an analytical/advice-style reply, 'F' for an
        empathetic reply. Any other value yields the generic fallback.

    Returns
    -------
    str
        The assembled reply, or a fallback sentence when no handled
        emotion was found.
    """
    if not emotions:
        return "감정을 파악하지 못했어요. 다시 말해줄 수 있나요?"
    response = ""
    if mode == 'T':
        if '감정1' in emotions:
            # BUG FIX: the original literal contained a raw line break inside
            # a single-quoted string (SyntaxError); the intended trailing
            # newline is now an explicit escape.
            response += "불안은 준비 부족에서 올 수 있어요. 차근히 정리해볼까요?\n"
    elif mode == 'F':
        if '감정1' in emotions:
            # Same fix as above for the empathetic branch.
            response += "불안한 하루였겠군요... 괜찮아요, 당신 잘하고 있어요.\n"
    # Fallback when emotions were detected but none matched a handled case.
    return response or "당신의 감정에 귀 기울이고 있어요."