# emotion-chatbot / src / emotion_engine.py
# Author: kootaeng2
# Chore: Improve and update code comments (commit c1b9543)
# emotion_engine.py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import os
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
import os
def load_emotion_classifier():
    """Build a Hugging Face text-classification pipeline for emotions.

    Fetches the tokenizer and sequence-classification model from the
    Hugging Face Hub (model ID, not a local path) and wraps them in a
    `pipeline`, placed on the GPU when CUDA is available.

    Returns:
        A ready-to-use ``pipeline`` object, or ``None`` when the model
        could not be loaded (the error is printed, not raised).
    """
    # Hub model ID in "username/model-name" form — presumably a Korean
    # emotion classifier, judging by the repo name; verify on the Hub.
    MODEL_PATH = "koons/korean-emotion-classifier-final"
    print(f"Hugging Face Hub 모델 '{MODEL_PATH}'에서 모델을 불러옵니다...")
    try:
        # No local_files_only flag, so the files may be downloaded online.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
        model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
    except Exception as e:
        # Best-effort: report the failure and degrade to a None engine,
        # which predict_emotion() knows how to handle.
        print(f"❌ 모델 로딩 중 오류: {e}")
        return None
    else:
        print("✅ Hugging Face Hub 모델 로딩 성공!")
    # device 0 = first GPU, -1 = CPU (transformers pipeline convention).
    target_device = 0 if torch.cuda.is_available() else -1
    return pipeline(
        "text-classification",
        model=model,
        tokenizer=tokenizer,
        device=target_device,
    )
def predict_emotion(classifier, text):
    """Return the emotion label the classifier assigns to *text*.

    Args:
        classifier: A text-classification pipeline (callable returning a
            list of ``{'label': ..., ...}`` dicts), or ``None`` when the
            engine failed to load.
        text: The input string to classify.

    Returns:
        The top predicted label, or a Korean status message when the
        input is blank or the classifier is unavailable.
    """
    # Guard clauses: blank input first, then a missing engine.
    if text is None or text.strip() == "":
        return "내용 없음"
    if classifier is None:
        return "오류: 감정 분석 엔진 준비 안됨."
    prediction = classifier(text)
    return prediction[0]["label"]