Spaces:
Runtime error
Runtime error
| # emotion_engine.py | |
| import torch | |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline | |
| import os | |
| import torch | |
| from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline | |
| import os | |
def load_emotion_classifier(model_path="koons/korean-emotion-classifier-final"):
    """Load a Korean emotion text-classification pipeline from the Hugging Face Hub.

    Args:
        model_path: Hub model ID in "user/model" form. Defaults to the
            project's fine-tuned Korean emotion classifier, so existing
            callers are unaffected.

    Returns:
        A transformers text-classification pipeline on success, or None
        when the model/tokenizer could not be downloaded or loaded.
    """
    print(f"Hugging Face Hub ๋ชจ๋ธ '{model_path}'์์ ๋ชจ๋ธ์ ๋ถ๋ฌ์ต๋๋ค...")
    try:
        # No local_files_only flag: allow downloading from the Hub when the
        # model is not cached locally.
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = AutoModelForSequenceClassification.from_pretrained(model_path)
        print("โ Hugging Face Hub ๋ชจ๋ธ ๋ก๋ฉ ์ฑ๊ณต!")
    except Exception as e:
        # Best-effort: report the failure and signal it to the caller via None
        # instead of crashing app startup.
        print(f"โ ๋ชจ๋ธ ๋ก๋ฉ ์ค ์ค๋ฅ: {e}")
        return None
    # transformers convention: device=0 selects the first CUDA GPU, -1 means CPU.
    device = 0 if torch.cuda.is_available() else -1
    return pipeline("text-classification", model=model, tokenizer=tokenizer, device=device)
def predict_emotion(classifier, text):
    """Return the emotion label the classifier assigns to *text*.

    Args:
        classifier: a transformers text-classification pipeline (or any
            callable with the same result shape), or None when the engine
            failed to initialize.
        text: the input string to classify.

    Returns:
        The predicted label string, or a status message for empty input /
        missing classifier.
    """
    # Guard: blank or whitespace-only input never reaches the model.
    if not (text and text.strip()):
        return "๋ด์ฉ ์์"
    # Guard: engine was never loaded (load_emotion_classifier returned None).
    if classifier is None:
        return "์ค๋ฅ: ๊ฐ์ ๋ถ์ ์์ง ์ค๋น ์๋จ."
    predictions = classifier(text)
    top = predictions[0]
    return top['label']