Spaces:
Sleeping
Sleeping
Initial commit
Browse files- .gitignore +0 -1
- models/emotion-classification-model/.gitkeep +0 -0
- models/emotion_model.py +0 -17
- pipeline/preprocess.py +14 -1
.gitignore
CHANGED
|
@@ -15,7 +15,6 @@ venv/
|
|
| 15 |
|
| 16 |
# models/ 하위 디렉토리 포함, 모델 파일들은 무시
|
| 17 |
models/emotion-classification-model/*
|
| 18 |
-
!models/emotion-classification-model/.gitkeep
|
| 19 |
|
| 20 |
models/fallback-npc-model/*
|
| 21 |
!models/fallback-npc-model/.gitkeep
|
|
|
|
| 15 |
|
| 16 |
# models/ 하위 디렉토리 포함, 모델 파일들은 무시
|
| 17 |
models/emotion-classification-model/*
|
|
|
|
| 18 |
|
| 19 |
models/fallback-npc-model/*
|
| 20 |
!models/fallback-npc-model/.gitkeep
|
models/emotion-classification-model/.gitkeep
DELETED
|
File without changes
|
models/emotion_model.py
DELETED
|
@@ -1,17 +0,0 @@
|
|
| 1 |
-
from transformers import pipeline
|
| 2 |
-
from fastapi import Request
|
| 3 |
-
|
| 4 |
-
async def detect_emotion(request: Request, text: str) -> dict:
|
| 5 |
-
tokenizer = request.app.state.emotion_tokenizer
|
| 6 |
-
model = request.app.state.emotion_model
|
| 7 |
-
|
| 8 |
-
emotion_pipeline = pipeline(
|
| 9 |
-
"text-classification",
|
| 10 |
-
model=model,
|
| 11 |
-
tokenizer=tokenizer,
|
| 12 |
-
return_all_scores=True
|
| 13 |
-
)
|
| 14 |
-
|
| 15 |
-
results = emotion_pipeline(text)
|
| 16 |
-
# 결과를 label: score 형태로 변환
|
| 17 |
-
return {r["label"]: r["score"] for r in results[0]}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pipeline/preprocess.py
CHANGED
|
@@ -34,6 +34,19 @@ def _semantic_match_embedder(embedder, user_input: str, trigger_texts: list, thr
|
|
| 34 |
matched_text = trigger_texts[int(idx.item())]
|
| 35 |
return (score_val >= threshold, score_val, matched_text)
|
| 36 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
async def _llm_trigger_check(request: Request, user_input: str, label_list: list) -> bool:
|
| 38 |
if not label_list:
|
| 39 |
return False
|
|
@@ -72,7 +85,7 @@ async def preprocess_input(
|
|
| 72 |
context: dict
|
| 73 |
) -> dict:
|
| 74 |
parser = ContextParser(context)
|
| 75 |
-
emotion = await
|
| 76 |
|
| 77 |
require_items = context.get("require", {}).get("items", [])
|
| 78 |
require_actions = context.get("require", {}).get("actions", [])
|
|
|
|
| 34 |
matched_text = trigger_texts[int(idx.item())]
|
| 35 |
return (score_val >= threshold, score_val, matched_text)
|
| 36 |
|
| 37 |
+
async def extract_emotion_via_fallback(request: Request, user_input: str) -> str:
|
| 38 |
+
prompt = (
|
| 39 |
+
"λ€μ λ¬Έμ₯μ νμ κ°μ μ ν λ¨μ΄ λλ μ§§μ λ¬Έμ₯μΌλ‘ μ€λͺ
νμμ€.\n\n"
|
| 40 |
+
f"[λ¬Έμ₯]\n{user_input}\n\n"
|
| 41 |
+
"μ§μ:\n- κ°μ μ μ§μ μ μΌλ‘ νννμ§ μμλ λ¬Έλ§₯μ ν΅ν΄ μΆλ‘ νμμ€.\n"
|
| 42 |
+
"- κ°λ₯ν κ²½μ° κ°μ μ κ°λλ λμμ€λ λ°μνμμ€.\n"
|
| 43 |
+
"- μ: λΆλ
Έ, μ¬ν, νΌλ, κΈ°λ, 무κ΄μ¬, μ΄μ‘°ν¨ λ±\n"
|
| 44 |
+
"- λ¨μ΄ νλ λλ μ§§μ λ¬Έμ₯μΌλ‘λ§ μΆλ ₯νμμ€.\n\n"
|
| 45 |
+
"μ λ΅:"
|
| 46 |
+
)
|
| 47 |
+
response = await generate_fallback_response(request, prompt)
|
| 48 |
+
return response.strip()
|
| 49 |
+
|
| 50 |
async def _llm_trigger_check(request: Request, user_input: str, label_list: list) -> bool:
|
| 51 |
if not label_list:
|
| 52 |
return False
|
|
|
|
| 85 |
context: dict
|
| 86 |
) -> dict:
|
| 87 |
parser = ContextParser(context)
|
| 88 |
+
emotion = await extract_emotion_via_fallback(request, user_input)
|
| 89 |
|
| 90 |
require_items = context.get("require", {}).get("items", [])
|
| 91 |
require_actions = context.get("require", {}).get("actions", [])
|