QAway-to committed on
Commit ·
8e845a7
1
Parent(s): b6185eb
Back to normal app.py v1.2
Browse files- core/interviewer.py +55 -36
- core/mbti_analyzer.py +22 -0
core/interviewer.py
CHANGED
|
@@ -1,5 +1,6 @@
|
|
| 1 |
# core/interviewer.py
|
| 2 |
-
import
|
|
|
|
| 3 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 4 |
|
| 5 |
INTERVIEWER_MODEL = "f3nsmart/TinyLlama-MBTI-Interviewer-LoRA"
|
|
@@ -11,45 +12,63 @@ llm_pipe = pipeline(
|
|
| 11 |
"text-generation",
|
| 12 |
model=model,
|
| 13 |
tokenizer=tokenizer,
|
| 14 |
-
max_new_tokens=
|
| 15 |
-
temperature=0.
|
| 16 |
top_p=0.9,
|
| 17 |
)
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
)
|
| 44 |
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
|
|
|
| 48 |
|
| 49 |
-
|
| 50 |
-
if not
|
| 51 |
-
|
|
|
|
| 52 |
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
# core/interviewer.py
|
| 2 |
+
import random
|
| 3 |
+
import uuid
|
| 4 |
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
|
| 5 |
|
| 6 |
INTERVIEWER_MODEL = "f3nsmart/TinyLlama-MBTI-Interviewer-LoRA"
|
|
|
|
| 12 |
"text-generation",
|
| 13 |
model=model,
|
| 14 |
tokenizer=tokenizer,
|
| 15 |
+
max_new_tokens=60,
|
| 16 |
+
temperature=0.6,
|
| 17 |
top_p=0.9,
|
| 18 |
)
|
| 19 |
|
| 20 |
+
# The eight MBTI trait poles; the interviewer cycles through them without repeats.
CATEGORIES = [
    "Introversion", "Extroversion",
    "Sensing", "Intuition",
    "Thinking", "Feeling",
    "Judging", "Perceiving"
]

# In-memory per-user session store, keyed by user_id.
session_state = {}
def init_session(user_id: str):
    """Create (or reset) the session record for *user_id* in the module store."""
    fresh_record = {
        "asked": [],            # categories already covered this session
        "answers": {},          # user replies, keyed by category
        "iteration": 1,         # interview round counter
        "dominant_axis": None,  # set once an axis clearly dominates
    }
    session_state[user_id] = fresh_record
def select_next_category(user_id: str):
    """Randomly pick a category not yet asked this session.

    Records the pick in the session's "asked" list and returns it;
    returns None once all eight categories are exhausted.
    """
    state = session_state[user_id]
    already_asked = state["asked"]
    pool = [category for category in CATEGORIES if category not in already_asked]
    if not pool:
        return None
    choice = random.choice(pool)
    already_asked.append(choice)
    return choice
def build_prompt(category: str):
    """Build the JSON-dialogue-style prompt asking the LLM for one question."""
    instruction = (
        f"Ask one open-ended question about {category}. "
        "Avoid repeating previous phrasing. Output only the question."
    )
    # Hand-assembled JSON-shaped string (kept byte-identical to the trained format).
    return '{"history": [], "instruction": "' + instruction + '", "next_question": "?"}'
def generate_question(user_id: str):
    """Yield one interview question for the user's next MBTI category.

    Lazily creates the session, picks an unused category, prompts the LLM,
    and yields the cleaned question prefixed with its category. Once all
    eight categories have been asked, yields a completion notice instead.
    """
    if user_id not in session_state:
        init_session(user_id)

    category = select_next_category(user_id)
    if not category:
        yield "✅ All 8 categories completed."
        return

    prompt = build_prompt(category)
    raw = llm_pipe(prompt)[0]["generated_text"]
    # BUG FIX: by default the text-generation pipeline echoes the prompt at
    # the start of `generated_text`, so the old first-line split returned the
    # prompt itself rather than the model's question. Strip the prompt first.
    completion = raw[len(prompt):] if raw.startswith(prompt) else raw
    question = completion.strip().split("\n")[0]
    if "?" not in question:
        question += "?"
    yield f"({category}) {question}"
core/mbti_analyzer.py
CHANGED
|
@@ -17,3 +17,25 @@ def analyze_mbti(user_text: str):
|
|
| 17 |
res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
|
| 18 |
mbti_text = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in res_sorted[:3]])
|
| 19 |
yield mbti_text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
res_sorted = sorted(res, key=lambda x: x["score"], reverse=True)
|
| 18 |
mbti_text = "\n".join([f"{r['label']} → {r['score']:.3f}" for r in res_sorted[:3]])
|
| 19 |
yield mbti_text
|
| 20 |
+
|
| 21 |
+
def compute_dominant_axis(results):
    """Derive a four-letter MBTI code from per-trait classifier scores.

    Args:
        results: list of dicts shaped like
            [{"label": "Introversion", "score": 0.73},
             {"label": "Extroversion", "score": 0.27}, ...].
            A trait missing from *results* is treated as score 0; on a tie
            the first trait of each axis wins (matching the old behavior).

    Returns:
        str: a four-letter code such as "INTP".
    """
    # BUG FIX: the previous version used label[0] for the winning trait, so
    # "Intuition" contributed "I" instead of the standard MBTI letter "N",
    # yielding invalid codes like "IITP". Map labels to letters explicitly.
    axes = [
        (("Introversion", "I"), ("Extroversion", "E")),
        (("Sensing", "S"), ("Intuition", "N")),
        (("Thinking", "T"), ("Feeling", "F")),
        (("Judging", "J"), ("Perceiving", "P")),
    ]

    def _score(label):
        # First matching entry wins (mirrors the original next() lookup).
        return next((r["score"] for r in results if r["label"] == label), 0)

    mbti_code = ""
    for (left, left_letter), (right, right_letter) in axes:
        mbti_code += left_letter if _score(left) >= _score(right) else right_letter
    return mbti_code