lanna_lalala;- commited on
Commit ·
edeae54
1
Parent(s): e3a7a8c
update api.py
Browse files- phase/Student_view/chatbot.py +28 -20
phase/Student_view/chatbot.py
CHANGED
|
@@ -9,7 +9,7 @@ from huggingface_hub import InferenceClient
|
|
| 9 |
# Hugging Face model setup
|
| 10 |
# -------------------------------
|
| 11 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
| 12 |
-
GEN_MODEL = os.getenv("GEN_MODEL", "
|
| 13 |
|
| 14 |
if not HF_TOKEN:
|
| 15 |
st.error("⚠️ HF_TOKEN is not set. In your Space, add a Secret named HF_TOKEN.")
|
|
@@ -33,45 +33,53 @@ def add_message(text: str, sender: str):
|
|
| 33 |
)
|
| 34 |
|
| 35 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 36 |
# -------------------------------
|
| 37 |
# Hugging Face reply function
|
| 38 |
# -------------------------------
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 39 |
def _reply_with_hf():
|
| 40 |
if "client" not in globals():
|
| 41 |
raise RuntimeError("HF client not initialized")
|
| 42 |
|
| 43 |
-
#
|
| 44 |
-
|
|
|
|
| 45 |
|
| 46 |
-
# Build conversation history
|
| 47 |
-
convo = []
|
| 48 |
-
for m in st.session_state.messages:
|
| 49 |
-
role = "USER" if m["sender"] != "assistant" else "ASSISTANT"
|
| 50 |
-
convo.append(f"{role}: {m['text']}")
|
| 51 |
-
prompt = (system + "\n\n" + "\n".join(convo) + "\nASSISTANT:").strip()
|
| 52 |
-
|
| 53 |
-
# Call Hugging Face text-generation
|
| 54 |
try:
|
| 55 |
-
response = client.
|
| 56 |
prompt,
|
| 57 |
max_new_tokens=220,
|
| 58 |
-
temperature=0.2
|
| 59 |
-
stream=False
|
| 60 |
)
|
| 61 |
-
|
| 62 |
-
# Handle different response formats
|
| 63 |
if isinstance(response, dict) and "generated_text" in response:
|
| 64 |
-
return response["generated_text"].strip()
|
| 65 |
elif isinstance(response, str):
|
| 66 |
return response.strip()
|
| 67 |
else:
|
| 68 |
-
raise ValueError(f"Unexpected response format: {response}")
|
| 69 |
-
|
| 70 |
except Exception as e:
|
| 71 |
err_text = ''.join(traceback.format_exception_only(type(e), e)).strip()
|
| 72 |
raise RuntimeError(f"Hugging Face API Error: {err_text}")
|
| 73 |
|
| 74 |
-
|
| 75 |
# -------------------------------
|
| 76 |
# Streamlit page
|
| 77 |
# -------------------------------
|
|
|
|
| 9 |
# Hugging Face model setup
# -------------------------------
# HF_TOKEN must be provided as a Space Secret; the app cannot call the
# Inference API without it.
HF_TOKEN = os.getenv("HF_TOKEN")
# Model is overridable via env; default is a small seq2seq model that runs
# acceptably on CPU-only Spaces.
GEN_MODEL = os.getenv("GEN_MODEL", "google/flan-t5-base")  # ✅ CPU-friendly seq2seq

if not HF_TOKEN:
    st.error("⚠️ HF_TOKEN is not set. In your Space, add a Secret named HF_TOKEN.")
|
|
|
|
| 33 |
)
|
| 34 |
|
| 35 |
|
| 36 |
+
# System instruction prepended to every prompt sent to the model.
# Keep this short: FLAN-T5 has a small context window, and the chat
# history is appended after it.
TUTOR_PROMPT = (
    "You are a kind Jamaican primary-school finance tutor. "
    "Keep answers short, friendly, and age-appropriate. "
    "Teach step-by-step with tiny examples. Avoid giving personal financial advice."
)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
# -------------------------------
|
| 44 |
# Hugging Face reply function
|
| 45 |
# -------------------------------
|
| 46 |
+
def _format_history_for_flan(messages: list[dict]) -> str:
|
| 47 |
+
# Map your session_state messages → "User:" / "Tutor:" lines
|
| 48 |
+
lines = []
|
| 49 |
+
for m in messages:
|
| 50 |
+
txt = (m.get("text") or "").strip()
|
| 51 |
+
if not txt:
|
| 52 |
+
continue
|
| 53 |
+
if m.get("sender") == "assistant":
|
| 54 |
+
lines.append(f"Tutor: {txt}")
|
| 55 |
+
else:
|
| 56 |
+
lines.append(f"User: {txt}")
|
| 57 |
+
return "\n".join(lines)
|
| 58 |
+
|
| 59 |
def _reply_with_hf():
    """Generate the tutor's next reply via the Hugging Face Inference API.

    Builds a single FLAN-style instruction string from ``TUTOR_PROMPT`` plus
    the chat history in ``st.session_state["messages"]``, calls the
    module-level ``client``, and returns the generated text stripped of
    surrounding whitespace.

    Raises:
        RuntimeError: if the HF client was never initialized, or if the API
            call fails or returns an unrecognized payload.
    """
    if "client" not in globals():
        raise RuntimeError("HF client not initialized")

    # FLAN-T5 prefers one instruction string over structured chat turns.
    convo = _format_history_for_flan(st.session_state.get("messages", []))
    prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"

    try:
        response = client.text2text_generation(
            prompt,
            max_new_tokens=220,
            temperature=0.2,
        )
        # The client may return a dict payload or a bare string depending on
        # model/endpoint; normalize both to a stripped string.
        if isinstance(response, dict) and "generated_text" in response:
            return (response["generated_text"] or "").strip()
        elif isinstance(response, str):
            return response.strip()
        else:
            raise ValueError(f"Unexpected response format: {type(response)}")
    except Exception as e:
        # Collapse the exception to a one-line message for the UI, but keep
        # the original chained (`from e`) so full tracebacks stay debuggable.
        err_text = ''.join(traceback.format_exception_only(type(e), e)).strip()
        raise RuntimeError(f"Hugging Face API Error: {err_text}") from e
|
| 82 |
|
|
|
|
| 83 |
# -------------------------------
|
| 84 |
# Streamlit page
|
| 85 |
# -------------------------------
|