# phase/Student_view/chatbot.py
import streamlit as st
import datetime, os, traceback
from huggingface_hub import InferenceClient
# --- Hugging Face configuration --------------------------------------------
HF_TOKEN = os.getenv("HF_TOKEN")
# Small chat model by default so the Space runs without extra configuration.
GEN_MODEL = os.getenv("GEN_MODEL", "TinyLlama/TinyLlama-1.1B-Chat-v1.0")

if not HF_TOKEN:
    st.error("⚠️ HF_TOKEN is not set. In your Space, add a Secret named HF_TOKEN.")
else:
    # Only created when a token exists; _reply_with_hf checks for this global.
    client = InferenceClient(model=GEN_MODEL, token=HF_TOKEN, timeout=60)

# System prompt that frames every model conversation.
TUTOR_PROMPT = (
    "You are a kind Jamaican primary-school finance tutor. "
    "Keep answers short, friendly, and age-appropriate. "
    "Teach step-by-step with tiny examples. Avoid giving personal financial advice."
)
# -------------------------------
# History helpers
# -------------------------------
def _format_history_for_flan(messages: list[dict]) -> str:
"""Format history for text-generation style models."""
lines = []
for m in messages:
txt = (m.get("text") or "").strip()
if not txt:
continue
lines.append(("Tutor" if m.get("sender") == "assistant" else "User") + f": {txt}")
return "\n".join(lines)
def _history_as_chat_messages(messages: list[dict]) -> list[dict]:
    """Translate session history into chat-completion style messages.

    A system message carrying TUTOR_PROMPT always comes first; entries with
    blank text are dropped.
    """
    chat_msgs = [{"role": "system", "content": TUTOR_PROMPT}]
    for entry in messages:
        body = (entry.get("text") or "").strip()
        if not body:
            continue
        chat_msgs.append({
            "role": "assistant" if entry.get("sender") == "assistant" else "user",
            "content": body,
        })
    return chat_msgs
def _extract_chat_text(chat_resp) -> str:
"""Extract text from HF chat response."""
try:
return chat_resp.choices[0].message["content"] if isinstance(
chat_resp.choices[0].message, dict
) else chat_resp.choices[0].message.content
except Exception:
try:
return chat_resp["choices"][0]["message"]["content"]
except Exception:
return str(chat_resp)
# -------------------------------
# Reply logic
# -------------------------------
def _reply_with_hf():
    """Generate a tutor reply from the session history via Hugging Face.

    Tries the chat-completion API first; if the model only supports the
    text-generation task, falls back to a prompt-formatted completion.

    Returns:
        str: The assistant's reply, stripped of surrounding whitespace.

    Raises:
        RuntimeError: If the HF client was never initialized (HF_TOKEN unset)
            or the API call fails.
    """
    if "client" not in globals():
        # Module-level init is skipped entirely when HF_TOKEN is missing.
        raise RuntimeError("HF client not initialized")
    try:
        # 1) Prefer the chat API.
        msgs = _history_as_chat_messages(st.session_state.get("messages", []))
        chat = client.chat.completions.create(
            model=GEN_MODEL,
            messages=msgs,
            max_tokens=300,  # give enough room
            temperature=0.2,
            top_p=0.9,
        )
        return _extract_chat_text(chat).strip()
    except ValueError as ve:
        # 2) Fallback to text-generation if chat unsupported
        if "Supported task: text-generation" in str(ve):
            convo = _format_history_for_flan(st.session_state.get("messages", []))
            tg_prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"
            resp = client.text_generation(
                tg_prompt,
                max_new_tokens=300,
                temperature=0.2,
                top_p=0.9,
                repetition_penalty=1.1,
                # BUG FIX: return_full_text=True echoed the entire prompt
                # (system prompt + transcript) back into the chat bubble;
                # False returns only the newly generated continuation.
                return_full_text=False,
                stream=False,
            )
            return (resp.get("generated_text") if isinstance(resp, dict) else resp).strip()
        raise  # rethrow anything else
    except Exception as e:
        err_text = ''.join(traceback.format_exception_only(type(e), e)).strip()
        # Chain the original exception so the root cause stays debuggable.
        raise RuntimeError(f"Hugging Face API Error: {err_text}") from e
# -------------------------------
# Session message helper
# -------------------------------
def add_message(text: str, sender: str):
    """Append a chat message to the session history, creating it if absent.

    Args:
        text: Message body to store.
        sender: Either "user" or "assistant".
    """
    if "messages" not in st.session_state:
        st.session_state.messages = []
    # BUG FIX: the original called datetime.now() twice, so the id and the
    # stored timestamp referred to slightly different instants. Sample once
    # so id == str(timestamp.timestamp()) always holds.
    now = datetime.datetime.now()
    st.session_state.messages.append(
        {
            "id": str(now.timestamp()),
            "text": text,
            "sender": sender,
            "timestamp": now,
        }
    )
def _coerce_ts(ts):
if isinstance(ts, datetime.datetime):
return ts
if isinstance(ts, (int, float)):
try:
return datetime.datetime.fromtimestamp(ts)
except Exception:
return None
if isinstance(ts, str):
# Try ISO 8601 first; fall back to float epoch
try:
return datetime.datetime.fromisoformat(ts)
except Exception:
try:
return datetime.datetime.fromtimestamp(float(ts))
except Exception:
return None
return None
def _normalize_messages():
    """Rewrite session history so every entry has clean text/sender/timestamp.

    Strips message text, defaults missing senders to "user", and coerces
    timestamps to datetime (falling back to "now" when unparseable).
    """
    fallback_ts = datetime.datetime.now()
    cleaned = [
        {
            **entry,
            "text": (entry.get("text") or "").strip(),
            "sender": entry.get("sender") or "user",
            "timestamp": _coerce_ts(entry.get("timestamp")) or fallback_ts,
        }
        for entry in st.session_state.get("messages", [])
    ]
    st.session_state.messages = cleaned
# -------------------------------
# Streamlit page
# -------------------------------
def show_page():
    """Render the AI tutor chat page: history, quick prompts, input, and reply.

    Relies on Streamlit reruns: sending a message sets ``is_typing`` and
    reruns; the next pass generates the model reply.
    """
    st.title("🤖 AI Financial Tutor")
    st.caption("Get personalized help with your financial questions")
    # Seed the conversation with a greeting on first visit.
    if "messages" not in st.session_state:
        st.session_state.messages = [{
            "id": "1",
            "text": "Hi! I'm your AI Financial Tutor. What would you like to learn today?",
            "sender": "assistant",
            "timestamp": datetime.datetime.now()
        }]
    if "is_typing" not in st.session_state:
        st.session_state.is_typing = False
    # Repair malformed entries (e.g. string timestamps surviving reruns).
    _normalize_messages()
    chat_container = st.container()
    with chat_container:
        for msg in st.session_state.messages:
            # Defensive: fall back to "now" if a timestamp lacks strftime.
            time_str = msg["timestamp"].strftime("%H:%M") if hasattr(msg["timestamp"], "strftime") else datetime.datetime.now().strftime("%H:%M")
            # Grey left-aligned bubble for the tutor, green right-aligned for the user.
            bubble = (
                f"<div style='background-color:#e0e0e0; color:black; padding:10px; border-radius:12px; max-width:70%; margin-bottom:5px;'>"
                f"{msg.get('text','')}<br><sub>{time_str}</sub></div>"
                if msg.get("sender") == "assistant" else
                f"<div style='background-color:#4CAF50; color:white; padding:10px; border-radius:12px; max-width:70%; margin-left:auto; margin-bottom:5px;'>"
                f"{msg.get('text','')}<br><sub>{time_str}</sub></div>"
            )
            st.markdown(bubble, unsafe_allow_html=True)
        if st.session_state.is_typing:
            st.markdown("🤖 _FinanceBot is typing..._")
    # Quick-start suggestions only while the greeting is the sole message.
    if len(st.session_state.messages) == 1:
        st.markdown("Try asking about:")
        cols = st.columns(2)
        quick = [
            "How does compound interest work?",
            "How much should I save for emergencies?",
            "What's a good budgeting strategy?",
            "How do I start investing?"
        ]
        for i, q in enumerate(quick):
            if cols[i % 2].button(q):
                add_message(q, "user")
                st.session_state.is_typing = True
                # Rerun so the user's message renders before the reply is generated.
                st.rerun()
    user_input = st.chat_input("Ask me anything about personal finance...")
    if user_input:
        add_message(user_input, "user")
        st.session_state.is_typing = True
        st.rerun()
    # On the rerun triggered above, is_typing is set: generate the reply now.
    if st.session_state.is_typing:
        try:
            with st.spinner("FinanceBot is thinking..."):
                bot_reply = _reply_with_hf()
            add_message(bot_reply, "assistant")
        except Exception as e:
            # Surface failures in-chat rather than crashing the page.
            add_message(f"⚠️ Error: {e}", "assistant")
        finally:
            st.session_state.is_typing = False
            st.rerun()
    if st.button("Back to Dashboard", key="ai_tutor_back_btn"):
        st.session_state.current_page = "Student Dashboard"
        st.rerun()