# phase/Student_view/chatbot.py
"""Streamlit chat page: an AI financial tutor backed by the Hugging Face Inference API.

Prefers the chat-completion API; falls back to plain text-generation when the
selected model only supports that task. Conversation history lives in
``st.session_state.messages`` as dicts with keys: id, text, sender, timestamp.
"""

import datetime
import os
import traceback
import uuid

import streamlit as st
from huggingface_hub import InferenceClient

HF_TOKEN = os.getenv("HF_TOKEN")
GEN_MODEL = os.getenv("GEN_MODEL", "TinyLlama/TinyLlama-1.1B-Chat-v1.0")  # <- default TinyLlama

if not HF_TOKEN:
    st.error("⚠️ HF_TOKEN is not set. In your Space, add a Secret named HF_TOKEN.")
else:
    client = InferenceClient(model=GEN_MODEL, token=HF_TOKEN, timeout=60)

# System prompt shared by both the chat and text-generation paths.
TUTOR_PROMPT = (
    "You are a kind Jamaican primary-school finance tutor. "
    "Keep answers short, friendly, and age-appropriate. "
    "Teach step-by-step with tiny examples. Avoid giving personal financial advice."
)


# -------------------------------
# History helpers
# -------------------------------
def _format_history_for_flan(messages: list[dict]) -> str:
    """Format history as a plain "Tutor:/User:" transcript for text-generation models.

    Messages with empty/whitespace-only text are skipped.
    """
    lines = []
    for m in messages:
        txt = (m.get("text") or "").strip()
        if not txt:
            continue
        speaker = "Tutor" if m.get("sender") == "assistant" else "User"
        lines.append(f"{speaker}: {txt}")
    return "\n".join(lines)


def _history_as_chat_messages(messages: list[dict]) -> list[dict]:
    """Convert history to chat-completion style messages, prefixed with the system prompt.

    Messages with empty/whitespace-only text are skipped.
    """
    msgs = [{"role": "system", "content": TUTOR_PROMPT}]
    for m in messages:
        txt = (m.get("text") or "").strip()
        if not txt:
            continue
        role = "assistant" if m.get("sender") == "assistant" else "user"
        msgs.append({"role": role, "content": txt})
    return msgs


def _extract_chat_text(chat_resp) -> str:
    """Extract assistant text from an HF chat response (object- or dict-shaped).

    Falls back to ``str(chat_resp)`` if neither shape matches, so the caller
    always gets something displayable rather than an exception.
    """
    try:
        message = chat_resp.choices[0].message
        # Some client versions return the message as a dict, others as an object.
        return message["content"] if isinstance(message, dict) else message.content
    except Exception:
        try:
            return chat_resp["choices"][0]["message"]["content"]
        except Exception:
            return str(chat_resp)


# -------------------------------
# Reply logic
# -------------------------------
def _reply_with_hf():
    """Generate a tutor reply from the session history via the HF Inference API.

    Returns the reply text. Raises RuntimeError if the client was never
    initialized (missing HF_TOKEN) or if the API call fails.
    """
    if "client" not in globals():
        raise RuntimeError("HF client not initialized")
    try:
        # 1) Prefer chat API
        msgs = _history_as_chat_messages(st.session_state.get("messages", []))
        chat = client.chat.completions.create(
            model=GEN_MODEL,
            messages=msgs,
            max_tokens=300,  # give enough room
            temperature=0.2,
            top_p=0.9,
        )
        return _extract_chat_text(chat).strip()
    except ValueError as ve:
        # 2) Fallback to text-generation if chat unsupported for this model
        if "Supported task: text-generation" in str(ve):
            convo = _format_history_for_flan(st.session_state.get("messages", []))
            tg_prompt = f"{TUTOR_PROMPT}\n\n{convo}\n\nTutor:"
            resp = client.text_generation(
                tg_prompt,
                max_new_tokens=300,
                temperature=0.2,
                top_p=0.9,
                repetition_penalty=1.1,
                # Must be False: with True the response echoes the whole prompt
                # (system prompt + transcript) back to the student.
                return_full_text=False,
                stream=False,
            )
            return (resp.get("generated_text") if isinstance(resp, dict) else resp).strip()
        raise  # rethrow anything else
    except Exception as e:
        err_text = "".join(traceback.format_exception_only(type(e), e)).strip()
        # Chain the original exception so the full cause survives in logs.
        raise RuntimeError(f"Hugging Face API Error: {err_text}") from e


# -------------------------------
# Session message helper
# -------------------------------
def add_message(text: str, sender: str):
    """Append a message dict to the session history, creating the list if needed.

    ``sender`` is "user" or "assistant". Uses a uuid for the id: a timestamp-based
    id can collide when two messages are added within the same tick.
    """
    if "messages" not in st.session_state:
        st.session_state.messages = []
    st.session_state.messages.append(
        {
            "id": uuid.uuid4().hex,
            "text": text,
            "sender": sender,
            "timestamp": datetime.datetime.now(),
        }
    )


def _coerce_ts(ts):
    """Best-effort conversion of a stored timestamp to ``datetime``; None on failure.

    Accepts datetime (returned as-is), int/float epoch seconds, ISO-8601 strings,
    or numeric strings (epoch seconds).
    """
    if isinstance(ts, datetime.datetime):
        return ts
    if isinstance(ts, (int, float)):
        try:
            return datetime.datetime.fromtimestamp(ts)
        except Exception:
            return None
    if isinstance(ts, str):
        # Try ISO 8601 first; fall back to float epoch
        try:
            return datetime.datetime.fromisoformat(ts)
        except Exception:
            try:
                return datetime.datetime.fromtimestamp(float(ts))
            except Exception:
                return None
    return None


def _normalize_messages():
    """Normalize every session message in place: strip text, default the sender
    to "user", and coerce the timestamp to a datetime (falling back to now)."""
    msgs = st.session_state.get("messages", [])
    normed = []
    now = datetime.datetime.now()
    for m in msgs:
        text = (m.get("text") or "").strip()
        sender = m.get("sender") or "user"
        ts = _coerce_ts(m.get("timestamp")) or now
        normed.append({**m, "text": text, "sender": sender, "timestamp": ts})
    st.session_state.messages = normed


# -------------------------------
# Streamlit page
# -------------------------------
def show_page():
    """Render the AI Financial Tutor chat page."""
    st.title("🤖 AI Financial Tutor")
    st.caption("Get personalized help with your financial questions")

    if "messages" not in st.session_state:
        st.session_state.messages = [
            {
                "id": "1",
                "text": "Hi! I'm your AI Financial Tutor. What would you like to learn today?",
                "sender": "assistant",
                "timestamp": datetime.datetime.now(),
            }
        ]
    if "is_typing" not in st.session_state:
        st.session_state.is_typing = False

    _normalize_messages()

    chat_container = st.container()
    with chat_container:
        for msg in st.session_state.messages:
            time_str = (
                msg["timestamp"].strftime("%H:%M")
                if hasattr(msg["timestamp"], "strftime")
                else datetime.datetime.now().strftime("%H:%M")
            )
            # NOTE(review): the original source was truncated mid-markup here
            # (the HTML bubble f-string was cut off). The rendering below is a
            # minimal reconstruction — confirm against the full original file.
            is_bot = msg["sender"] == "assistant"
            bubble = (
                f"<div style='text-align: {'left' if is_bot else 'right'}; margin: 4px 0;'>"
                f"<span style='display:inline-block; padding:8px 12px; border-radius:12px; "
                f"background:{'#f0f2f6' if is_bot else '#dcf8c6'};'>"
                f"{msg['text']}</span><br><small>{time_str}</small></div>"
            )
            st.markdown(bubble, unsafe_allow_html=True)

    # NOTE(review): input handling reconstructed for the same reason — verify.
    prompt = st.chat_input("Ask me anything about money...")
    if prompt:
        add_message(prompt, "user")
        try:
            reply = _reply_with_hf()
        except Exception as e:
            reply = f"Sorry, I ran into a problem: {e}"
        add_message(reply, "assistant")
        st.rerun()