Spaces:
Runtime error
Runtime error
| """Finance assistant: LLM-backed expense parsing and ledger actions.""" | |
| import json | |
| import re | |
| import logging | |
| from datetime import datetime | |
| from typing import Generator | |
| from huggingface_hub import InferenceClient | |
| from ledger import Ledger | |
| logger = logging.getLogger(__name__) | |
| MODEL = "openai/gpt-oss-20b" | |
# System prompt sent with every request. It defines the assistant persona and
# the machine-readable JSON action protocol that _parse_action() extracts from
# replies. NOTE(review): the source had a mojibaked character ("β") where an
# em dash belongs in the last line; restored to "—".
SYSTEM = """\
You are a personal finance assistant. Help the user log expenses, query spending summaries, and manage their ledger.
When the user describes an expense, extract it and include a JSON action block in your response:
```json
{"action": "add", "date": "YYYY-MM-DD", "description": "...", "category": "Food|Transport|Utilities|Entertainment|Health|Shopping|Rent|Other", "amount": 0.00}
```
When the user wants to undo or delete the last entry:
```json
{"action": "delete_last"}
```
Use today's date if none is given. Keep replies brief and friendly.
If the user asks about their spending, use the ledger context below to answer accurately.
If no ledger action is needed, just respond conversationally — no JSON block."""
# ── context & parsing ─────────────────────────────────────────────────────────
def _ledger_context(ledger: Ledger) -> str:
    """Render a compact one-screen summary of the ledger for the system prompt.

    Includes the grand total, a per-category breakdown sorted by descending
    spend, and the five most recent entries; returns a placeholder string
    when the ledger holds no rows.
    """
    if ledger.df.empty:
        return "Ledger is empty."
    total = ledger.total()
    # Largest categories first.
    breakdown = sorted(ledger.by_category().items(), key=lambda kv: -kv[1])
    cat_line = " | ".join(f"{name} ${spent:.2f}" for name, spent in breakdown)
    tail = ledger.recent(5).to_string(index=False)
    return f"Total: ${total:.2f} | {cat_line}\nRecent entries:\n{tail}"
| def _parse_action(text: str) -> dict | None: | |
| m = re.search(r"```json\s*(\{.*?\})\s*```", text, re.DOTALL) | |
| if m: | |
| try: | |
| return json.loads(m.group(1)) | |
| except json.JSONDecodeError: | |
| pass | |
| return None | |
| def _clean(text: str) -> str: | |
| """Strip JSON action blocks from visible reply.""" | |
| return re.sub(r"```json.*?```", "", text, flags=re.DOTALL).strip() | |
def _build_messages(message: str, history: list[dict], ledger: Ledger) -> list[dict]:
    """Assemble the chat payload: system prompt + ledger snapshot, prior turns, new user turn."""
    system_content = f"{SYSTEM}\n\nCurrent ledger:\n{_ledger_context(ledger)}"
    return [
        {"role": "system", "content": system_content},
        *history,
        {"role": "user", "content": message},
    ]
# ── actions ───────────────────────────────────────────────────────────────────
def execute(action: dict, ledger: Ledger, fallback_desc: str = "") -> str:
    """Run a parsed LLM action against the ledger.

    Supported actions: ``add`` (insert one expense row) and ``delete_last``.
    Returns a short markdown confirmation string, or "" for unknown actions.

    Fixes vs. original: the confirmation echoed ``action.get('category')``
    raw, printing "None" when the key was absent even though the ledger
    stored "Other"; and a non-numeric ``amount`` from the model crashed the
    handler with an uncaught ValueError. Mojibaked emoji/dash in the
    confirmation strings are restored (✅/❌/🗑️/— ; assumed — TODO confirm
    against the original UI).
    """
    kind = action.get("action")
    if kind == "add":
        # Resolve defaults once so the confirmation matches what was stored.
        category = action.get("category") or "Other"
        try:
            amount = float(action.get("amount", 0))
        except (TypeError, ValueError):
            # Malformed model output — refuse rather than crash the caller.
            return "❌ Failed to save entry."
        ok = ledger.add(
            date=action.get("date") or datetime.now().strftime("%Y-%m-%d"),
            description=action.get("description", fallback_desc),
            category=category,
            amount=amount,
        )
        if ok:
            return f"✅ Logged **{category}** — ${amount:.2f}"
        return "❌ Failed to save entry."
    if kind == "delete_last":
        return "🗑️ Last entry removed." if ledger.delete_last() else "Nothing to delete."
    return ""
# ── inference ─────────────────────────────────────────────────────────────────
def stream_response(
    message: str, history: list[dict], ledger: Ledger, token: str
) -> Generator[tuple[str, dict | None], None, None]:
    """Stream the model's reply token-by-token.

    Yields (partial_reply, action) tuples. The action slot is None on every
    intermediate yield; only the final yield carries any parsed JSON action.
    The visible text always has JSON blocks stripped.
    """
    client = InferenceClient(token=token, model=MODEL)
    stream = client.chat_completion(
        _build_messages(message, history, ledger),
        max_tokens=512,
        stream=True,
        temperature=0.2,
    )
    pieces: list[str] = []
    for chunk in stream:
        if not chunk.choices:
            continue
        delta = chunk.choices[0].delta.content
        if not delta:
            continue
        pieces.append(delta)
        yield _clean("".join(pieces)), None
    full = "".join(pieces)
    yield _clean(full), _parse_action(full)
def batch_response(
    message: str, history: list[dict], ledger: Ledger, token: str
) -> tuple[str, dict | None]:
    """Synchronous single-call variant used by the Telegram bot.

    Returns (cleaned_reply, parsed_action_or_None) from one non-streaming
    chat completion.
    """
    client = InferenceClient(token=token, model=MODEL)
    completion = client.chat_completion(
        _build_messages(message, history, ledger),
        max_tokens=512,
        temperature=0.2,
    )
    raw = completion.choices[0].message.content
    return _clean(raw), _parse_action(raw)