# NOTE(review): the lines "Spaces:" / "Sleeping" below this comment were a
# Hugging Face Spaces status banner captured by the page extraction, not code;
# commented out so the module parses.
import os
import re
import json
from typing import Optional

import pandas as pd
from sqlalchemy import engine
from langchain_groq import ChatGroq
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage
from langchain.utilities import SQLDatabase
# NOTE(review): `sqlagent` and `Buffer` are not real langchain exports; the
# public names are `create_sql_agent` and `ConversationBufferMemory`.
from langchain.agents import create_sql_agent
from langchain.memory import ConversationBufferMemory

# The Groq API key must come from the environment; fail fast at import time
# rather than on the first request.
GROQ_API = os.environ.get("GROQ_API")
if not GROQ_API:
    raise ValueError("Add GROQ_API as an environment variable.")

model_name = "llama-3.3-70b-versatile"
llm = ChatGroq(
    model=model_name,
    temperature=0.0,  # deterministic answers for customer support
    max_tokens=512,
    api_key=GROQ_API,
)

# Local SQLite database holding the orders table.
db_path = "customer_orders.db"
db = SQLDatabase.from_uri(f"sqlite:///{db_path}")

# SQL agent used by orderqtool(); falls back to raw db.run() on failure there.
sql_agent = create_sql_agent(
    llm=llm,
    db=db,
    verbose=False,
)

# Conversation memory shared across chatagent() calls.
memory = ConversationBufferMemory(
    memory_key="chat_history",
    return_messages=True,
)
def findorder(text: str) -> Optional[str]:
    """Extract order id pattern (e.g. O12345) from user text."""
    # An order id is an 'O'/'o' followed by three or more digits, as a
    # standalone word; only the first occurrence is returned.
    match = re.search(r"\b([Oo]\d{3,})\b", text)
    if match is None:
        return None
    return match.group(1)
def llmgenerate(messages):
    """Call LLM safely, fallback if needed."""
    try:
        # Preferred path: batch-style generate() with a single message list.
        result = llm.generate([messages])
        reply = result.generations[0][0].text
    except Exception:
        # generate() unavailable or failed: fall back to invoke(), whose
        # return type varies by langchain version.
        response = llm.invoke(messages)
        try:
            reply = response.content
        except Exception:
            # No .content attribute — last resort is the string form.
            reply = str(response)
    return reply.strip()
def orderqtool(orderid: str) -> str:
    """Fetch order data for *orderid* via the SQL agent, falling back to a
    direct database query if the agent fails.

    Returns the raw result as a string, or an "ERROR ..." string on failure
    (callers feed this straight into the answer LLM, so errors are text, not
    exceptions).
    """
    if not orderid:
        return "ERROR: No orderid provided."
    # Security: orderid is interpolated into SQL text below, so anything that
    # is not the expected O-prefixed numeric id is rejected up front to
    # prevent SQL injection.
    if not re.fullmatch(r"[Oo]\d{3,}", orderid):
        return f"ERROR: Invalid orderid format: {orderid!r}"
    query = f"SELECT * FROM orders WHERE orderid = '{orderid}';"
    try:
        raw_response = sql_agent.run(query)
        raw_text = str(raw_response)
    except Exception as e:
        # Agent failed (e.g. output-parsing error); query the DB directly.
        try:
            rows = db.run(f"SELECT * FROM orders WHERE orderid = '{orderid}'")
            raw_text = json.dumps(rows, default=str, indent=2)
        except Exception as e2:
            raw_text = f"ERROR fetching order {orderid}: {e} / fallback: {e2}"
    return raw_text
def answer_tool(raw_order_context: str, user_question: str) -> str:
    """Convert raw order information into a polite customer-facing answer.

    Builds a system + human message pair from the order context and the
    customer's question, then delegates to llmgenerate().
    """
    # Local import: BaseMessage (used originally) is abstract and cannot be
    # instantiated; the concrete class for a user turn is HumanMessage.
    from langchain_core.messages import HumanMessage

    # NOTE(review): the original adjacent string literals concatenated into
    # "You are a politedon't reveal sensitive details"; rewritten as a
    # coherent prompt preserving both directives.
    system_prompt = SystemMessage(
        content=(
            "You are a polite customer support assistant. "
            "Answer using only the order context provided and "
            "don't reveal sensitive details."
        )
    )
    hprompt = HumanMessage(
        content=(
            f"Order context (raw):\n{raw_order_context}\n\n"
            f"Customer question:\n{user_question}\n\n"
            "Instructions:\n"
            "1) Answer in 2-4 sentences.\n"
            "2) If not found, ask politely for order id.\n"
        )
    )
    return llmgenerate([system_prompt, hprompt])
def chatagent(uinput: str) -> dict:
    """
    Main agent entrypoint called by app.py.

    Returns a dict with keys: uinput, orderid, mycontext, answer.
    """
    # Local import: BaseMessage is abstract and raises on instantiation; a
    # user turn must be a concrete HumanMessage.
    from langchain_core.messages import HumanMessage

    result = {
        "uinput": uinput,
        "orderid": None,
        "mycontext": None,
        "answer": None,
    }
    # Prior turns, restored from the shared conversation memory.
    chat_history = memory.load_memory_variables({}).get("chat_history", [])
    orderid = findorder(uinput)
    result["orderid"] = orderid
    if orderid:
        # Order id present: fetch its data and answer from that context.
        mycontext = orderqtool(orderid)
        result["mycontext"] = mycontext
        result["answer"] = answer_tool(mycontext, uinput)
    else:
        # No order id: general chat behind a guardrail system prompt.
        guardrail = SystemMessage(
            content=(
                "You are a food delivery assistant that helps users while strictly avoiding requests for sensitive information. "
            )
        )
        result["answer"] = llmgenerate(
            [guardrail, *chat_history, HumanMessage(content=uinput)]
        )
    # Persist this turn so later calls see it in chat_history.
    memory.save_context({"input": uinput}, {"output": result["answer"]})
    return result