# assistant_ai / app.py
# Origin: AakashJammula's Hugging Face Space, commit 86c6189 ("Update app.py", verified).
import ast
import operator
import os

from fastapi import FastAPI
from fastapi.responses import HTMLResponse, JSONResponse
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import Tool
from langchain_experimental.utilities import PythonREPL
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_tavily import TavilySearch
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import MessagesState, StateGraph, START, END
from langgraph.prebuilt import tools_condition, ToolNode
from pydantic import BaseModel
app = FastAPI()
# Gemini 2.0 Flash as the base chat model; temperature 0.5 trades off
# determinism vs. variety for an assistant-style bot.
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0.5)
# Tavily Web Search Tool
search_tool = TavilySearch()
# Calculator Tool (simple math)
# SECURITY FIX: the original used ``lambda x: str(eval(x))``, which executes
# arbitrary Python supplied by the model/user. This evaluator walks the AST
# and permits only numeric literals and basic arithmetic operators.
_CALC_BINOPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.FloorDiv: operator.floordiv,
    ast.Mod: operator.mod,
    ast.Pow: operator.pow,
}
_CALC_UNARYOPS = {ast.UAdd: operator.pos, ast.USub: operator.neg}


def _safe_calculate(expression: str) -> str:
    """Evaluate a basic arithmetic expression and return the result as a string.

    Raises ValueError for anything other than numbers and the operators in
    _CALC_BINOPS / _CALC_UNARYOPS (names, calls, attribute access, etc.).
    """

    def _eval(node: ast.AST):
        if isinstance(node, ast.Expression):
            return _eval(node.body)
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in _CALC_BINOPS:
            return _CALC_BINOPS[type(node.op)](_eval(node.left), _eval(node.right))
        if isinstance(node, ast.UnaryOp) and type(node.op) in _CALC_UNARYOPS:
            return _CALC_UNARYOPS[type(node.op)](_eval(node.operand))
        raise ValueError(f"Unsupported expression: {expression!r}")

    return str(_eval(ast.parse(expression, mode="eval")))


calculator_tool = Tool.from_function(
    name="Calculator",
    func=_safe_calculate,
    description="Performs basic arithmetic operations like add, subtract, multiply, divide."
)
# Python REPL Tool (advanced logic/math)
# NOTE(review): PythonREPL executes whatever code the LLM emits and is not
# sandboxed — confirm this is acceptable for the deployment environment.
python_repl = PythonREPL()
python_tool = Tool.from_function(
    name="PythonREPL",
    func=python_repl.run,
    description="Executes advanced Python code like loops, conditionals, etc."
)
# Combine all tools
tools = [search_tool, calculator_tool, python_tool]
# Attach the tool schemas to the model so it can emit structured tool calls.
llm_with_tools = llm.bind_tools(tools)
class State(MessagesState):
    """Graph state: the chat message history plus the enhancer's plan."""

    # Raw JSON plan emitted by the prompt_enhancer node
    # (expected shape: {"tools": [...], "action": "..."}).
    prompt_enhanced: str
def prompt_enhancer(state: State) -> State:
    """Turn the latest user message into a JSON tool/action plan.

    Invokes the base LLM (without tools) on the last message plus an
    enhancer system prompt and stores the raw JSON text in
    ``prompt_enhanced`` for the assistant node to consume.
    """
    messages = state.get("messages", [])
    if not messages:
        # FIX: the original indexed messages[-1] unconditionally and raised
        # IndexError on an empty history; mirror assistant's guard instead.
        return state
    last = messages[-1]
    enhancer_system = SystemMessage(content=(
        "You are PromptEnhancer (aka Jarvis), a smart, friendly assistant helping user. "
        "Your job is to turn the user's raw request into a minimal JSON object with two fields:\n"
        " • tools: a list of tool names to invoke\n"
        " • action: a concise description of what to do\n\n"
        "Available tools:\n"
        " - search_tool = TavilySearch()\n"
        " - calculator = Tool.from_function(name='Calculator', func=lambda x: str(eval(x)), description='Basic arithmetic')\n"
        " - python_repl = PythonREPL()\n"
        " - python_tool = Tool.from_function(name='PythonREPL', func=python_repl.run, description='Run Python code')\n\n"
        "use multiple tools if needed, and make sure to include the action field. "
        "if time is a factor, use the search tool to find the answer. "
        "Output the raw JSON object exactly as-is, without any markdown or code fences, and no extra text."
    ))
    enhanced = llm.invoke([enhancer_system, last])
    # Return a partial state update instead of mutating the input dict —
    # the idiomatic LangGraph node contract.
    return {"prompt_enhanced": enhanced.content}
def assistant(state: State) -> State:
    """Main agent node: answer the user, possibly by requesting tool calls.

    Reads the enhancer's JSON plan from ``prompt_enhanced`` (may be None if
    the enhancer did not run), prepends the Jarvis system prompt, and invokes
    the tool-bound LLM over the full message history.
    """
    messages = state.get("messages", [])
    # FIX: guard before building the system prompt — the original constructed
    # sys_msg and only then checked for an empty history.
    if not messages:
        return state
    thinking = state.get("prompt_enhanced", None)
    sys_msg = SystemMessage(content=(
        "You are Jarvis, a smart and friendly personal AI assistant helping user. "
        "Your primary functions are helping with math, coding, and general questions. "
        "For simple arithmetic, please use the Calculator Tool. "
        "For tasks involving complex logic, loops, or functions, utilize the Python Tool. "
        "To find answers about current events or real-world topics or news or weather or learning a new topic, use the Search Tool. "
        "Always provide a brief explanation for your approach. "
        "here is the JSON object you received from PromptEnhancer:\n\n"
        f"{thinking}\n\n"
        "this json object contains two fields: tools and action. "
        "The tools field is a list of tool names to invoke, and the action field is a concise description of what to do. "
        "Strive to be concise, accurate, and polite in all your responses. "
        "VERY IMPORTANT: Deliver all responses strictly as plain text sentences. You must avoid using bullet points, lists, bolding, italics, or any similar special formatting."
    ))
    response = llm_with_tools.invoke([sys_msg] + messages)
    # Return only the new message; MessagesState's add_messages reducer
    # appends it to the history (no need to rebuild the whole list).
    return {"messages": [response]}
# Build Graph
# BUG FIX: the graph must be compiled over State (which declares
# ``prompt_enhanced``), not bare MessagesState — otherwise the key written by
# prompt_enhancer is not part of the schema and is dropped before the
# assistant node can read it.
builder = StateGraph(State)
builder.add_node("prompt_enhancer", prompt_enhancer)
builder.add_node("assistant", assistant)
builder.add_node("tools", ToolNode(tools))
# first run the enhancer, then the assistant
builder.add_edge(START, "prompt_enhancer")
builder.add_edge("prompt_enhancer", "assistant")
# Route to the tools node when the assistant emitted tool calls, else END.
builder.add_conditional_edges("assistant", tools_condition)
# After tools execute, hand their results back to the assistant.
builder.add_edge("tools", "assistant")
# In-memory checkpointer keyed by thread_id keeps per-thread chat history.
memory = MemorySaver()
react_graph = builder.compile(checkpointer=memory)
class ChatInput(BaseModel):
    """Request body for POST /chat."""

    # Raw user message forwarded into the agent graph.
    message: str
# Serve the static HTML file
@app.get("/", response_class=HTMLResponse)
async def get_index():
    """Serve the chat UI from the bundled index.html (relative to the CWD)."""
    # FIX: explicit encoding — without it, decoding depends on the host locale.
    with open("index.html", encoding="utf-8") as f:
        return f.read()
# Health check endpoint
@app.get("/health")
async def health_check():
    """Liveness probe: report that the service is up."""
    return dict(status="healthy")
# The chat endpoint
@app.post("/chat")
async def chat(input: ChatInput):
    """Run one chat turn through the agent graph and return the final reply."""
    # NOTE(review): a single hard-coded thread_id means every client shares
    # one conversation history in MemorySaver — confirm this is intended.
    config = {"configurable": {"thread_id": "1"}}
    inputs = {"messages": [HumanMessage(content=input.message)]}
    resp = react_graph.invoke(inputs, config)
    messages = resp.get("messages", [])
    if not messages:
        # FIX: the original indexed [-1] on a possibly-empty list (IndexError
        # would surface as an unhandled 500); fail explicitly instead.
        return JSONResponse({"response": ""}, status_code=500)
    return JSONResponse({"response": messages[-1].content})
if __name__ == "__main__":
    import uvicorn

    # Port 7860 — presumably the port the hosting Space expects; confirm
    # against the deployment config before changing.
    uvicorn.run(app, host="0.0.0.0", port=7860)