import os
from typing import Optional

from fastapi import FastAPI
from pydantic import BaseModel

from langchain.chat_models import init_chat_model
from langchain_core.messages import HumanMessage
from langchain_tavily import TavilySearch
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
os.environ["GOOGLE_API_KEY"]=os.getenv("GOOGLE_API_KEY")
if not os.environ.get("GOOGLE_API_KEY"):
raise ValueError("Google API key not found. Please set the GOOGLE_API_KEY environment variable.")
os.environ["TAVILY_API_KEY"]=os.getenv("TAVILY_API_KEY")
if not os.environ.get("TAVILY_API_KEY"):
raise ValueError("Tavily API key not found. Please set the TAVILY_API_KEY environment variable.")

# Web search tool, chat model, and a ReAct agent with in-memory conversation state
search = TavilySearch(max_results=2)
tools = [search]
llm = init_chat_model(model="gemini-2.5-flash", model_provider="google_genai")
memory = MemorySaver()
agent_executor = create_react_agent(llm, tools, checkpointer=memory)

class UserRequest(BaseModel):
    message: str
    thread_id: Optional[str] = "default_thread"  # Use a default thread_id if none is provided

app = FastAPI(
    title="Tavily Search Agent Server",
    description="An API server for a LangGraph ReAct agent with Tavily web search",
)
@app.post("/invoke")
async def invoke_agent(request: UserRequest):
#Set up the configuration for memory
config={"configurable":{"thread_id":request.thread_id}}
input=request.message
response=agent_executor.invoke({"messages":[HumanMessage(input)]},config=config)
return {"response":response["messages"][-1]}

# This part is for local testing; it can be removed if using a production server
if __name__ == "__main__":
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=7860)
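
# Example client call (a minimal sketch, assuming the server is running locally on
# port 7860 and the `requests` package is installed; the endpoint and field names
# match the /invoke route and UserRequest model above):
#
#   import requests
#
#   resp = requests.post(
#       "http://localhost:7860/invoke",
#       json={"message": "What is LangGraph?", "thread_id": "demo-thread"},
#   )
#   print(resp.json()["response"])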