T-K-O-H committed on
Commit
c6501a9
·
1 Parent(s): 7cbc944

use langchain directly

Browse files
Files changed (1) hide show
  1. app.py +72 -20
app.py CHANGED
@@ -15,11 +15,8 @@ from langchain_core.tools import tool
15
  from langchain.agents import AgentExecutor, create_openai_functions_agent
16
  from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
17
  from langchain.schema import SystemMessage
18
-
19
- # LangGraph
20
- from langgraph.graph import END, StateGraph
21
- from langgraph.prebuilt import ToolNode
22
- from langgraph.prebuilt.chat_agent import create_agent_executor
23
 
24
  # LangChain
25
  from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
@@ -28,10 +25,22 @@ from langchain_core.output_parsers import JsonOutputParser
28
  from langchain_core.tools import tool
29
  from langchain_openai import ChatOpenAI
30
 
31
- # Load environment variables from .env file
 
 
 
 
 
 
 
 
 
 
 
32
  load_dotenv()
33
 
34
  # Setup FastAPI app
 
35
  app = FastAPI()
36
 
37
 
@@ -45,6 +54,7 @@ class AgentState(TypedDict):
45
  @tool
46
  def search_web(query: str) -> str:
47
  """Search the web for information on a given query."""
 
48
  # In a real implementation, this would connect to a search API
49
  return f"Found information about {query}: This is a simulated web search result for '{query}'."
50
 
@@ -52,39 +62,67 @@ def search_web(query: str) -> str:
52
  @tool
53
  def calculate(expression: str) -> str:
54
  """Calculate the result of a mathematical expression."""
 
55
  try:
56
  # CAUTION: eval can be dangerous in production; consider using a safer alternative
57
  return f"Result: {eval(expression)}"
58
  except Exception as e:
 
59
  return f"Error calculating: {str(e)}"
60
 
61
 
62
  @tool
63
  def generate_image_prompt(description: str) -> str:
64
  """Generate a detailed prompt for image generation based on a description."""
 
65
  enhanced_prompt = f"An image of {description}, high resolution, detailed lighting, professional quality"
66
  return f"Generated image prompt: {enhanced_prompt}"
67
 
68
 
69
  # Create tools list
 
70
  tools = [search_web, calculate, generate_image_prompt]
71
 
72
  # Set up the language model
73
- model = ChatOpenAI(temperature=0.5)
 
 
 
 
 
 
74
 
75
  # Create the prompt template
76
- prompt = ChatPromptTemplate.from_messages([
77
- SystemMessage(content="You are a helpful AI assistant with access to tools. Use them when appropriate."),
78
- MessagesPlaceholder(variable_name="chat_history"),
79
- ("human", "{input}"),
80
- MessagesPlaceholder(variable_name="agent_scratchpad"),
81
- ])
 
 
 
 
 
 
82
 
83
  # Create the agent
84
- agent = create_openai_functions_agent(model, tools, prompt)
 
 
 
 
 
 
85
 
86
  # Create the agent executor
87
- agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
 
 
 
 
 
 
88
 
89
 
90
  # Define agent nodes
@@ -174,27 +212,41 @@ agent_executor = build_agent_graph()
174
  # WebSocket for real-time communication
175
  @app.websocket("/ws")
176
  async def websocket_endpoint(websocket: WebSocket):
 
177
  await websocket.accept()
178
  try:
179
  while True:
180
  data = await websocket.receive_text()
181
- # Process the message with the agent
182
- response = agent_executor.invoke({"input": data, "chat_history": []})
183
- await websocket.send_json({"type": "ai_message", "content": response["output"]})
 
 
 
 
 
 
184
  except Exception as e:
185
- print(f"Error in WebSocket: {str(e)}")
186
  await websocket.close()
187
 
188
 
189
  # Serve the HTML frontend
190
  @app.get("/")
191
  async def get():
 
192
  return FileResponse("index.html")
193
 
194
 
195
  # Mount static files
 
196
  app.mount("/static", StaticFiles(directory="static"), name="static")
197
 
198
 
199
  if __name__ == "__main__":
200
- uvicorn.run(app, host="0.0.0.0", port=8000)
 
 
 
 
 
 
15
  from langchain.agents import AgentExecutor, create_openai_functions_agent
16
  from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
17
  from langchain.schema import SystemMessage
18
+ import logging
19
+ import sys
 
 
 
20
 
21
  # LangChain
22
  from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
 
25
  from langchain_core.tools import tool
26
  from langchain_openai import ChatOpenAI
27
 
28
+ # Configure logging
29
+ logging.basicConfig(
30
+ level=logging.INFO,
31
+ format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
32
+ handlers=[
33
+ logging.StreamHandler(sys.stdout)
34
+ ]
35
+ )
36
+ logger = logging.getLogger(__name__)
37
+
38
+ # Load environment variables
39
+ logger.info("Loading environment variables...")
40
  load_dotenv()
41
 
42
  # Setup FastAPI app
43
+ logger.info("Initializing FastAPI application...")
44
  app = FastAPI()
45
 
46
 
 
54
  @tool
55
  def search_web(query: str) -> str:
56
  """Search the web for information on a given query."""
57
+ logger.info(f"Searching web for query: {query}")
58
  # In a real implementation, this would connect to a search API
59
  return f"Found information about {query}: This is a simulated web search result for '{query}'."
60
 
 
62
  @tool
63
  def calculate(expression: str) -> str:
64
  """Calculate the result of a mathematical expression."""
65
+ logger.info(f"Calculating expression: {expression}")
66
  try:
67
  # CAUTION: eval can be dangerous in production; consider using a safer alternative
68
  return f"Result: {eval(expression)}"
69
  except Exception as e:
70
+ logger.error(f"Error calculating expression: {str(e)}")
71
  return f"Error calculating: {str(e)}"
72
 
73
 
74
  @tool
75
  def generate_image_prompt(description: str) -> str:
76
  """Generate a detailed prompt for image generation based on a description."""
77
+ logger.info(f"Generating image prompt for: {description}")
78
  enhanced_prompt = f"An image of {description}, high resolution, detailed lighting, professional quality"
79
  return f"Generated image prompt: {enhanced_prompt}"
80
 
81
 
82
  # Create tools list
83
+ logger.info("Creating tools list...")
84
  tools = [search_web, calculate, generate_image_prompt]
85
 
86
  # Set up the language model
87
+ logger.info("Initializing ChatOpenAI model...")
88
+ try:
89
+ model = ChatOpenAI(temperature=0.5)
90
+ logger.info("ChatOpenAI model initialized successfully")
91
+ except Exception as e:
92
+ logger.error(f"Failed to initialize ChatOpenAI model: {str(e)}")
93
+ raise
94
 
95
  # Create the prompt template
96
+ logger.info("Creating prompt template...")
97
+ try:
98
+ prompt = ChatPromptTemplate.from_messages([
99
+ SystemMessage(content="You are a helpful AI assistant with access to tools. Use them when appropriate."),
100
+ MessagesPlaceholder(variable_name="chat_history"),
101
+ ("human", "{input}"),
102
+ MessagesPlaceholder(variable_name="agent_scratchpad"),
103
+ ])
104
+ logger.info("Prompt template created successfully")
105
+ except Exception as e:
106
+ logger.error(f"Failed to create prompt template: {str(e)}")
107
+ raise
108
 
109
  # Create the agent
110
+ logger.info("Creating OpenAI functions agent...")
111
+ try:
112
+ agent = create_openai_functions_agent(model, tools, prompt)
113
+ logger.info("Agent created successfully")
114
+ except Exception as e:
115
+ logger.error(f"Failed to create agent: {str(e)}")
116
+ raise
117
 
118
  # Create the agent executor
119
+ logger.info("Creating agent executor...")
120
+ try:
121
+ agent_executor = AgentExecutor(agent=agent, tools=tools, verbose=True)
122
+ logger.info("Agent executor created successfully")
123
+ except Exception as e:
124
+ logger.error(f"Failed to create agent executor: {str(e)}")
125
+ raise
126
 
127
 
128
  # Define agent nodes
 
212
  # WebSocket for real-time communication
213
  @app.websocket("/ws")
214
  async def websocket_endpoint(websocket: WebSocket):
215
+ logger.info("New WebSocket connection established")
216
  await websocket.accept()
217
  try:
218
  while True:
219
  data = await websocket.receive_text()
220
+ logger.info(f"Received message: {data}")
221
+ try:
222
+ # Process the message with the agent
223
+ response = agent_executor.invoke({"input": data, "chat_history": []})
224
+ logger.info(f"Agent response: {response['output']}")
225
+ await websocket.send_json({"type": "ai_message", "content": response["output"]})
226
+ except Exception as e:
227
+ logger.error(f"Error processing message: {str(e)}")
228
+ await websocket.send_json({"type": "error", "content": f"Error processing message: {str(e)}"})
229
  except Exception as e:
230
+ logger.error(f"WebSocket error: {str(e)}")
231
  await websocket.close()
232
 
233
 
234
  # Serve the HTML frontend
235
  @app.get("/")
236
  async def get():
237
+ logger.info("Serving index.html")
238
  return FileResponse("index.html")
239
 
240
 
241
  # Mount static files
242
+ logger.info("Mounting static files...")
243
  app.mount("/static", StaticFiles(directory="static"), name="static")
244
 
245
 
246
  if __name__ == "__main__":
247
+ logger.info("Starting uvicorn server...")
248
+ try:
249
+ uvicorn.run(app, host="0.0.0.0", port=8000)
250
+ except Exception as e:
251
+ logger.error(f"Failed to start uvicorn server: {str(e)}")
252
+ raise