akashraut committed on
Commit
4285ba2
·
verified ·
1 Parent(s): a9d5aed

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +60 -19
app.py CHANGED
@@ -4,7 +4,8 @@ import asyncio, json, os, sys
4
  from dotenv import load_dotenv
5
  from langchain_core.tools import Tool
6
  from langchain_groq import ChatGroq
7
- from langgraph.prebuilt import create_react_agent
 
8
 
9
  load_dotenv()
10
 
@@ -16,7 +17,7 @@ class MCPClient:
16
  self._lock = asyncio.Lock()
17
  self._cmd = [command] + args
18
  self._req_id = 0
19
-
20
  async def _send_request(self, method: str, params: dict = None) -> dict:
21
  async with self._lock:
22
  self._req_id += 1
@@ -26,28 +27,34 @@ class MCPClient:
26
  while line := await self.process.stdout.readline():
27
  response = json.loads(line)
28
  if response.get("id") == self._req_id:
29
- if "error" in response: raise RuntimeError(f"Server error: {response['error']}")
 
30
  return response["result"]
31
  raise ConnectionError("Server process closed unexpectedly.")
32
-
33
  async def get_tools(self) -> list[Tool]:
34
- self.process = await asyncio.create_subprocess_exec(*self._cmd, stdin=asyncio.subprocess.PIPE, stdout=asyncio.subprocess.PIPE)
 
 
 
 
35
  tool_schemas = await self._send_request("discover")
36
  return [
37
  Tool(
38
  name=s['name'],
39
  description=s['description'],
40
- func=None, # <-- CRITICAL FIX: Add func=None for async tools
41
  coroutine=self._create_tool_coro(s['name']),
42
  args_schema=s['args_schema']
43
  ) for s in tool_schemas
44
  ]
45
-
46
  def _create_tool_coro(self, tool_name: str):
47
  async def _tool_coro(tool_input):
48
  return await self._send_request("execute", {"tool_name": tool_name, "tool_args": tool_input})
49
  return _tool_coro
50
 
 
51
  # --- Global Agent Executor ---
52
  _agent_executor = None
53
 
@@ -55,34 +62,68 @@ async def get_agent_executor():
55
  """Initializes and returns the agent executor, ensuring it's a singleton."""
56
  global _agent_executor
57
  if _agent_executor is None:
58
- if not os.getenv("GROQ_API_KEY"): raise ValueError("GROQ_API_KEY secret not set.")
 
 
 
59
  client = MCPClient(command=sys.executable, args=["server.py"])
60
  tools = await client.get_tools()
61
- model = ChatGroq(model="openai/gpt-oss-20b")
62
- _agent_executor = create_react_agent(model, tools)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
63
  return _agent_executor
64
 
 
65
  # --- Gradio Chat Logic ---
66
  async def respond_to_chat(message: str, history: list):
67
- agent = await get_agent_executor()
68
- # The new 'messages' format expects a list of dicts. We convert the history.
69
- history_langchain_format = []
 
 
 
70
  for human, ai in history:
71
- history_langchain_format.append({"role": "user", "content": human})
72
- history_langchain_format.append({"role": "assistant", "content": ai})
73
- history_langchain_format.append({"role": "user", "content": message})
74
 
75
  try:
76
- response = await agent.ainvoke({"messages": history_langchain_format})
77
- return response['messages'][-1].content
 
 
 
 
 
78
  except Exception as e:
79
  print(f"ERROR: {e}", file=sys.stderr)
80
  return "Sorry, an error occurred while processing your request."
81
 
 
82
  # --- Gradio User Interface ---
83
  demo = gr.ChatInterface(
84
  fn=respond_to_chat,
85
- title="Gold & Silver AI forefast",
86
  description="Ask about live prices and future forecasts for gold and silver.",
87
  examples=["What's the price of silver today?", "Give me a 5-day forecast for gold."],
88
  )
 
4
  from dotenv import load_dotenv
5
  from langchain_core.tools import Tool
6
  from langchain_groq import ChatGroq
7
+ from langchain.agents import AgentExecutor, create_openai_functions_agent
8
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
9
 
10
  load_dotenv()
11
 
 
17
  self._lock = asyncio.Lock()
18
  self._cmd = [command] + args
19
  self._req_id = 0
20
+
21
  async def _send_request(self, method: str, params: dict = None) -> dict:
22
  async with self._lock:
23
  self._req_id += 1
 
27
  while line := await self.process.stdout.readline():
28
  response = json.loads(line)
29
  if response.get("id") == self._req_id:
30
+ if "error" in response:
31
+ raise RuntimeError(f"Server error: {response['error']}")
32
  return response["result"]
33
  raise ConnectionError("Server process closed unexpectedly.")
34
+
35
  async def get_tools(self) -> list[Tool]:
36
+ self.process = await asyncio.create_subprocess_exec(
37
+ *self._cmd,
38
+ stdin=asyncio.subprocess.PIPE,
39
+ stdout=asyncio.subprocess.PIPE
40
+ )
41
  tool_schemas = await self._send_request("discover")
42
  return [
43
  Tool(
44
  name=s['name'],
45
  description=s['description'],
46
+ func=None,
47
  coroutine=self._create_tool_coro(s['name']),
48
  args_schema=s['args_schema']
49
  ) for s in tool_schemas
50
  ]
51
+
52
  def _create_tool_coro(self, tool_name: str):
53
  async def _tool_coro(tool_input):
54
  return await self._send_request("execute", {"tool_name": tool_name, "tool_args": tool_input})
55
  return _tool_coro
56
 
57
+
58
  # --- Global Agent Executor ---
59
  _agent_executor = None
60
 
 
62
  """Initializes and returns the agent executor, ensuring it's a singleton."""
63
  global _agent_executor
64
  if _agent_executor is None:
65
+ if not os.getenv("GROQ_API_KEY"):
66
+ raise ValueError("GROQ_API_KEY secret not set.")
67
+
68
+ # Initialize MCP client and get tools
69
  client = MCPClient(command=sys.executable, args=["server.py"])
70
  tools = await client.get_tools()
71
+
72
+ # Initialize LLM
73
+ model = ChatGroq(model="llama3-groq-70b-8192-tool-use-preview", temperature=0)
74
+
75
+ # Create prompt template for OpenAI Functions agent
76
+ prompt = ChatPromptTemplate.from_messages([
77
+ ("system", "You are a helpful assistant that provides information about gold and silver prices and forecasts."),
78
+ MessagesPlaceholder(variable_name="chat_history", optional=True),
79
+ ("human", "{input}"),
80
+ MessagesPlaceholder(variable_name="agent_scratchpad"),
81
+ ])
82
+
83
+ # Create OpenAI Functions agent
84
+ agent = create_openai_functions_agent(model, tools, prompt)
85
+
86
+ # Create AgentExecutor
87
+ _agent_executor = AgentExecutor(
88
+ agent=agent,
89
+ tools=tools,
90
+ verbose=False,
91
+ handle_parsing_errors=True,
92
+ max_iterations=5
93
+ )
94
+
95
  return _agent_executor
96
 
97
+
98
  # --- Gradio Chat Logic ---
99
  async def respond_to_chat(message: str, history: list):
100
+ agent_executor = await get_agent_executor()
101
+
102
+ # Convert Gradio history to LangChain message format
103
+ from langchain_core.messages import HumanMessage, AIMessage
104
+
105
+ chat_history = []
106
  for human, ai in history:
107
+ chat_history.append(HumanMessage(content=human))
108
+ chat_history.append(AIMessage(content=ai))
 
109
 
110
  try:
111
+ # Invoke agent with input and chat history
112
+ response = await agent_executor.ainvoke({
113
+ "input": message,
114
+ "chat_history": chat_history
115
+ })
116
+ return response['output']
117
+
118
  except Exception as e:
119
  print(f"ERROR: {e}", file=sys.stderr)
120
  return "Sorry, an error occurred while processing your request."
121
 
122
+
123
  # --- Gradio User Interface ---
124
  demo = gr.ChatInterface(
125
  fn=respond_to_chat,
126
+ title="Gold & Silver AI Forecast",
127
  description="Ask about live prices and future forecasts for gold and silver.",
128
  examples=["What's the price of silver today?", "Give me a 5-day forecast for gold."],
129
  )