sampsong committed on
Commit
72908c3
·
1 Parent(s): cf41d5a

test websearch

Browse files
Files changed (2) hide show
  1. Agents/agent.py +4 -4
  2. app.py +7 -9
Agents/agent.py CHANGED
@@ -218,8 +218,8 @@ def build_graph(provider: str="groq"):
218
  print(f"Running llm groq")
219
  llm=ChatGroq(groq_api_key=GROQ_API_KEY,model="qwen/qwen3-32b",temperature=0)
220
  #webreturn = webSearch.invoke({"searchQuery":"principle of double effect Wikipedia page history St. Thomas Aquinas image added date"})
221
- multiple = str(multiply.invoke({"a": 2, "b": 3}))
222
- print(f"WebReturn : {multiple}")
223
  elif provider == "huggingface":
224
  print(f"huggingface")
225
  llm = ChatHuggingFace(
@@ -239,9 +239,9 @@ def build_graph(provider: str="groq"):
239
 
240
  def assistant(state: MessagesState):
241
  msg = state["messages"]
242
- print(f"invoke llmwithtools with state message = {msg}")
243
  msgbind = llmWithTools.invoke(state["messages"])
244
- print(f"msgBind{msgbind}")
245
  return {"messages": [llmWithTools.invoke(state["messages"])]}
246
  '''
247
  def retriever(state: MessagesState):
 
218
  print(f"Running llm groq")
219
  llm=ChatGroq(groq_api_key=GROQ_API_KEY,model="qwen/qwen3-32b",temperature=0)
220
  #webreturn = webSearch.invoke({"searchQuery":"principle of double effect Wikipedia page history St. Thomas Aquinas image added date"})
221
+ #multiple = str(multiply.invoke({"a": 2, "b": 3}))
222
+ #print(f"WebReturn : {multiple}")
223
  elif provider == "huggingface":
224
  print(f"huggingface")
225
  llm = ChatHuggingFace(
 
239
 
240
  def assistant(state: MessagesState):
241
  msg = state["messages"]
242
+ #print(f"invoke llmwithtools with state message = {msg}")
243
  msgbind = llmWithTools.invoke(state["messages"])
244
+ #print(f"msgBind{msgbind}")
245
  return {"messages": [llmWithTools.invoke(state["messages"])]}
246
  '''
247
  def retriever(state: MessagesState):
app.py CHANGED
@@ -52,15 +52,14 @@ class BasicAgent:
52
  print(f"Message = {messages}")
53
  messages = self.graph.invoke({"messages": formattedMessage},config={"callbacks": [langfuse_handler]})
54
 
55
- self.graph.get_graph().draw_mermaid_png()
56
  answer = messages['messages'][-1].content
57
- tools_calls = messages[-1].tool_calls
58
- print(f"tool calls : {str(tools_calls)}")
59
- print(messages[-1].__dict__)
60
- print(f"full answer : {answer}")
61
- print("Assistant Output:", messages[-1])
62
-
63
- return answer[14:]
64
 
65
  def run_and_submit_all( profile: gr.OAuthProfile | None):
66
  """
@@ -114,7 +113,6 @@ def run_and_submit_all( profile: gr.OAuthProfile | None):
114
  results_df = pd.DataFrame([])
115
  return status_message, results_df
116
  else:
117
-
118
  # 2. Fetch Questions
119
  print(f"Fetching questions from: {questions_url}")
120
  try:
 
52
  print(f"Message = {messages}")
53
  messages = self.graph.invoke({"messages": formattedMessage},config={"callbacks": [langfuse_handler]})
54
 
55
+ #self.graph.get_graph().draw_mermaid_png()
56
  answer = messages['messages'][-1].content
57
+ #tools_calls = messages[-1].tool_calls
58
+ #print(f"tool calls : {str(tools_calls)}")
59
+ #print(messages[-1].__dict__)
60
+ #print(f"full answer : {answer}")
61
+ #print("Assistant Output:", messages[-1])
62
+ return answer[50:]
 
63
 
64
  def run_and_submit_all( profile: gr.OAuthProfile | None):
65
  """
 
113
  results_df = pd.DataFrame([])
114
  return status_message, results_df
115
  else:
 
116
  # 2. Fetch Questions
117
  print(f"Fetching questions from: {questions_url}")
118
  try: