PD03 committed
Commit 77f67f6 · verified · 1 Parent(s): 6c96152

Update app.py

Files changed (1)
  1. app.py +43 -6
app.py CHANGED
@@ -197,6 +197,34 @@ After calling a tool, I will provide you with the results to interpret for the u
 
         return tool_calls
 
+    def truncate_tool_result(self, result: Dict[str, Any], max_chars: int = 2000) -> Dict[str, Any]:
+        """Truncate tool results to prevent context overflow"""
+        if not isinstance(result, dict):
+            return result
+
+        result_copy = result.copy()
+        result_str = json.dumps(result_copy, indent=2)
+
+        if len(result_str) > max_chars:
+            # Try to truncate data arrays/lists first
+            for key, value in result_copy.items():
+                if isinstance(value, list) and len(value) > 3:
+                    result_copy[key] = value[:3] + [f"... ({len(value) - 3} more items truncated)"]
+                elif isinstance(value, str) and len(value) > 500:
+                    result_copy[key] = value[:500] + "... (truncated)"
+
+            # If still too long, add truncation notice
+            result_str = json.dumps(result_copy, indent=2)
+            if len(result_str) > max_chars:
+                result_copy = {
+                    "success": result.get("success", False),
+                    "truncated": True,
+                    "message": f"Result truncated due to size. Original had {len(result_str)} characters.",
+                    "sample_data": str(result)[:1000] + "..." if len(str(result)) > 1000 else str(result)
+                }
+
+        return result_copy
+
     def process_message(self, user_message: str) -> Tuple[str, str]:
         """Process user message and handle tool calls"""
         tool_info = ""
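What the new truncate_tool_result does to an oversized payload, as a quick sketch (hypothetical usage, not part of the commit; the sample dict and the assistant instance are invented):

# Hypothetical usage of the new truncate_tool_result method.
# `assistant` stands in for an instance of the app's assistant class.
oversized = {
    "success": True,
    "rows": [{"id": i, "value": "x" * 40} for i in range(200)],  # long list
    "log": "y" * 5000,                                           # long string
}

trimmed = assistant.truncate_tool_result(oversized, max_chars=2000)
# First pass: lists longer than 3 items keep their first 3 plus a marker,
#   e.g. trimmed["rows"] ends with "... (197 more items truncated)";
#   strings longer than 500 chars are cut to 500 plus "... (truncated)".
# Second pass: if the JSON dump is still over max_chars, the whole dict is
#   replaced by a stub with success/truncated/message/sample_data keys.

One quirk: the stub's message field reports len(result_str) as measured after the first truncation pass, so it understates the original size.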
@@ -213,7 +241,7 @@ After calling a tool, I will provide you with the results to interpret for the u
                 model="gpt-3.5-turbo",
                 messages=messages,
                 temperature=0.7,
-                max_tokens=1000
+                max_tokens=800  # Reduced to leave more room for context
             )
             ai_response = response.choices[0].message.content
         else:
@@ -222,7 +250,7 @@ After calling a tool, I will provide you with the results to interpret for the u
                 model="gpt-3.5-turbo",
                 messages=messages,
                 temperature=0.7,
-                max_tokens=1000
+                max_tokens=800
             )
             ai_response = response.choices[0].message.content
         tool_calls = self.extract_tool_calls(ai_response)
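Why the max_tokens reduction helps: max_tokens reserves completion space inside the model's context window, so with the original 4,096-token gpt-3.5-turbo window the prompt (system message, history, and tool results) must fit in whatever remains. A rough budget check, assuming the tiktoken library (an assumption; app.py does not use it):

# Rough prompt-budget sketch; tiktoken is assumed, it is not used by app.py.
import tiktoken

CONTEXT_WINDOW = 4096  # base gpt-3.5-turbo window
MAX_COMPLETION = 800   # the new max_tokens value

enc = tiktoken.encoding_for_model("gpt-3.5-turbo")

def prompt_tokens(messages) -> int:
    # Ignores the few tokens of per-message overhead; close enough for a budget check.
    return sum(len(enc.encode(m["content"])) for m in messages)

def fits(messages) -> bool:
    return prompt_tokens(messages) + MAX_COMPLETION <= CONTEXT_WINDOW

Dropping max_tokens from 1000 to 800 frees roughly 200 tokens of prompt room per call.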
@@ -243,9 +271,12 @@ After calling a tool, I will provide you with the results to interpret for the u
                     tool_call['arguments']
                 )
 
+                # Truncate large results to prevent context overflow
+                truncated_result = self.truncate_tool_result(result)
+
                 tool_results.append({
                     'tool': tool_call['name'],
-                    'result': result
+                    'result': truncated_result
                 })
 
                 if result.get('success'):
@@ -253,8 +284,9 @@ After calling a tool, I will provide you with the results to interpret for the u
                 else:
                     tool_info += f"❌ {tool_call['name']} failed: {result.get('error', 'Unknown error')}\n"
 
+            # Create concise tool results summary
             tool_results_text = "\n\n".join([
-                f"Tool: {tr['tool']}\nResult: {json.dumps(tr['result'], indent=2)}"
+                f"Tool: {tr['tool']}\nResult: {json.dumps(tr['result'], indent=2)[:1500]}{'...(truncated)' if len(json.dumps(tr['result'], indent=2)) > 1500 else ''}"
                 for tr in tool_results
             ])
 
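The rewritten summary line adds a second cap on top of truncate_tool_result: even an already-truncated result is limited to 1,500 characters of JSON per tool. Unrolled for readability (a behavior-equivalent sketch, not how app.py writes it; the sample tool_results entry is invented):

import json

tool_results = [{'tool': 'search', 'result': {'success': True, 'data': 'x' * 4000}}]  # made-up sample

def summarize(tr) -> str:
    dumped = json.dumps(tr['result'], indent=2)
    if len(dumped) > 1500:
        dumped = dumped[:1500] + '...(truncated)'  # note: the cut can land mid-JSON
    return f"Tool: {tr['tool']}\nResult: {dumped}"

tool_results_text = "\n\n".join(summarize(tr) for tr in tool_results)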
@@ -269,7 +301,7 @@ After calling a tool, I will provide you with the results to interpret for the u
                 model="gpt-3.5-turbo",
                 messages=final_messages,
                 temperature=0.7,
-                max_tokens=1000
+                max_tokens=800  # Reduced max tokens
             )
             return final_response.choices[0].message.content, tool_info
         else:
@@ -277,7 +309,7 @@ After calling a tool, I will provide you with the results to interpret for the u
                 model="gpt-3.5-turbo",
                 messages=final_messages,
                 temperature=0.7,
-                max_tokens=1000
+                max_tokens=800
             )
             return final_response.choices[0].message.content, tool_info
         else:
@@ -362,6 +394,11 @@ def chat_interface(message, history, openai_key, mcp_url):
     try:
         print(f"Calling process_message with: {message}")
 
+        # Limit conversation history to prevent context overflow
+        # Keep only the last 5 exchanges (10 messages total)
+        if len(history) > 10:
+            history = history[-10:]
+
         # Make sure we call the synchronous method
         result = assistant.process_message(message)
         print(f"process_message returned: {type(result)} - {result}")
 