harry-lu-0708 committed on
Commit
8a3c981
·
1 Parent(s): c6ec446

added claude api dependency

Browse files
requirements-space.txt CHANGED
@@ -3,6 +3,7 @@
3
  # They are removed from this list as they are not available on PyPI
4
 
5
  beautifulsoup4>=4.14.2
 
6
  ddgs>=9.6.1
7
  feedparser>=6.0.12
8
  filetype>=1.2.0
 
3
  # They are removed from this list as they are not available on PyPI
4
 
5
  beautifulsoup4>=4.14.2
6
+ claude-agent-sdk>=0.1.35
7
  ddgs>=9.6.1
8
  feedparser>=6.0.12
9
  filetype>=1.2.0
scievo/agents/experiment_agent/exec_subagent/execute.py CHANGED
@@ -282,24 +282,55 @@ def summary_node(agent_state: ExecAgentState) -> ExecAgentState:
282
  agent_state.add_message(summary_prompt)
283
 
284
  # Get summary from LLM
285
- msg = ModelRegistry.completion(
286
- LLM_NAME,
287
- agent_state.patched_history,
288
- system_prompt=(
289
- Message(
290
- role="system",
291
- content=PROMPTS.experiment_exec.summary_system_prompt.render(),
292
- )
293
- .with_log(cond=constant.LOG_SYSTEM_PROMPT)
294
- .content
295
- ),
296
- agent_sender=AGENT_NAME,
297
- tools=None, # No tools needed for summary
298
- ).with_log()
 
299
 
300
- # Store the summary text
301
- agent_state.execution_summary = msg.content or ""
302
- agent_state.add_message(msg)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
303
 
304
  # Parse JSON summary from the response
305
  try:
 
282
  agent_state.add_message(summary_prompt)
283
 
284
  # Get summary from LLM
285
+ try:
286
+ msg = ModelRegistry.completion(
287
+ LLM_NAME,
288
+ agent_state.patched_history,
289
+ system_prompt=(
290
+ Message(
291
+ role="system",
292
+ content=PROMPTS.experiment_exec.summary_system_prompt.render(),
293
+ )
294
+ .with_log(cond=constant.LOG_SYSTEM_PROMPT)
295
+ .content
296
+ ),
297
+ agent_sender=AGENT_NAME,
298
+ tools=None, # No tools needed for summary
299
+ ).with_log()
300
 
301
+ # Store the summary text
302
+ agent_state.execution_summary = msg.content or ""
303
+ agent_state.add_message(msg)
304
+ except Exception as e:
305
+ logger.exception("Failed to generate execution summary")
306
+ # Create a fallback summary based on execution history
307
+ fallback_summary = (
308
+ "Execution completed. Unable to generate detailed summary due to API error."
309
+ )
310
+ if agent_state.history:
311
+ # Try to extract information from history
312
+ last_messages = agent_state.history[-5:]
313
+ execution_info = []
314
+ for msg in last_messages:
315
+ if msg.role == "tool" and msg.content:
316
+ execution_info.append(f"Tool output: {msg.content[:200]}")
317
+ elif msg.role == "assistant" and msg.content:
318
+ execution_info.append(f"Assistant: {msg.content[:200]}")
319
+ if execution_info:
320
+ fallback_summary = (
321
+ "Execution completed. Summary generation failed, but execution appears to have run.\n\n"
322
+ + "\n".join(execution_info[:3])
323
+ )
324
+
325
+ agent_state.execution_summary = fallback_summary
326
+ agent_state.add_message(
327
+ Message(
328
+ role="assistant",
329
+ content=fallback_summary,
330
+ agent_sender=AGENT_NAME,
331
+ ).with_log()
332
+ )
333
+ logger.warning(f"Using fallback summary due to error: {e}")
334
 
335
  # Parse JSON summary from the response
336
  try:
scievo/core/llms.py CHANGED
@@ -238,7 +238,18 @@ class ModelRegistry:
238
 
239
  response = ll_completion(**params)
240
  if response.choices is None or len(response.choices) == 0:
241
- raise ZeroChoiceError("No choices returned from completion API")
 
 
 
 
 
 
 
 
 
 
 
242
  msg: Message = Message.from_ll_message(response.choices[0].message) # type: ignore
243
  usage: Usage = response.usage # type: ignore
244
  msg.llm_sender = name
 
238
 
239
  response = ll_completion(**params)
240
  if response.choices is None or len(response.choices) == 0:
241
+ # Log detailed error information for debugging
242
+ logger.error(f"ZeroChoiceError for model '{name}': No choices returned")
243
+ logger.error(f"Model params: {model_params}")
244
+ logger.error(f"Response object: {response}")
245
+ if hasattr(response, "error"):
246
+ logger.error(f"API error: {response.error}")
247
+ if hasattr(response, "model"):
248
+ logger.error(f"Model used: {response.model}")
249
+ raise ZeroChoiceError(
250
+ f"No choices returned from completion API for model '{name}'. "
251
+ f"This may indicate an API key issue, rate limit, or model unavailability."
252
+ )
253
  msg: Message = Message.from_ll_message(response.choices[0].message) # type: ignore
254
  usage: Usage = response.usage # type: ignore
255
  msg.llm_sender = name