bool:
"""Determine if output should be returned without ReAct processing.
Args:
output: The raw LLM output string
Returns:
True if this output should be returned verbatim, False otherwise
"""
# Check for tool response markers that indicate this is a complete tool response
tool_response_indicators = [
'Perfect! ✅', # Booking confirmation start
'✅ Meeting', # Cancellation or meeting updates
'📅 **', # Date/time formatting
'Meeting ID:', # Meeting ID display
'🎥 Google Meet', # Google Meet link
'📧 Email invitations', # Email status
'
def extract_verbatim_response(self, output: str) -> str:
    """Pull the user-ready tool response out of raw LLM output.

    Tries, in order: the text of an ``Observation:`` section, a set of
    known response-opening patterns, then a heuristic "already clean"
    marker check; falls back to the stripped output unchanged.

    Args:
        output: The raw LLM output

    Returns:
        Cleaned response suitable for direct user display
    """
    # An "Observation:" section carries the tool's own reply; capture it
    # up to the next "Thought:" paragraph (or end of string).
    obs = re.search(r'Observation:\s*(.*?)(?=\n\nThought:|$)', output, re.DOTALL)
    if obs:
        extracted = obs.group(1).strip()
        self.logger.info(f"Extracted tool response from Observation: {extracted[:100]}...")
        return extracted

    # Known openings of complete tool responses; first hit wins.
    for opener in (
        r'(Perfect! ✅.*)',
        r'(✅.*Meeting.*)',
        r'(I need to reconnect.*)',
        r'(Oops! You already have.*)',  # Conflict messages
        r'(I\'m having trouble.*)',     # Error messages
    ):
        if (hit := re.search(opener, output, re.DOTALL)) is not None:
            extracted = hit.group(1).strip()
            self.logger.info(f"Extracted response by pattern: {extracted[:100]}...")
            return extracted

    # Presence of any formatting marker suggests the output is already a
    # direct tool response and needs no extraction.
    if any(marker in output for marker in ('✅', '📅', '🎥', '📧', 'Meeting ID:')):
        self.logger.info("Output appears to be clean tool response already")
        return output.strip()

    # Nothing matched: hand back the raw output, trimmed.
    self.logger.warning("Could not extract clean tool response, returning full output")
    return output.strip()
def parse(self, output: str, is_streaming: bool = False) -> BaseReasoningStep:
    """Parse LLM output, short-circuiting to a verbatim response when appropriate.

    Args:
        output: Raw LLM output string
        is_streaming: Whether this is part of a streaming response

    Returns:
        BaseReasoningStep - either verbatim ResponseReasoningStep or normal parsing
    """
    try:
        if not self.should_return_verbatim(output):
            # Not a complete tool response: defer to standard ReAct parsing.
            return super().parse(output, is_streaming)
        # The tool already produced a user-ready answer; wrap it in a
        # ResponseReasoningStep so the ReAct loop terminates here.
        return ResponseReasoningStep(
            thought="Tool provided complete response that should be returned verbatim.",
            response=self.extract_verbatim_response(output),
            is_streaming=is_streaming,
        )
    except Exception as e:
        self.logger.error(f"Error in custom parser: {e}, falling back to default parsing")
        # Fallback to default parsing if our custom logic fails.
        return super().parse(output, is_streaming)
# Configuration for the verbatim parser
VERBATIM_RESPONSE_CONFIG = {
'enabled': True,
'tools': ['create_appointment', 'cancel_meeting_by_id', 'cancel_meeting_by_details', 'check_availability', 'list_upcoming_events'],
'patterns': {
'booking_confirmation': r'Perfect! ✅.*Meeting confirmed.*📅',
'meeting_ids': r'Meeting ID:.*Calendar ID:',
'google_meet': r'🎥 Google Meet',
'cancellation_success': r'✅.*cancelled.*successfully',
'email_notification': r'📧 Email invitations',
'celebration_marker': r'