# AgentGraph/utils/fix_litellm_stop_param.py
# Deployed as part of AgentGraph (agent monitoring and knowledge graph system), commit c2ea5ed.
"""
fix_litellm_stop_param.py
This script fixes issues with unsupported parameters when using LiteLLM:
1. Fixes the "Unsupported parameter: 'stop' is not supported with this model" error
when using certain models like o1-preview or Perplexity models.
2. Handles models that require 'max_completion_tokens' instead of 'max_tokens'.
3. Fixes the "'Message' object has no attribute 'audio'" error in Message initialization.
Import this module before any other imports that use LiteLLM.
"""
import litellm
from litellm.types.utils import Message
# Keep a reference to the unpatched litellm.completion so the wrapper
# defined below can delegate to it after cleaning up kwargs.
original_completion = litellm.completion
# Keep a reference to the unpatched Message.__init__ so the patched
# initializer can try it first and only fall back when it fails.
original_message_init = Message.__init__
# Work around Message.__init__ raising over a missing 'audio' attribute.
def patched_message_init(self, *args, **kwargs):
    """
    Drop-in replacement for ``Message.__init__``.

    Delegates to the original initializer first. When that raises the
    specific "'Message' object has no attribute 'audio'" AttributeError,
    falls back to assigning the standard message fields by hand and never
    touches ``audio``. Any other AttributeError propagates unchanged.
    """
    try:
        original_message_init(self, *args, **kwargs)
    except AttributeError as exc:
        # Only the known audio-attribute failure is recoverable here.
        if "'Message' object has no attribute 'audio'" not in str(exc):
            raise
        # Manual fallback: populate the usual Message fields from kwargs,
        # deliberately skipping the 'audio' attribute entirely.
        fallback_fields = (
            ("content", ""),
            ("role", "user"),
            ("name", None),
            ("tool_calls", None),
            ("tool_call_id", None),
        )
        for field, default in fallback_fields:
            setattr(self, field, kwargs.get(field, default))
# Install the patched initializer on the Message class.
Message.__init__ = patched_message_init
def patched_completion(*args, **kwargs):
    """
    Patched version of litellm.completion that handles unsupported parameters.

    - Removes the ``stop`` parameter unconditionally (rejected by models
      such as o1-preview and Perplexity models).
    - Renames ``max_tokens`` to ``max_completion_tokens``. Unlike the naive
      rename, an explicitly caller-supplied ``max_completion_tokens`` is
      NOT clobbered; in that case ``max_tokens`` is simply dropped.

    All positional arguments and remaining keyword arguments are forwarded
    to the original completion function and its result is returned as-is.
    """
    # Silently drop 'stop' regardless of whether it was passed.
    kwargs.pop('stop', None)
    if 'max_tokens' in kwargs:
        token_limit = kwargs.pop('max_tokens')
        # setdefault keeps an explicit max_completion_tokens intact while
        # still performing the rename (including a None value) otherwise.
        kwargs.setdefault('max_completion_tokens', token_limit)
    return original_completion(*args, **kwargs)
# Replace litellm.completion globally with the patched wrapper.
# NOTE: this happens as a side effect of importing this module, so it must
# be imported before any other module that captures litellm.completion.
litellm.completion = patched_completion
print("LiteLLM has been patched to fix 'stop' parameter and 'audio' attribute issues")