zasharepw77
My model call 7
6e437ce
raw
history blame
2.4 kB
import os
import time

import requests

from smolagents import ToolCallingAgent, CodeAgent, DuckDuckGoSearchTool, LiteLLMModel
#import litellm
#litellm._turn_on_debug() # Enable debugging for litellm
# Set the API key for Sambanova.
# NOTE(review): litellm reads SAMBANOVA_API_KEY; the raw token is stored under
# the 'sambanova_token' env var and copied here.
os.environ['SAMBANOVA_API_KEY'] = os.getenv('sambanova_token')
# Create the model with explicit provider specification.
model = LiteLLMModel(
    model_id="sambanova/Meta-Llama-3.1-405B-Instruct-8k",  # Explicitly specify the sambanova provider
    max_tokens=2096,
    temperature=0.5,
    api_base="https://api.sambanova.ai/v1",  # Specify the base URL for Sambanova, as in your working requests example
)
# Debug aid: list the model object's attributes.
# NOTE(review): `model` itself is unused below — the agent is wired to
# `model_call` instead; kept for experimentation with the commented CodeAgent.
print(dir(model))
# Model call with rate-limit handling (comment translated from Russian).
def model_call(query, tools_to_call_from=None, stop_sequences=None):
    """Send a chat-completion request to the Sambanova API and return the reply.

    Parameters
    ----------
    query : list
        Chat messages in OpenAI format (list of role/content dicts) —
        assumed from the request payload shape; TODO confirm against smolagents.
    tools_to_call_from : list | None
        Optional tool specifications forwarded in the request body.
    stop_sequences : list | None
        Accepted for interface compatibility with smolagents; currently unused.

    Returns
    -------
    str
        The assistant message content, or an error description string on failure.
    """
    # Debug logging of the incoming call.
    print(query)
    print(tools_to_call_from)
    print(stop_sequences)
    try:
        url = "https://api.sambanova.ai/v1/chat/completions"
        headers = {
            "Authorization": f"Bearer {os.getenv('sambanova_token')}",
            "Content-Type": "application/json"
        }
        data = {
            "model": "Qwen2.5-Coder-32B-Instruct",
            "messages": query,
            "tools": tools_to_call_from,
            "max_tokens": 2096
        }
        response = requests.post(url, headers=headers, json=data)
        print(response.status_code)
        print(response.text)
        # Bug fix: retry on HTTP 429 instead of `except openai.RateLimitError` —
        # `openai` was never imported, so that handler would itself raise NameError.
        if response.status_code == 429:
            time.sleep(60)  # wait 60 seconds before retrying
            # Bug fix: forward ALL arguments on retry, not just `query`.
            return model_call(query, tools_to_call_from, stop_sequences)
        # Bug fix: `requests.Response` has no `.choices`; parse the JSON body
        # and extract the assistant message from the OpenAI-style payload.
        return response.json()["choices"][0]["message"]["content"]
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"
# Create the agent. The custom `model_call` function is passed instead of the
# LiteLLMModel instance so requests can be logged and rate limits handled manually.
agent = ToolCallingAgent(
    tools=[DuckDuckGoSearchTool()],
    model=model_call
)
# NOTE: when the run goes through Streamlit, the answers are converted to JSON
# automatically (comment translated from Russian).
#agent = CodeAgent(
# tools=[DuckDuckGoSearchTool()],
# model=model
#)
# Execute the agent query, printing either the answer or the failure details.
try:
    outcome = agent.run(
        "Search for the best music recommendations for a party at the Wayne's mansion."
    )
    print("\n++++\nResult:\n")
    print(outcome)
except Exception as err:
    print("\n++++\nError occurred:\n")
    print(f"{err}")