File size: 2,391 Bytes
017e4e7
e42ee9c
ca94485
f8d7e03
 
e42ee9c
 
 
 
 
 
6255a71
f8d7e03
27b0c70
e42ee9c
 
ac8cef6
e42ee9c
ca94485
3d06d38
3f33a3a
 
 
ca94485
 
 
 
 
 
 
 
f78d166
ec386d6
ca94485
 
 
 
 
 
 
 
 
 
 
 
 
e42ee9c
d56d7b0
017e4e7
ca94485
e42ee9c
 
d56d7b0
 
 
 
 
 
e42ee9c
 
 
 
 
9495824
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
import os
import time

import requests

from smolagents import ToolCallingAgent, CodeAgent, DuckDuckGoSearchTool, LiteLLMModel

#import litellm
#litellm._turn_on_debug()  # Enable debugging for litellm

# Set the API key for Sambanova
# NOTE(review): if the 'sambanova_token' env var is unset, os.getenv returns
# None and this assignment raises TypeError — confirm the variable is set.
os.environ['SAMBANOVA_API_KEY'] = os.getenv('sambanova_token')

# Create the model with explicit provider specification
model = LiteLLMModel(
    model_id="sambanova/Meta-Llama-3.1-405B-Instruct-8k",  # Explicitly specify the sambanova provider
    max_tokens=2096,  # NOTE(review): 2096 looks like a typo for 2048 — confirm intended limit
    temperature=0.5,
    api_base="https://api.sambanova.ai/v1",  # Specify the base URL for Sambanova, as in your working requests example
)
print(model)

# Custom model callable with rate-limit handling
def model_call(query, tools_to_call_from=None, stop_sequences=None, max_retries=3):
    """Call the SambaNova chat-completions API directly and return the reply text.

    Args:
        query: Chat history (list of message dicts) sent as ``messages``.
        tools_to_call_from: Optional tool definitions forwarded as ``tools``.
        stop_sequences: Optional stop strings forwarded as ``stop``.
        max_retries: How many times to retry after an HTTP 429 rate limit.

    Returns:
        The assistant message content on success, or an error string.
    """
    print(query)
    print(tools_to_call_from)
    print(stop_sequences)
    try:
        url = "https://api.sambanova.ai/v1/chat/completions"  # Chat-completions endpoint
        headers = {
            "Authorization": f"Bearer {os.getenv('sambanova_token')}",
            "Content-Type": "application/json"
        }
        data = {
            "model": "Qwen2.5-Coder-32B-Instruct",
            # NOTE(review): smolagents tool objects may not be JSON-serializable
            # as-is — verify they are converted to OpenAI tool schemas upstream.
            "messages": query,
            "tools": tools_to_call_from,
            "max_tokens": 2096
        }
        if stop_sequences:
            # Bug fix: stop_sequences was previously accepted but silently ignored.
            data["stop"] = stop_sequences

        response = requests.post(url, headers=headers, json=data)
        print(response.status_code)
        print(response.text)

        if response.status_code == 429 and max_retries > 0:
            # Rate limited: wait and retry with ALL arguments preserved.
            # (The old code caught openai.RateLimitError, which was never
            # imported and is never raised by `requests`, and retried with
            # only `query`, dropping the tools and stop sequences.)
            time.sleep(60)  # Wait 60 seconds before retrying
            return model_call(query, tools_to_call_from, stop_sequences, max_retries - 1)

        # Bug fix: requests.Response has no `.choices` attribute — the body
        # must be parsed as JSON and indexed like a plain dict.
        payload = response.json()
        return payload["choices"][0]["message"]["content"]
    except Exception as e:
        return f"An unexpected error occurred: {str(e)}"

# Build a tool-calling agent that routes model requests through model_call.
search_tool = DuckDuckGoSearchTool()
agent = ToolCallingAgent(
    model=model_call,
    tools=[search_tool],
)

# When run through Streamlit, the answers are converted to JSON automatically
#agent = CodeAgent(
#    tools=[DuckDuckGoSearchTool()],
#    model=model
#)

# Run the agent; report failures instead of letting the script crash.
try:
    answer = agent.run("Search for the best music recommendations for a party at the Wayne's mansion.")
except Exception as exc:
    print(f"\n++++\nError occurred:\n")
    print(f"{exc}")
else:
    print("\n++++\nResult:\n")
    print(answer)