Spaces:
Paused
Paused
ffreemt
committed on
Commit
·
2027c04
1
Parent(s):
4d96293
Update openai_model default to llama4-scout
Browse files- basic_agent.py +3 -1
- get_model.py +3 -1
- openai_model.py +17 -8
- requirements.txt +4 -0
basic_agent.py
CHANGED
|
@@ -11,7 +11,8 @@ import rich
|
|
| 11 |
import smolagents
|
| 12 |
import wikipediaapi
|
| 13 |
from loguru import logger
|
| 14 |
-
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool,
|
|
|
|
| 15 |
|
| 16 |
from get_model import get_model
|
| 17 |
from litellm_model import litellm_model
|
|
@@ -46,6 +47,7 @@ AUTHORIZED_IMPORTS = [
|
|
| 46 |
"csv",
|
| 47 |
"io",
|
| 48 |
"glob",
|
|
|
|
| 49 |
]
|
| 50 |
|
| 51 |
|
|
|
|
| 11 |
import smolagents
|
| 12 |
import wikipediaapi
|
| 13 |
from loguru import logger
|
| 14 |
+
from smolagents import CodeAgent, DuckDuckGoSearchTool, FinalAnswerTool, Tool, VisitWebpageTool
|
| 15 |
+
from smolagents import InferenceClientModel as HfApiModel
|
| 16 |
|
| 17 |
from get_model import get_model
|
| 18 |
from litellm_model import litellm_model
|
|
|
|
| 47 |
"csv",
|
| 48 |
"io",
|
| 49 |
"glob",
|
| 50 |
+
"chess",
|
| 51 |
]
|
| 52 |
|
| 53 |
|
get_model.py
CHANGED
|
@@ -6,8 +6,10 @@ import re
|
|
| 6 |
from platform import node
|
| 7 |
|
| 8 |
from loguru import logger
|
| 9 |
-
from smolagents import
|
|
|
|
| 10 |
|
|
|
|
| 11 |
from get_gemini_keys import get_gemini_keys
|
| 12 |
|
| 13 |
|
|
|
|
| 6 |
from platform import node
|
| 7 |
|
| 8 |
from loguru import logger
|
| 9 |
+
from smolagents import InferenceClientModel as HfApiModel
|
| 10 |
+
from smolagents import LiteLLMRouterModel, OpenAIServerModel
|
| 11 |
|
| 12 |
+
# FutureWarning: HfApiModel was renamed to InferenceClientModel in version 1.14.0 and will be removed in 1.17.0.
|
| 13 |
from get_gemini_keys import get_gemini_keys
|
| 14 |
|
| 15 |
|
openai_model.py
CHANGED
|
@@ -3,10 +3,11 @@
|
|
| 3 |
import os
|
| 4 |
import sys
|
| 5 |
|
| 6 |
-
|
| 7 |
from loguru import logger
|
| 8 |
from smolagents import LiteLLMModel, OpenAIServerModel
|
| 9 |
-
|
|
|
|
| 10 |
|
| 11 |
print = rich.get_console().print # noqa
|
| 12 |
|
|
@@ -15,19 +16,27 @@ def openai_model(
|
|
| 15 |
model_id = None,
|
| 16 |
api_base = None,
|
| 17 |
api_key = None,
|
| 18 |
-
kwargs
|
| 19 |
):
|
| 20 |
kwargs = kwargs or {}
|
| 21 |
# default siliconflow
|
| 22 |
-
api_base = api_base or "https://api.siliconflow.cn/v1"
|
| 23 |
-
api_key = api_key or os.getenv("SILICONFLOW_API_KEY")
|
| 24 |
-
model_id = model_id or "deepseek-ai/DeepSeek-V3"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
|
| 26 |
return OpenAIServerModel(
|
| 27 |
model_id,
|
| 28 |
api_base=api_base,
|
| 29 |
api_key=api_key,
|
| 30 |
-
temperature=0.,
|
| 31 |
**kwargs,
|
| 32 |
)
|
| 33 |
|
|
@@ -73,4 +82,4 @@ if __name__ == "__main__":
|
|
| 73 |
|
| 74 |
#
|
| 75 |
|
| 76 |
-
# LLM API proxy: https://linux.do/t/topic/290871
|
|
|
|
| 3 |
import os
|
| 4 |
import sys
|
| 5 |
|
| 6 |
+
import rich
|
| 7 |
from loguru import logger
|
| 8 |
from smolagents import LiteLLMModel, OpenAIServerModel
|
| 9 |
+
|
| 10 |
+
from exit_gracefully import exit_gracefully
|
| 11 |
|
| 12 |
print = rich.get_console().print # noqa
|
| 13 |
|
|
|
|
| 16 |
model_id = None,
|
| 17 |
api_base = None,
|
| 18 |
api_key = None,
|
| 19 |
+
**kwargs,
|
| 20 |
):
|
| 21 |
kwargs = kwargs or {}
|
| 22 |
# default siliconflow
|
| 23 |
+
# api_base = api_base or "https://api.siliconflow.cn/v1"
|
| 24 |
+
# api_key = api_key or os.getenv("SILICONFLOW_API_KEY")
|
| 25 |
+
# model_id = model_id or "deepseek-ai/DeepSeek-V3"
|
| 26 |
+
|
| 27 |
+
# default llama4
|
| 28 |
+
api_base = api_base or "https://api.llama.com/compat/v1"
|
| 29 |
+
api_key = api_key or os.getenv("LLAMA_API_KEY")
|
| 30 |
+
|
| 31 |
+
# "Llama-4-Maverick-17B-128E-Instruct-FP8"
|
| 32 |
+
# "Llama-4-Scout-17B-16E-Instruct-FP8"
|
| 33 |
+
model_id = model_id or "Llama-4-Scout-17B-16E-Instruct-FP8"
|
| 34 |
|
| 35 |
return OpenAIServerModel(
|
| 36 |
model_id,
|
| 37 |
api_base=api_base,
|
| 38 |
api_key=api_key,
|
| 39 |
+
# temperature=0.,
|
| 40 |
**kwargs,
|
| 41 |
)
|
| 42 |
|
|
|
|
| 82 |
|
| 83 |
#
|
| 84 |
|
| 85 |
+
# LLM API proxy: https://linux.do/t/topic/290871
|
requirements.txt
CHANGED
|
@@ -18,3 +18,7 @@ pyyaml
|
|
| 18 |
rich
|
| 19 |
python-dotenv
|
| 20 |
duckduckgo-search
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
rich
|
| 19 |
python-dotenv
|
| 20 |
duckduckgo-search
|
| 21 |
+
|
| 22 |
+
requests
|
| 23 |
+
markdownify
|
| 24 |
+
chess
|