eong committed on
Commit
cdfe26b
·
verified ·
1 Parent(s): 700898c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +15 -6
app.py CHANGED
@@ -6,6 +6,7 @@ import yaml
6
  from tools.final_answer import FinalAnswerTool
7
 
8
  from smolagents import GradioUI
 
9
 
10
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
11
  @tool
@@ -60,12 +61,20 @@ final_answer = FinalAnswerTool()
60
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
61
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
62
 
63
- model = HfApiModel(
64
- max_tokens=2096,
65
- temperature=0.5,
66
- model_id='meta-llama/Llama-3.1-8B-Instruct',# it is possible that this model may be overloaded
67
- custom_role_conversions=None,
68
- )
 
 
 
 
 
 
 
 
69
 
70
 
71
  # Import tool from Hub
 
6
  from tools.final_answer import FinalAnswerTool
7
 
8
  from smolagents import GradioUI
9
+ from smolagents import LiteLLMModel
10
 
11
  # Below is an example of a tool that does nothing. Amaze us with your creativity !
12
  @tool
 
61
  # If the agent does not answer, the model is overloaded, please use another model or the following Hugging Face Endpoint that also contains qwen2.5 coder:
62
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
63
 
64
+ # model = HfApiModel(
65
+ # max_tokens=2096,
66
+ # temperature=0.5,
67
+ # model_id='meta-llama/Llama-3.1-8B-Instruct',# it is possible that this model may be overloaded
68
+ # custom_role_conversions=None,
69
+ # )
70
+
71
+
72
+ model = LiteLLMModel(
73
+ model_id="ollama_chat/qwen2:7b", # Or try other Ollama-supported models
74
+ api_base="http://127.0.0.1:11434", # Default Ollama local server
75
+ num_ctx=8192,
76
+ )
77
+
78
 
79
 
80
  # Import tool from Hub