eong committed on
Commit
332e135
·
verified ·
1 Parent(s): f412262

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -6
app.py CHANGED
@@ -7,6 +7,7 @@ from tools.final_answer import FinalAnswerTool
7
 
8
  from smolagents import GradioUI
9
  from smolagents import LiteLLMModel
 
10
 
11
  import litellm
12
 
@@ -71,13 +72,8 @@ final_answer = FinalAnswerTool()
71
  # model_id='meta-llama/Llama-3.1-8B-Instruct',# it is possible that this model may be overloaded
72
  # custom_role_conversions=None,
73
  # )
74
- model = LiteLLMModel(
75
- model_id="ollama_chat/qwen2:7b", # Or try other Ollama-supported models
76
- api_base="http://127.0.0.1:11434", # Default Ollama local server
77
- num_ctx=8192,
78
- )
79
-
80
 
 
81
 
82
  # Import tool from Hub
83
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)
 
7
 
8
  from smolagents import GradioUI
9
  from smolagents import LiteLLMModel
10
+ import os
11
 
12
  import litellm
13
 
 
72
  # model_id='meta-llama/Llama-3.1-8B-Instruct',# it is possible that this model may be overloaded
73
  # custom_role_conversions=None,
74
  # )
 
 
 
 
 
 
75
 
76
+ model = LiteLLMModel(model_id="gemini/gemini-2.0-flash-lite", api_key=os.getenv(key="gemini_api"))
77
 
78
  # Import tool from Hub
79
  image_generation_tool = load_tool("agents-course/text-to-image", trust_remote_code=True)