lpolich committed on
Commit
0d60424
·
verified ·
1 Parent(s): 361085c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -14
app.py CHANGED
@@ -63,22 +63,22 @@ final_answer = FinalAnswerTool()
63
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
64
 
65
 
66
- # model = HfApiModel(
67
- # max_tokens=2096,
68
- # temperature=0.5,
69
- # model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
70
- # custom_role_conversions=None,
71
- # )
72
-
73
- model = LiteLLMModel(
74
- # model_id="ollama_chat/qwen2:7b", # Or try other Ollama-supported models
75
- # model_id="ollama/qwen2:7b", # Or try other Ollama-supported models
76
- model_id="ollama_chat/gemma3:1b",
77
- api_base="http://127.0.0.1:11434", # Default Ollama local server
78
- # api_base="http://0.0.0.0:11434",
79
- # num_ctx=8192,
80
  )
81
 
 
 
 
 
 
 
 
 
 
82
 
83
 
84
  with open("prompts.yaml", 'r') as stream:
 
63
  # model_id='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud'
64
 
65
 
66
+ model = HfApiModel(
67
+ max_tokens=2096,
68
+ temperature=0.5,
69
+ model_id='Qwen/Qwen2.5-Coder-32B-Instruct',# it is possible that this model may be overloaded
70
+ custom_role_conversions=None,
 
 
 
 
 
 
 
 
 
71
  )
72
 
73
+ # model = LiteLLMModel(
74
+ # # model_id="ollama_chat/qwen2:7b", # Or try other Ollama-supported models
75
+ # # model_id="ollama/qwen2:7b", # Or try other Ollama-supported models
76
+ # model_id="ollama_chat/gemma3:1b",
77
+ # api_base="http://127.0.0.1:11434", # Default Ollama local server
78
+ # # api_base="http://0.0.0.0:11434",
79
+ # # num_ctx=8192,
80
+ # )
81
+
82
 
83
 
84
  with open("prompts.yaml", 'r') as stream: