# cuga-agent: src/cuga/configurations/models/settings.watsonx.toml
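
# Per-agent model settings for the watsonx platform.
# Task decomposition agent model.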
[agent.task_decomposition.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
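
# Shortlister agent model.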
[agent.shortlister.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
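
# Planner agent model.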
[agent.planner.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
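
# Chat agent model.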
[agent.chat.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
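
# Plan controller agent model.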
[agent.plan_controller.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
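
# Final answer agent model; larger max_tokens budget than the other agents.
# Note: api_version appears to be a leftover from an OpenAI-style config and may be ignored by the watsonx platform.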
[agent.final_answer.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
api_version ="2024-08-06"
temperature = 0.1
max_tokens = 32000
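
# Code agent model.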
[agent.code.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
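
# Code planner agent model.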
[agent.code_planner.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
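
# QA agent model.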
[agent.qa.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 16000
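
# Action agent model; smaller max_tokens budget (2000) than the other agents.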
[agent.action.model]
platform = "watsonx"
model_name = "meta-llama/llama-4-maverick-17b-128e-instruct-fp8"
temperature = 0.1
max_tokens = 2000