# litellm2 / litellm_config.yaml
# ffreemt
# Remove v0, add cpa.applane
# 27545ff
# Models exposed by this proxy. Each entry maps a public model_name to the
# upstream provider call in litellm_params; `os.environ/<VAR>` tells LiteLLM
# to read the value from that environment variable at runtime.
model_list:
  # - model_name: "anthropic/*"
  #   litellm_params:
  #     model: "openrouter/qwen/qwen3-coder"  # Qwen/Qwen3-Coder-480B-A35B-Instruct
  #     max_tokens: 65536
  #     repetition_penalty: 1.05
  #     temperature: 0.7
  #     top_k: 20
  #     top_p: 0.8

  # cpa in hf — models reachable behind cpa.applane:
  #   gemini-2.5-pro
  #   gemini-2.5-flash
  #   gemini-3-pro-preview
  #   gemini-2.5-flash-lite
  #   gemini-3-flash-preview
  # - model_name: gemini-2.5-pro
  #   litellm_params:
  #     model: openai/gemini-2.5-pro
  #     api_base: https://cpa.applane.qzz.io/v1
  #     api_key: os.environ/CPAHF_API_KEY

  - model_name: gpt-5.3-codex
    litellm_params:
      model: openai/gpt-5.3-codex
      api_base: https://cpa.applane.qzz.io/v1
      api_key: os.environ/CPAHF_API_KEY

  - model_name: gpt-5.2-codex
    litellm_params:
      model: openai/gpt-5.2-codex
      api_base: https://cpa.applane.qzz.io/v1
      api_key: os.environ/CPAHF_API_KEY
# Proxy-level settings; master_key gates access to the proxy API.
general_settings:
  # master_key: "sk-1234"  # Disabled for easy testing
  # master_key: ${LITELLM_MASTER_KEY}  # shell-style interpolation — use os.environ/ form instead
  master_key: os.environ/LITELLM_MASTER_KEY

# Debugging notes:
#   litellm --config parent_config.yaml --detailed_debug   (or --debug)
#   os.environ["LITELLM_LOG"] = "INFO"  /  os.environ["LITELLM_LOG"] = "DEBUG"
# Reference: https://docs.litellm.ai/docs/proxy/config_settings
# SDK-wide behavior applied to every call the proxy makes upstream.
litellm_settings:
  # set_verbose: true  # deprecated — use `os.environ['LITELLM_LOG'] = 'DEBUG'` instead
  request_timeout: 180  # per-request timeout, seconds
  drop_params: true  # drop params the target provider doesn't support instead of erroring
# Router configuration: model_group_alias lets clients request a familiar
# model name and be transparently routed to a model_list entry above.
router_settings:
  model_group_alias:
    gpt-4o: gpt-5.3-codex
    gpt-4.1-mini: gpt-5.3-codex
    # gpt-5-codex: claude-4.5-sonnet
    # gpt-5.1-codex: claude-4.5-sonnet