# lllm-beta2 / config.yaml
# krinlove's picture
# Update config.yaml
# 6c8c86d verified
# aistudio gemini free, Rate Limits https://ai.google.dev/pricing#1_5pro
# gemini-1.5-pro: 2 rpm, 32,000 tpm, 50 RPD
# gemini-1.5-flash: 15 rpm, 1,000,000 tpm, 1,500 RPD
# model_list:
# # northflank
# - model_name: gpt-4o-mini-northflank
# litellm_params:
# model: openai/gpt-4o-mini
# api_base: https://yc--northflank-duckapi--b69bn8cbbs7k.code.run/v1
# api_key: os.environ/NF_API_KEY
# - model_name: gpt-4o-mini
# litellm_params:
# model: openai/gpt-4o-mini
# api_base: https://yc--northflank-duckapi--b69bn8cbbs7k.code.run/v1
# api_key: os.environ/NF_API_KEY
# - model_name: gpt-3.5-turbo-northflank
# litellm_params:
# model: openai/gpt-3.5-turbo
# api_base: https://yc--northflank-duckapi--b69bn8cbbs7k.code.run/v1
# api_key: os.environ/NF_API_KEY
# - model_name: gpt-3.5-turbo
# litellm_params:
# model: openai/gpt-3.5-turbo
# api_base: https://yc--northflank-duckapi--b69bn8cbbs7k.code.run/v1
# api_key: os.environ/NF_API_KEY
# # github
# - model_name: gpt-4o-k1
# litellm_params:
# model: github/gpt-4o
# api_base: https://models.inference.ai.azure.com
# api_key: os.environ/GITHUB_API_KEY1
# - model_name: github/gpt-4o
# litellm_params:
# model: github/gpt-4o
# api_key: os.environ/GITHUB_API_KEY1
# rpm: 30 # 15 * # of keys
# tpm: 2000000 # 1,000,000 * # of keys
# - model_name: gpt-4o-k2
# litellm_params:
# model: github/gpt-4o
# api_base: https://models.inference.ai.azure.com
# api_key: os.environ/GITHUB_API_KEY2
# - model_name: github/gpt-4o
# litellm_params:
# model: github/gpt-4o
# api_key: os.environ/GITHUB_API_KEY2
# rpm: 30 # 15 * # of keys
# tpm: 2000000 # 1,000,000 * # of keys
# - model_name: Mistral-large-2411-k1
# litellm_params:
# model: github/Mistral-large-2411
# api_base: https://models.inference.ai.azure.com
# api_key: os.environ/GITHUB_API_KEY1
# - model_name: github/Mistral-large-2411
# litellm_params:
# model: github/Mistral-large-2411
# api_key: os.environ/GITHUB_API_KEY1
# rpm: 30 # 15 * # of keys
# tpm: 2000000 # 1,000,000 * # of keys
# - model_name: Mistral-large-2411-k2
# litellm_params:
# model: github/Mistral-large-2411
# api_base: https://models.inference.ai.azure.com
# api_key: os.environ/GITHUB_API_KEY2
# - model_name: github/Mistral-large-2411
# litellm_params:
# model: github/Mistral-large-2411
# api_key: os.environ/GITHUB_API_KEY2
# rpm: 30 # 15 * # of keys
# tpm: 2000000 # 1,000,000 * # of keys
# - model_name: gpt-4o-mini-k1
# litellm_params:
# model: github/gpt-4o-mini
# api_base: https://models.inference.ai.azure.com
# api_key: os.environ/GITHUB_API_KEY1
# - model_name: github/gpt-4o-mini
# litellm_params:
# model: github/gpt-4o-mini
# api_key: os.environ/GITHUB_API_KEY1
# rpm: 30 # 15 * # of keys
# tpm: 2000000 # 1,000,000 * # of keys
# - model_name: gpt-4o-mini-k2
# litellm_params:
# model: github/gpt-4o-mini
# api_base: https://models.inference.ai.azure.com
# api_key: os.environ/GITHUB_API_KEY2
# - model_name: github/gpt-4o-mini
# litellm_params:
# model: github/gpt-4o-mini
# api_key: os.environ/GITHUB_API_KEY2
# rpm: 30 # 15 * # of keys
# tpm: 2000000 # 1,000,000 * # of keys
# # --------------gemini-1.5-pro--------------------
# - model_name: gemini-1.5-pro-k1
# litellm_params:
# model: gemini/gemini-1.5-pro
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-1.5-pro
# litellm_params:
# model: gemini/gemini-1.5-pro
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 4 # 2 * # of keys
#       tpm: 64000 # 32,000 * # of keys
# - model_name: gemini-1.5-pro-k2
# litellm_params:
# model: gemini/gemini-1.5-pro
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-1.5-pro
# litellm_params:
# model: gemini/gemini-1.5-pro
#       api_key: os.environ/GEMINI_API_KEY2
# rpm: 4 # 2 * # of keys
#       tpm: 64000 # 32,000 * # of keys
# - model_name: gemini-1.5-pro-002-k1
# litellm_params:
# model: gemini/gemini-1.5-pro-002
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-1.5-pro-002
# litellm_params:
#       model: gemini/gemini-1.5-pro-002
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 75 # 15 * # of keys
# tpm: 5000000 # 1,000,000 * # of keys
# - model_name: gemini-1.5-pro-002-k2
# litellm_params:
# model: gemini/gemini-1.5-pro-002
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-1.5-pro-002
# litellm_params:
# model: gemini/gemini-1.5-pro-002
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 75 # 15 * # of keys
# tpm: 5000000 # 1,000,000 * # of keys
# - model_name: gemini-1.5-pro-exp-k1
# litellm_params:
# model: gemini/gemini-1.5-pro-exp
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-1.5-pro-exp
# litellm_params:
# model: gemini/gemini-1.5-pro-exp
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 75 # 15 * # of keys
# tpm: 5000000 # 1,000,000 * # of keys
# - model_name: gemini-1.5-pro-exp-k2
# litellm_params:
# model: gemini/gemini-1.5-pro-exp
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-1.5-pro-exp
# litellm_params:
# model: gemini/gemini-1.5-pro-exp
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 75 # 15 * # of keys
# tpm: 5000000 # 1,000,000 * # of keys
# # --------------gemini-2.0-flash-exp--------------------
# - model_name: gemini-2.0-flash-exp-k1
# litellm_params:
# model: gemini/gemini-2.0-flash-exp
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-2.0-flash-exp
# litellm_params:
# model: gemini/gemini-2.0-flash-exp
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 50 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-2.0-flash-exp-k2
# litellm_params:
# model: gemini/gemini-2.0-flash-exp
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-2.0-flash-exp
# litellm_params:
# model: gemini/gemini-2.0-flash-exp
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 50 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-2.0-flash-thinking-exp-k1
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-2.0-flash-thinking-exp
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 50 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-2.0-flash-thinking-exp-k2
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-2.0-flash-thinking-exp
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 50 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-2.0-flash-thinking-exp-1219-k1
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp-1219
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-2.0-flash-thinking-exp-1219
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp-1219
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 50 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-2.0-flash-thinking-exp-1219-k2
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp-1219
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-2.0-flash-thinking-exp-1219
# litellm_params:
# model: gemini/gemini-2.0-flash-thinking-exp-1219
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 50 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# # --------------gemini-exp-1206--------------------
# - model_name: gemini-exp-1206-k1
# litellm_params:
# model: gemini/gemini-exp-1206
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-exp-1206
# litellm_params:
# model: gemini/gemini-exp-1206
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 20 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-exp-1206-k2
# litellm_params:
# model: gemini/gemini-exp-1206
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-exp-1206
# litellm_params:
# model: gemini/gemini-exp-1206
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 20 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# # --------------gemini-exp-1121--------------------
# - model_name: gemini-exp-1121-k1
# litellm_params:
# model: gemini/gemini-exp-1121
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-exp-1121
# litellm_params:
# model: gemini/gemini-exp-1121
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 20 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-exp-1121-k2
# litellm_params:
# model: gemini/gemini-exp-1121
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-exp-1121
# litellm_params:
# model: gemini/gemini-exp-1121
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 20 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# # --------------gemini-exp-1114--------------------
# - model_name: gemini-exp-1114-k1
# litellm_params:
# model: gemini/gemini-exp-1114
# api_key: os.environ/GEMINI_API_KEY1
# - model_name: gemini-exp-1114
# litellm_params:
# model: gemini/gemini-exp-1114
# api_key: os.environ/GEMINI_API_KEY1
# rpm: 20 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
# - model_name: gemini-exp-1114-k2
# litellm_params:
# model: gemini/gemini-exp-1114
# api_key: os.environ/GEMINI_API_KEY2
# - model_name: gemini-exp-1114
# litellm_params:
# model: gemini/gemini-exp-1114
# api_key: os.environ/GEMINI_API_KEY2
# rpm: 20 # 2 * # of keys
# tpm: 5000000 # 32,000 * # of keys
litellm_settings:
  # Networking settings
  # request_timeout: 20 # (int) llm request timeout in seconds. Raise Timeout error if call takes longer than 10s. Sets litellm.request_timeout
  # num_retries: 3
  # fallbacks: [{"gemini-1.5-pro": ["gemini-1.5-flash"]}]
  # allowed_fails: 3 # cooldown model if it fails > 1 call in a minute.
  # cooldown_time: 30 # how long to cooldown model if fails/min > allowed_fails
  # Observability: send a trace to Langfuse for every request, on success and on failure.
  success_callback: ["langfuse"]
  failure_callback: ["langfuse"]
  # Drop OpenAI params the target provider does not support instead of raising an error.
  drop_params: true
general_settings:
  # Require every proxy call to present this key (Authorization: Bearer <MASTER_KEY>).
  master_key: os.environ/MASTER_KEY # sk-1234 # [OPTIONAL] Only use this if you require all calls to contain this key (Authorization: Bearer sk-1234)
# router_settings:
# fallbacks: [{"gpt-4o-mini": ["gemini-1.5-flash"]}, {"gpt-3.5-turbo": ["gemini-1.5-flash"]}]
# model_group_alias: {"gpt-4": "gemini-1.5-pro"}
# routing_strategy: simple-shuffle