ffreemt committed on
Commit
27545ff
·
1 Parent(s): 3179b87

Remove v0, add cpa.applane

Browse files
Files changed (3) hide show
  1. Dockerfile +4 -0
  2. litellm_config.yaml +12 -44
  3. litellm_config_v0_bk.yaml +84 -0
Dockerfile CHANGED
@@ -13,6 +13,10 @@
13
  # --config /app/config.yaml
14
  # # --detailed_debug \
15
 
 
 
 
 
16
  from ghcr.io/berriai/litellm:main-latest
17
 
18
  # SERVER_ROOT_PATH=/hf/v1
 
13
  # --config /app/config.yaml
14
  # # --detailed_debug \
15
 
16
+ # PostgreSQL-aiven-litellm-virtual-keys.txt / PostgreSQL-memo.txt
17
+ # acone postgresql worked
18
+ # edge browser github aiven does not quite work
19
+
20
  from ghcr.io/berriai/litellm:main-latest
21
 
22
  # SERVER_ROOT_PATH=/hf/v1
litellm_config.yaml CHANGED
@@ -8,47 +8,6 @@ model_list:
8
  # top_k: 20
9
  # top_p: 0.8
10
 
11
- # OK curl, ccr not OK, ccapi use litellm-v0 OK, anthropic-curl OK
12
- - model_name: v0-a
13
- litellm_params:
14
- model: v0/v0-1.5-md
15
- api_base: https://api.v0.dev/v1
16
- api_key: os.environ/V0_API_KEY1
17
- model_info:
18
- max_tokens: 200_000
19
- id: v0/v0-1.5-md
20
- additionalProp1:
21
- info: v0-bug-team
22
- - model_name: v0-b
23
- litellm_params:
24
- model: v0/v0-1.5-md
25
- api_base: https://api.v0.dev/v1
26
- api_key: os.environ/V0_API_KEY2
27
- - model_name: v0-c
28
- litellm_params:
29
- model: v0/v0-1.5-md
30
- api_base: https://api.v0.dev/v1
31
- api_key: os.environ/V0_API_KEY3
32
- - model_name: v0
33
- litellm_params:
34
- model: v0/v0-1.5-md
35
- api_base: https://api.v0.dev/v1
36
- api_key: os.environ/V0_API_KEY1
37
- model_info:
38
- max_tokens: 200_000
39
- id: v0/v0-1.5-md
40
- additionalProp1:
41
- info: v0-bug-team
42
- - model_name: v0
43
- litellm_params:
44
- model: v0/v0-1.5-md
45
- api_base: https://api.v0.dev/v1
46
- api_key: os.environ/V0_API_KEY2
47
- - model_name: v0
48
- litellm_params:
49
- model: v0/v0-1.5-md
50
- api_base: https://api.v0.dev/v1
51
- api_key: os.environ/V0_API_KEY3
52
 
53
  # cpa in hf
54
  # gemini-2.5-pro
@@ -61,7 +20,16 @@ model_list:
61
  # model: openai/gemini-2.5-pro
62
  # api_base: https://cpa.applane.qzz.io/v1
63
  # api_key: os.environ/CPAHF_API_KEY
64
-
 
 
 
 
 
 
 
 
 
65
 
66
  general_settings:
67
  # master_key: "sk-1234" # Disabled for easy testing
@@ -78,7 +46,7 @@ litellm_settings:
78
 
79
  router_settings:
80
  model_group_alias:
81
- gpt-4o: v0
82
- gpt-4.1-mini: v0
83
  # gpt-5-codex: claude-4.5-sonnet
84
  # gpt-5.1-codex: claude-4.5-sonnet
 
8
  # top_k: 20
9
  # top_p: 0.8
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
 
12
  # cpa in hf
13
  # gemini-2.5-pro
 
20
  # model: openai/gemini-2.5-pro
21
  # api_base: https://cpa.applane.qzz.io/v1
22
  # api_key: os.environ/CPAHF_API_KEY
23
+ - model_name: gpt-5.3-codex
24
+ litellm_params:
25
+ model: openai/gpt-5.3-codex
26
+ api_base: https://cpa.applane.qzz.io/v1
27
+ api_key: os.environ/CPAHF_API_KEY
28
+ - model_name: gpt-5.2-codex
29
+ litellm_params:
30
+ model: openai/gpt-5.2-codex
31
+ api_base: https://cpa.applane.qzz.io/v1
32
+ api_key: os.environ/CPAHF_API_KEY
33
 
34
  general_settings:
35
  # master_key: "sk-1234" # Disabled for easy testing
 
46
 
47
  router_settings:
48
  model_group_alias:
49
+ gpt-4o: gpt-5.3-codex
50
+ gpt-4.1-mini: gpt-5.3-codex
51
  # gpt-5-codex: claude-4.5-sonnet
52
  # gpt-5.1-codex: claude-4.5-sonnet
litellm_config_v0_bk.yaml ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_list:
2
+ # - model_name: "anthropic/*"
3
+ # litellm_params:
4
+ # model: "openrouter/qwen/qwen3-coder" # Qwen/Qwen3-Coder-480B-A35B-Instruct
5
+ # max_tokens: 65536
6
+ # repetition_penalty: 1.05
7
+ # temperature: 0.7
8
+ # top_k: 20
9
+ # top_p: 0.8
10
+
11
+ # OK curl, ccr not OK, ccapi use litellm-v0 OK, anthropic-curl OK
12
+ - model_name: v0-a
13
+ litellm_params:
14
+ model: v0/v0-1.5-md
15
+ api_base: https://api.v0.dev/v1
16
+ api_key: os.environ/V0_API_KEY1
17
+ model_info:
18
+ max_tokens: 200_000
19
+ id: v0/v0-1.5-md
20
+ additionalProp1:
21
+ info: v0-bug-team
22
+ - model_name: v0-b
23
+ litellm_params:
24
+ model: v0/v0-1.5-md
25
+ api_base: https://api.v0.dev/v1
26
+ api_key: os.environ/V0_API_KEY2
27
+ - model_name: v0-c
28
+ litellm_params:
29
+ model: v0/v0-1.5-md
30
+ api_base: https://api.v0.dev/v1
31
+ api_key: os.environ/V0_API_KEY3
32
+ - model_name: v0
33
+ litellm_params:
34
+ model: v0/v0-1.5-md
35
+ api_base: https://api.v0.dev/v1
36
+ api_key: os.environ/V0_API_KEY1
37
+ model_info:
38
+ max_tokens: 200_000
39
+ id: v0/v0-1.5-md
40
+ additionalProp1:
41
+ info: v0-bug-team
42
+ - model_name: v0
43
+ litellm_params:
44
+ model: v0/v0-1.5-md
45
+ api_base: https://api.v0.dev/v1
46
+ api_key: os.environ/V0_API_KEY2
47
+ - model_name: v0
48
+ litellm_params:
49
+ model: v0/v0-1.5-md
50
+ api_base: https://api.v0.dev/v1
51
+ api_key: os.environ/V0_API_KEY3
52
+
53
+ # cpa in hf
54
+ # gemini-2.5-pro
55
+ # gemini-2.5-flash
56
+ # gemini-3-pro-preview
57
+ # gemini-2.5-flash-lite
58
+ # gemini-3-flash-preview
59
+ # - model_name: gemini-2.5-pro
60
+ # litellm_params:
61
+ # model: openai/gemini-2.5-pro
62
+ # api_base: https://cpa.applane.qzz.io/v1
63
+ # api_key: os.environ/CPAHF_API_KEY
64
+
65
+
66
+ general_settings:
67
+ # master_key: "sk-1234" # Disabled for easy testing
68
+ # master_key: ${LITELLM_MASTER_KEY}
69
+ master_key: os.environ/LITELLM_MASTER_KEY
70
+
71
+ # litellm --config parent_config.yaml --detailed_debug/--debug
72
+ # os.environ["LITELLM_LOG"] = "INFO" os.environ["LITELLM_LOG"] = "DEBUG"
73
+ # https://docs.litellm.ai/docs/proxy/config_settings
74
+ litellm_settings:
75
+ # set_verbose: true # deprecated, use `os.environ['LITELLM_LOG'] = 'DEBUG'
76
+ request_timeout: 180
77
+ drop_params: true
78
+
79
+ router_settings:
80
+ model_group_alias:
81
+ gpt-4o: v0
82
+ gpt-4.1-mini: v0
83
+ # gpt-5-codex: claude-4.5-sonnet
84
+ # gpt-5.1-codex: claude-4.5-sonnet