msj19 committed (verified)
Commit 7652cf9 · Parent(s): 0c923f4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_bilingual_6b.py +24 -0
  2. build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_eng_13b.py +18 -0
  3. build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_eng_13b_4bit.py +23 -0
  4. build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_scen_classifier.py +18 -0
  5. build/lib/opencompass/configs/models/judge_llm/judgelm/hf_judgelm_13b_v1.py +18 -0
  6. build/lib/opencompass/configs/models/judge_llm/judgelm/hf_judgelm_33b_v1.py +18 -0
  7. build/lib/opencompass/configs/models/judge_llm/judgelm/hf_judgelm_7b_v1.py +18 -0
  8. build/lib/opencompass/configs/models/judge_llm/pandalm/hf_alpaca_pandalm_7b_v1.py +18 -0
  9. build/lib/opencompass/configs/models/judge_llm/pandalm/hf_pandalm_7b_v1.py +18 -0
  10. build/lib/opencompass/configs/models/qwen2_5/hf_qwen_2_5_32b.py +12 -0
  11. build/lib/opencompass/configs/models/qwen2_5/hf_qwen_2_5_7b.py +12 -0
  12. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_0_5b_instruct.py +15 -0
  13. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_14b.py +15 -0
  14. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_14b_instruct.py +15 -0
  15. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_1_5b.py +15 -0
  16. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_1_5b_instruct.py +15 -0
  17. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_32b.py +15 -0
  18. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_32b_instruct.py +15 -0
  19. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_3b_instruct.py +15 -0
  20. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_72b.py +17 -0
  21. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_72b_instruct.py +15 -0
  22. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_7b.py +15 -0
  23. build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_7b_instruct.py +15 -0
  24. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_0_5b_instruct.py +14 -0
  25. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct.py +14 -0
  26. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py +21 -0
  27. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_1_5b_instruct.py +14 -0
  28. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct.py +14 -0
  29. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py +21 -0
  30. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_3b_instruct.py +14 -0
  31. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct.py +14 -0
  32. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py +21 -0
  33. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct.py +14 -0
  34. build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py +21 -0
  35. build/lib/opencompass/configs/models/qwen3/lmdeploy_qwen3_0_6b.py +19 -0
  36. build/lib/opencompass/configs/models/qwq/lmdeploy_qwq_32b.py +17 -0
  37. build/lib/opencompass/configs/models/qwq/lmdeploy_qwq_32b_preview.py +15 -0
  38. build/lib/opencompass/configs/models/rwkv/rwkv5_3b.py +25 -0
  39. build/lib/opencompass/configs/models/skywork/hf_skywork_13b.py +12 -0
  40. build/lib/opencompass/configs/models/skywork/lmdeploy_skywork_o1_open_llama3_1_8b_instruct.py +16 -0
  41. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_base_v1.py +21 -0
  42. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_base_v2.py +21 -0
  43. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_chat_v1.py +29 -0
  44. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_chat_v2.py +29 -0
  45. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_70b_base.py +24 -0
  46. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_70b_chat_v2.py +29 -0
  47. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_70b_chat_v3.py +32 -0
  48. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_7b_base.py +21 -0
  49. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_7b_base_v3.py +21 -0
  50. build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_7b_chat_v3.py +29 -0
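Each of these modules is a standard OpenCompass model config exposing a `models` list that an evaluation config pulls in via mmengine's `read_base()`. As a rough usage sketch (the dataset import here is illustrative, not part of this commit):

from mmengine.config import read_base

with read_base():
    # One of the model configs added by this commit.
    from opencompass.configs.models.qwen2_5.lmdeploy_qwen2_5_7b_instruct import \
        models as qwen2_5_7b_instruct_model
    # Illustrative dataset config; substitute whichever benchmark you need.
    from opencompass.configs.datasets.gsm8k.gsm8k_gen import gsm8k_datasets

models = [*qwen2_5_7b_instruct_model]
datasets = [*gsm8k_datasets]

Saved as, say, eval_qwen.py, this would run with `opencompass eval_qwen.py`.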
build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_bilingual_6b.py ADDED
@@ -0,0 +1,24 @@
from opencompass.models import HuggingFaceCausalLM

'''
This is a bilingual 6B version of Auto-J.
It is trained on both the original training data
and its Chinese translation, which can be found at
https://huggingface.co/GAIR/autoj-bilingual-6b
'''

models = [dict(
    type=HuggingFaceCausalLM,
    abbr='autoj-bilingual-6b',
    path='GAIR/autoj-bilingual-6b',
    tokenizer_path='GAIR/autoj-bilingual-6b',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_eng_13b.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='autoj-13b',
    path='GAIR/autoj-13b',
    tokenizer_path='GAIR/autoj-13b',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_eng_13b_4bit.py ADDED
@@ -0,0 +1,23 @@
from opencompass.models import HuggingFaceCausalLM

'''
This is a 4-bit quantized version of Auto-J, quantized with AutoGPTQ
and available on the Hugging Face Hub:
https://huggingface.co/GAIR/autoj-13b-GPTQ-4bits
'''

models = [dict(
    type=HuggingFaceCausalLM,
    abbr='autoj-13b-GPTQ-4bits',
    path='GAIR/autoj-13b-GPTQ-4bits',
    tokenizer_path='GAIR/autoj-13b-GPTQ-4bits',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/auto_j/hf_autoj_scen_classifier.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='autoj-scenario-classifier',
    path='GAIR/autoj-scenario-classifier',
    tokenizer_path='GAIR/autoj-scenario-classifier',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/judgelm/hf_judgelm_13b_v1.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='judgelm-13b-v1-hf',
    path='BAAI/JudgeLM-13B-v1.0',
    tokenizer_path='BAAI/JudgeLM-13B-v1.0',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/judgelm/hf_judgelm_33b_v1.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='judgelm-33b-v1-hf',
    path='BAAI/JudgeLM-33B-v1.0',
    tokenizer_path='BAAI/JudgeLM-33B-v1.0',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=4, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/judgelm/hf_judgelm_7b_v1.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='judgelm-7b-v1-hf',
    path='BAAI/JudgeLM-7B-v1.0',
    tokenizer_path='BAAI/JudgeLM-7B-v1.0',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/pandalm/hf_alpaca_pandalm_7b_v1.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='alpaca-pandalm-7b-v1-hf',
    path='WeOpenML/PandaLM-Alpaca-7B-v1',
    tokenizer_path='WeOpenML/PandaLM-Alpaca-7B-v1',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/judge_llm/pandalm/hf_pandalm_7b_v1.py ADDED
@@ -0,0 +1,18 @@
from opencompass.models import HuggingFaceCausalLM


models = [dict(
    type=HuggingFaceCausalLM,
    abbr='pandalm-7b-v1-hf',
    path='WeOpenML/PandaLM-7B-v1',
    tokenizer_path='WeOpenML/PandaLM-7B-v1',
    tokenizer_kwargs=dict(padding_side='left',
                          truncation_side='left',
                          trust_remote_code=True,
                          use_fast=False,),
    max_out_len=1024,
    max_seq_len=4096,
    batch_size=8,
    model_kwargs=dict(device_map='auto', trust_remote_code=True),
    run_cfg=dict(num_gpus=1, num_procs=1),
)]
build/lib/opencompass/configs/models/qwen2_5/hf_qwen_2_5_32b.py ADDED
@@ -0,0 +1,12 @@
from opencompass.models import HuggingFaceBaseModel

models = [
    dict(
        type=HuggingFaceBaseModel,
        abbr='qwen2.5-32b-hf',
        path='Qwen/Qwen2.5-32B',
        max_out_len=1024,
        batch_size=8,
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/hf_qwen_2_5_7b.py ADDED
@@ -0,0 +1,12 @@
from opencompass.models import HuggingFaceBaseModel

models = [
    dict(
        type=HuggingFaceBaseModel,
        abbr='qwen2.5-7b-hf',
        path='Qwen/Qwen2.5-7B',
        max_out_len=1024,
        batch_size=8,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_0_5b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-0.5b-instruct-turbomind',
        path='Qwen/Qwen2.5-0.5B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
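A note on the generation settings that recur in these lmdeploy configs: `top_k=1` keeps only the single most probable token, so combined with the near-zero temperature the decoding is effectively greedy and deterministic (`top_p=0.9` then has no remaining effect). A minimal sketch of why a tiny temperature collapses the softmax onto the argmax:

import math

def softmax(logits, temperature):
    m = max(logits)  # subtract the max for numerical stability
    exps = [math.exp((l - m) / temperature) for l in logits]
    total = sum(exps)
    return [e / total for e in exps]

print(softmax([2.0, 1.0, 0.5], 1.0))   # spread out: ~[0.63, 0.23, 0.14]
print(softmax([2.0, 1.0, 0.5], 1e-6))  # collapses to the argmax: [1.0, 0.0, 0.0]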
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_14b.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='qwen2.5-14b-turbomind',
        path='Qwen/Qwen2.5-14B',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_14b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-14b-instruct-turbomind',
        path='Qwen/Qwen2.5-14B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_1_5b.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='qwen2.5-1.5b-turbomind',
        path='Qwen/Qwen2.5-1.5B',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_1_5b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-1.5b-instruct-turbomind',
        path='Qwen/Qwen2.5-1.5B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_32b.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='qwen2.5-32b-turbomind',
        path='Qwen/Qwen2.5-32B',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_32b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-32b-instruct-turbomind',
        path='Qwen/Qwen2.5-32B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_3b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-3b-instruct-turbomind',
        path='Qwen/Qwen2.5-3B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_72b.py ADDED
@@ -0,0 +1,17 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='qwen2.5-72b-turbomind',
        path='Qwen/Qwen2.5-72B',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=4),
        gen_config=dict(
            top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024
        ),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=4),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_72b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-72b-instruct-turbomind',
        path='Qwen/Qwen2.5-72B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=4),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=4),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_7b.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModel

models = [
    dict(
        type=TurboMindModel,
        abbr='qwen2.5-7b-turbomind',
        path='Qwen/Qwen2.5-7B',
        engine_config=dict(session_len=7168, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
        max_seq_len=7168,
        max_out_len=1024,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/lmdeploy_qwen2_5_7b_instruct.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen2.5-7b-instruct-turbomind',
        path='Qwen/Qwen2.5-7B-Instruct',
        engine_config=dict(session_len=16384, max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=4096,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_0_5b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-0.5b-instruct-vllm',
        path='Qwen/Qwen2.5-0.5B-Instruct',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-14b-instruct-vllm',
        path='Qwen/Qwen2.5-14B-Instruct',
        model_kwargs=dict(tensor_parallel_size=2),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_14b_instruct_128k.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-14b-instruct-vllm',
        path='Qwen/Qwen2.5-14B-Instruct',
        model_kwargs=dict(
            tensor_parallel_size=4,
            rope_scaling={
                'factor': 4.0,
                'original_max_position_embeddings': 32768,
                'rope_type': 'yarn'
            },
        ),
        max_out_len=4096,
        batch_size=1,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=4),
    )
]
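The `_128k` variants extend the context window with YaRN RoPE scaling rather than using a different checkpoint. A quick check of the arithmetic behind the filename:

# original_max_position_embeddings * factor = extended context window
print(int(32768 * 4.0))  # 131072 tokens, i.e. the '128k' in the filename

Note that `batch_size` drops to 1 in these long-context variants, presumably to keep KV-cache memory within bounds.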
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_1_5b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-1.5b-instruct-vllm',
        path='Qwen/Qwen2.5-1.5B-Instruct',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-32b-instruct-vllm',
        path='Qwen/Qwen2.5-32B-Instruct',
        model_kwargs=dict(tensor_parallel_size=2),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_32b_instruct_128k.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-32b-instruct-vllm',
        path='Qwen/Qwen2.5-32B-Instruct',
        model_kwargs=dict(
            tensor_parallel_size=8,
            rope_scaling={
                'factor': 4.0,
                'original_max_position_embeddings': 32768,
                'rope_type': 'yarn'
            },
        ),
        max_out_len=4096,
        batch_size=1,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=8),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_3b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-3b-instruct-vllm',
        path='Qwen/Qwen2.5-3B-Instruct',
        model_kwargs=dict(tensor_parallel_size=1, gpu_memory_utilization=0.5),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2_5-72b-instruct-vllm',
        path='Qwen/Qwen2.5-72B-Instruct',
        model_kwargs=dict(tensor_parallel_size=4),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=4),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_72b_instruct_128k.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2_5-72b-instruct-vllm',
        path='Qwen/Qwen2.5-72B-Instruct',
        model_kwargs=dict(
            tensor_parallel_size=8,
            rope_scaling={
                'factor': 4.0,
                'original_max_position_embeddings': 32768,
                'rope_type': 'yarn'
            },
        ),
        max_out_len=4096,
        batch_size=1,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=8),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct.py ADDED
@@ -0,0 +1,14 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-7b-instruct-vllm',
        path='Qwen/Qwen2.5-7B-Instruct',
        model_kwargs=dict(tensor_parallel_size=1),
        max_out_len=4096,
        batch_size=16,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/qwen2_5/vllm_qwen2_5_7b_instruct_128k.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import VLLMwithChatTemplate

models = [
    dict(
        type=VLLMwithChatTemplate,
        abbr='qwen2.5-7b-instruct-vllm',
        path='Qwen/Qwen2.5-7B-Instruct',
        model_kwargs=dict(
            tensor_parallel_size=4,
            rope_scaling={
                'factor': 4.0,
                'original_max_position_embeddings': 32768,
                'rope_type': 'yarn'
            },
        ),
        max_out_len=4096,
        batch_size=1,
        generation_kwargs=dict(temperature=0),
        run_cfg=dict(num_gpus=4),
    )
]
build/lib/opencompass/configs/models/qwen3/lmdeploy_qwen3_0_6b.py ADDED
@@ -0,0 +1,19 @@
from opencompass.models import TurboMindModelwithChatTemplate
from opencompass.utils.text_postprocessors import extract_non_reasoning_content

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='qwen_3_0.6b_thinking-turbomind',
        path='Qwen/Qwen3-0.6B',
        engine_config=dict(session_len=32768, max_batch_size=16, tp=1),
        gen_config=dict(
            top_k=20, temperature=0.6, top_p=0.95, do_sample=True, enable_thinking=True
        ),
        max_seq_len=32768,
        max_out_len=32000,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
        pred_postprocessor=dict(type=extract_non_reasoning_content)
    ),
]
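This Qwen3 config runs the model in thinking mode and strips the reasoning trace before scoring via `pred_postprocessor`. A rough sketch of what such a postprocessor does, assuming the reasoning is wrapped in Qwen-style `<think>...</think>` tags (the actual OpenCompass implementation may differ in detail):

def extract_non_reasoning_content_sketch(text: str) -> str:
    # Keep only what follows the closing think tag, if present.
    marker = '</think>'
    if marker in text:
        return text.split(marker, 1)[1].strip()
    return text.strip()

print(extract_non_reasoning_content_sketch('<think>2 + 2 = 4</think>The answer is 4.'))
# -> 'The answer is 4.'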
build/lib/opencompass/configs/models/qwq/lmdeploy_qwq_32b.py ADDED
@@ -0,0 +1,17 @@
from opencompass.models import TurboMindModelwithChatTemplate
from opencompass.utils.text_postprocessors import extract_non_reasoning_content

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='QwQ-32B',
        path='Qwen/QwQ-32B',
        engine_config=dict(session_len=32768, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=8192),
        max_seq_len=32768,
        max_out_len=8192,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
        pred_postprocessor=dict(type=extract_non_reasoning_content)
    )
]
build/lib/opencompass/configs/models/qwq/lmdeploy_qwq_32b_preview.py ADDED
@@ -0,0 +1,15 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='QwQ-32B-Preview',
        path='Qwen/QwQ-32B-Preview',
        engine_config=dict(session_len=32768, max_batch_size=16, tp=2),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=8192),
        max_seq_len=32768,
        max_out_len=8192,
        batch_size=16,
        run_cfg=dict(num_gpus=2),
    )
]
build/lib/opencompass/configs/models/rwkv/rwkv5_3b.py ADDED
@@ -0,0 +1,25 @@
from opencompass.models import HuggingFaceCausalLM

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='rwkv-5-3b',
        path='RWKV/rwkv-5-world-3b',
        tokenizer_path='RWKV/rwkv-5-world-3b',
        model_kwargs=dict(
            device_map='auto',
            trust_remote_code=True,
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
            use_fast=False,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_padding=True,
        batch_size=16,
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]
build/lib/opencompass/configs/models/skywork/hf_skywork_13b.py ADDED
@@ -0,0 +1,12 @@
from opencompass.models import HuggingFaceBaseModel

models = [
    dict(
        type=HuggingFaceBaseModel,
        abbr='skywork-13b-hf',
        path='Skywork/Skywork-13B-base',
        max_out_len=1024,
        batch_size=8,
        run_cfg=dict(num_gpus=1),
    )
]
build/lib/opencompass/configs/models/skywork/lmdeploy_skywork_o1_open_llama3_1_8b_instruct.py ADDED
@@ -0,0 +1,16 @@
from opencompass.models import TurboMindModelwithChatTemplate

models = [
    dict(
        type=TurboMindModelwithChatTemplate,
        abbr='Skywork-o1-Open-Llama-3_1-8B-turbomind',
        path='Skywork/Skywork-o1-Open-Llama-3.1-8B',
        engine_config=dict(max_batch_size=16, tp=1),
        gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=4096),
        max_seq_len=16384,
        max_out_len=8192,
        batch_size=16,
        run_cfg=dict(num_gpus=1),
        stop_words=['<|end_of_text|>', '<|eot_id|>'],
    )
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_base_v1.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-13b-base-v1-hf',
        path='TigerResearch/tigerbot-13b-base-v1',
        tokenizer_path='TigerResearch/tigerbot-13b-base-v1',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=2, num_procs=1),
    ),
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_base_v2.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-13b-base-v2-hf',
        path='TigerResearch/tigerbot-13b-base',
        tokenizer_path='TigerResearch/tigerbot-13b-base',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=2, num_procs=1),
    ),
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_chat_v1.py ADDED
@@ -0,0 +1,29 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n\n### Instruction:\n'),
        dict(role='BOT', begin='\n\n### Response:\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-13b-chat-v1-hf',
        path='TigerResearch/tigerbot-13b-chat-v1',
        tokenizer_path='TigerResearch/tigerbot-13b-chat-v1',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
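The TigerBot chat configs share a `_meta_template` that frames each turn with Alpaca-style markers. Purely to make that framing concrete, here is a hypothetical rendering of a one-turn dialogue (OpenCompass assembles prompts internally; this is not its actual code):

rounds = [
    dict(role='HUMAN', begin='\n\n### Instruction:\n'),
    dict(role='BOT', begin='\n\n### Response:\n', generate=True),
]

def render(messages):
    # Prefix each message with its role marker, then open the BOT turn
    # so the model completes after '### Response:'.
    out = ''
    for role, text in messages:
        spec = next(r for r in rounds if r['role'] == role)
        out += spec['begin'] + text
    out += next(r for r in rounds if r.get('generate'))['begin']
    return out

print(render([('HUMAN', 'Summarize the plot of Hamlet in one sentence.')]))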
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_13b_chat_v2.py ADDED
@@ -0,0 +1,29 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n\n### Instruction:\n'),
        dict(role='BOT', begin='\n\n### Response:\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-13b-chat-v2-hf',
        path='TigerResearch/tigerbot-13b-chat',
        tokenizer_path='TigerResearch/tigerbot-13b-chat',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=2, num_procs=1),
    )
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_70b_base.py ADDED
@@ -0,0 +1,24 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-70b-base-v1-hf',
        path='TigerResearch/tigerbot-70b-base',
        tokenizer_path='TigerResearch/tigerbot-70b-base',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=4, num_procs=1),
    ),
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_70b_chat_v2.py ADDED
@@ -0,0 +1,29 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n\n### Instruction:\n'),
        dict(role='BOT', begin='\n\n### Response:\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-70b-chat-v2-hf',
        path='TigerResearch/tigerbot-70b-chat-v2',
        tokenizer_path='TigerResearch/tigerbot-70b-chat-v2',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_70b_chat_v3.py ADDED
@@ -0,0 +1,32 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n\n### Instruction:\n'),
        dict(role='BOT', begin='\n\n### Response:\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-70b-chat-v3-hf',
        path='TigerResearch/tigerbot-70b-chat-v3',
        tokenizer_path='TigerResearch/tigerbot-70b-chat-v3',
        model_kwargs=dict(
            trust_remote_code=True,
            device_map='auto',
        ),
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        meta_template=_meta_template,
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        run_cfg=dict(num_gpus=4, num_procs=1),
    )
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_7b_base.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-base-7b-hf',
        path='TigerResearch/tigerbot-7b-base',
        tokenizer_path='TigerResearch/tigerbot-7b-base',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=1, num_procs=1),
    ),
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_7b_base_v3.py ADDED
@@ -0,0 +1,21 @@
from opencompass.models import HuggingFaceCausalLM


models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-7b-base-v3-hf',
        path='TigerResearch/tigerbot-7b-base',
        tokenizer_path='TigerResearch/tigerbot-7b-base',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=1, num_procs=1),
    ),
]
build/lib/opencompass/configs/models/tigerbot/hf_tigerbot_7b_chat_v3.py ADDED
@@ -0,0 +1,29 @@
from opencompass.models import HuggingFaceCausalLM


_meta_template = dict(
    round=[
        dict(role='HUMAN', begin='\n\n### Instruction:\n'),
        dict(role='BOT', begin='\n\n### Response:\n', generate=True),
    ],
)

models = [
    dict(
        type=HuggingFaceCausalLM,
        abbr='tigerbot-7b-chat-v3-hf',
        path='TigerResearch/tigerbot-7b-chat',
        tokenizer_path='TigerResearch/tigerbot-7b-chat',
        tokenizer_kwargs=dict(
            padding_side='left',
            truncation_side='left',
            trust_remote_code=True,
        ),
        max_out_len=100,
        max_seq_len=2048,
        batch_size=8,
        meta_template=_meta_template,
        model_kwargs=dict(trust_remote_code=True, device_map='auto'),
        run_cfg=dict(num_gpus=1, num_procs=1),
    )
]