JunHowie committed
Commit da89521 · verified · 1 parent: ea420b9

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. .ipynb_checkpoints/README-checkpoint.md +260 -0
  3. .ipynb_checkpoints/config-checkpoint.json +111 -0
  4. .mdl +0 -0
  5. .msc +0 -0
  6. .mv +1 -0
  7. README.md +260 -0
  8. chat_template.jinja +165 -0
  9. config.json +111 -0
  10. configuration.json +1 -0
  11. configuration_minimax_m2.py +200 -0
  12. docs/sglang_deploy_guide.md +110 -0
  13. docs/sglang_deploy_guide_cn.md +119 -0
  14. docs/tool_calling_guide.md +487 -0
  15. docs/tool_calling_guide_cn.md +499 -0
  16. docs/transformers_deploy_guide.md +91 -0
  17. docs/transformers_deploy_guide_cn.md +92 -0
  18. docs/vllm_deploy_guide.md +113 -0
  19. docs/vllm_deploy_guide_cn.md +123 -0
  20. generation_config.json +9 -0
  21. merges.txt +0 -0
  22. model-00001-of-00042.safetensors +3 -0
  23. model-00003-of-00042.safetensors +3 -0
  24. model-00005-of-00042.safetensors +3 -0
  25. model-00006-of-00042.safetensors +3 -0
  26. model-00007-of-00042.safetensors +3 -0
  27. model-00014-of-00042.safetensors +3 -0
  28. model-00015-of-00042.safetensors +3 -0
  29. model-00016-of-00042.safetensors +3 -0
  30. model-00017-of-00042.safetensors +3 -0
  31. model-00019-of-00042.safetensors +3 -0
  32. model-00020-of-00042.safetensors +3 -0
  33. model-00021-of-00042.safetensors +3 -0
  34. model-00022-of-00042.safetensors +3 -0
  35. model-00023-of-00042.safetensors +3 -0
  36. model-00024-of-00042.safetensors +3 -0
  37. model-00027-of-00042.safetensors +3 -0
  38. model-00028-of-00042.safetensors +3 -0
  39. model-00029-of-00042.safetensors +3 -0
  40. model-00030-of-00042.safetensors +3 -0
  41. model-00032-of-00042.safetensors +3 -0
  42. model-00033-of-00042.safetensors +3 -0
  43. model-00035-of-00042.safetensors +3 -0
  44. model-00036-of-00042.safetensors +3 -0
  45. model-00039-of-00042.safetensors +3 -0
  46. model-00042-of-00042.safetensors +3 -0
  47. model.safetensors.index.json +3 -0
  48. modeling_minimax_m2.py +706 -0
  49. tokenizer.json +0 -0
  50. tokenizer_config.json +495 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
.ipynb_checkpoints/README-checkpoint.md ADDED
@@ -0,0 +1,260 @@
.ipynb_checkpoints/config-checkpoint.json ADDED
@@ -0,0 +1,111 @@
+ {
+   "name_or_path": "tclf90/MiniMax-M2.1-AWQ-FP16Mix2",
+   "architectures": [
+     "MiniMaxM2ForCausalLM"
+   ],
+   "attn_type_list": [
+     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+     1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
+   ],
+   "auto_map": {
+     "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
+     "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
+   },
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 3072,
+   "intermediate_size": 1536,
+   "max_position_embeddings": 196608,
+   "model_type": "minimax_m2",
+   "mtp_transformer_layers": 1,
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 8,
+   "num_hidden_layers": 62,
+   "num_key_value_heads": 8,
+   "num_local_experts": 256,
+   "num_mtp_modules": 3,
+   "qk_norm_type": "per_layer",
+   "quantization_config": {
+     "quant_method": "awq",
+     "bits": 4,
+     "group_size": 128,
+     "version": "gemm",
+     "zero_point": true,
+     "modules_to_not_convert": [
+       "self_attn"
+     ]
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_theta": 5000000,
+   "rotary_dim": 64,
+   "scoring_func": "sigmoid",
+   "shared_intermediate_size": 0,
+   "tie_word_embeddings": false,
+   "transformers_version": "4.46.1",
+   "use_cache": true,
+   "use_mtp": true,
+   "use_qk_norm": true,
+   "use_routing_bias": true,
+   "vocab_size": 200064,
+   "torch_dtype": "float16"
+ }
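The quantization setup above can be inspected programmatically. A minimal sketch, assuming the model has been downloaded locally (the path mirrors the `cache_dir` used in the README's download snippet; `trust_remote_code=True` is needed because the config class lives in `configuration_minimax_m2.py`):

```python
from transformers import AutoConfig

# Hypothetical local path; point this at wherever snapshot_download placed the files.
cfg = AutoConfig.from_pretrained(
    "your_local_path/tclf90/MiniMax-M2.1-AWQ",
    trust_remote_code=True,
)
print(cfg.quantization_config["quant_method"], cfg.quantization_config["bits"])  # awq 4
print(cfg.num_local_experts, cfg.num_experts_per_tok)  # 256 experts, 8 routed per token
```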
.mdl ADDED
Binary file (48 Bytes).
 
.msc ADDED
Binary file (5.45 kB).
 
.mv ADDED
@@ -0,0 +1 @@
+ Revision:master,CreatedAt:1766848785
README.md ADDED
@@ -0,0 +1,260 @@
+ ---
+ license: other
+ license_name: modified-mit
+ license_link: https://github.com/MiniMax-AI/MiniMax-M2.1/blob/main/LICENSE
+ library_name: transformers
+ pipeline_tag: text-generation
+ tags:
+ - vLLM
+ - AWQ
+ base_model:
+ - MiniMaxAI/MiniMax-M2.1
+ base_model_relation: quantized
+
+ ---
+ # MiniMax-M2.1-AWQ
+ Base model: [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
+
+ ### 【Dependencies / Installation】
+
+ ```text
+ vllm==0.13.0
+ ```
+
+ As of **2025-12-27**, make sure your system has CUDA 12.8 installed.
+
+ Then, create a fresh Python environment (e.g., a Python 3.12 venv) and run:
+ ```bash
+ pip install -U vllm==0.13.0
+ ```
+ [vLLM Official Guide](https://docs.vllm.ai/projects/recipes/en/latest/MiniMax/MiniMax-M2.html)
+
+
+ ### 【vLLM Startup Command】
+ <i>Note: When launching with TP=8, include `--enable-expert-parallel`;
+ otherwise the expert tensors will not be evenly sharded across the GPUs.</i>
+
+ ```bash
+ export VLLM_USE_DEEP_GEMM=0
+ export VLLM_USE_FLASHINFER_MOE_FP16=1
+ export VLLM_USE_FLASHINFER_SAMPLER=0
+ export OMP_NUM_THREADS=4
+
+ vllm serve \
+ __YOUR_PATH__/tclf90/MiniMax-M2.1-AWQ \
+ --served-model-name MY_MODEL \
+ --swap-space 16 \
+ --max-num-seqs 32 \
+ --max-model-len 32768 \
+ --gpu-memory-utilization 0.9 \
+ --tensor-parallel-size 8 \
+ --enable-expert-parallel \
+ --enable-auto-tool-choice \
+ --tool-call-parser minimax_m2 \
+ --reasoning-parser minimax_m2_append_think \
+ --trust-remote-code \
+ --host 0.0.0.0 \
+ --port 8000
+ ```
59
+
60
+ ### 【Logs】
61
+ ```
62
+ 2025-12-27
63
+ 1. Initial commit
64
+ ```
65
+
66
+ ### 【Model Files】
67
+ | File Size | Last Updated |
68
+ |-----------|--------------|
69
+ | `117GiB` | `2025-12-27` |
70
+
71
+ ### 【Model Download】
72
+ ```python
73
+ from modelscope import snapshot_download
74
+ snapshot_download('tclf90/MiniMax-M2.1-AWQ', cache_dir="your_local_path")
75
+ ```
76
+
77
+ ### 【Overview】
78
+ <div align="center">
79
+
80
+ <svg width="60%" height="auto" viewBox="0 0 144 48" fill="none" xmlns="http://www.w3.org/2000/svg">
81
+ <path d="M26.6782 7.96523C26.6782 7.02436 25.913 6.26087 24.9739 6.26087C24.0348 6.26087 23.2695 7.0261 23.2695 7.96523V36.2139C23.2695 38.4 21.4904 40.1791 19.3043 40.1791C17.1183 40.1791 15.3391 38.4 15.3391 36.2139V18.0904C15.3391 17.1496 14.5739 16.3861 13.6348 16.3861C12.6956 16.3861 11.9304 17.1513 11.9304 18.0904V25.7722C11.9304 27.9583 10.1513 29.7374 7.96518 29.7374C5.7791 29.7374 4 27.9583 4 25.7722V22.9878C4 22.3635 4.50609 21.8574 5.13043 21.8574C5.75478 21.8574 6.26087 22.3635 6.26087 22.9878V25.7722C6.26087 26.713 7.02605 27.4765 7.96518 27.4765C8.90431 27.4765 9.66954 26.7113 9.66954 25.7722V18.0904C9.66954 15.9044 11.4487 14.1252 13.6348 14.1252C15.8209 14.1252 17.6 15.9044 17.6 18.0904V36.2139C17.6 37.1548 18.3652 37.9183 19.3043 37.9183C20.2435 37.9183 21.0087 37.153 21.0087 36.2139V25.1322V7.96523C21.0087 5.77914 22.7878 4 24.9739 4C27.16 4 28.9391 5.77914 28.9391 7.96523V31.3565C28.9391 31.9809 28.433 32.487 27.8087 32.487C27.1843 32.487 26.6782 31.9809 26.6782 31.3565V7.96523ZM47.6539 14.1252C45.4678 14.1252 43.6887 15.9044 43.6887 18.0904V33.2296C43.6887 34.1704 42.9235 34.9339 41.9843 34.9339C41.0452 34.9339 40.28 34.1687 40.28 33.2296V7.96523C40.28 5.77914 38.5008 4 36.3148 4C34.1287 4 32.3496 5.77914 32.3496 7.96523V40.0348C32.3496 40.9756 31.5843 41.7391 30.6452 41.7391C29.7061 41.7391 28.9409 40.9739 28.9409 40.0348V36.0643C28.9409 35.44 28.4348 34.9339 27.8104 34.9339C27.1861 34.9339 26.68 35.44 26.68 36.0643V40.0348C26.68 42.2209 28.4591 44 30.6452 44C32.8313 44 34.6104 42.2209 34.6104 40.0348V7.96523C34.6104 7.02436 35.3756 6.26087 36.3148 6.26087C37.2539 6.26087 38.0191 7.0261 38.0191 7.96523V33.2296C38.0191 35.4156 39.7982 37.1948 41.9843 37.1948C44.1704 37.1948 45.9496 35.4156 45.9496 33.2296V18.0904C45.9496 17.1496 46.7148 16.3861 47.6539 16.3861C48.593 16.3861 49.3582 17.1513 49.3582 18.0904V31.3565C49.3582 31.9809 49.8643 32.487 50.4887 32.487C51.113 32.487 51.6191 31.9809 51.6191 31.3565V18.0904C51.6191 15.9044 49.84 14.1252 47.6539 14.1252Z" fill="url(#paint0_linear_17_483)"/>
82
+ <path d="M68.7671 16.5615H71.2541C71.3254 16.5615 71.3845 16.5859 71.435 16.6363C71.4836 16.6868 71.5097 16.7459 71.5097 16.8172V31.1824C71.5097 31.2537 71.4854 31.3128 71.435 31.3633C71.3845 31.4137 71.3254 31.4381 71.2541 31.4381H68.7671C68.6958 31.4381 68.6367 31.4137 68.5862 31.3633C68.5358 31.3146 68.5115 31.2537 68.5115 31.1824V21.812C68.5115 21.7563 68.4976 21.7268 68.4697 21.7268C68.4419 21.7268 68.4123 21.7476 68.3845 21.7911L66.1323 25.318C66.061 25.4311 65.9619 25.4885 65.8349 25.4885H64.581C64.4541 25.4885 64.3549 25.4328 64.2836 25.318L62.0315 21.7911C62.0036 21.7494 61.9741 21.7302 61.9462 21.7372C61.9184 21.7441 61.9045 21.7772 61.9045 21.8328V31.1824C61.9045 31.2537 61.8802 31.3128 61.8297 31.3633C61.7793 31.4137 61.7202 31.4381 61.6489 31.4381H59.1619C59.0906 31.4381 59.0315 31.4137 58.981 31.3633C58.9306 31.3146 58.9062 31.2537 58.9062 31.1824V16.8172C58.9062 16.7459 58.9306 16.6868 58.981 16.6363C59.0315 16.5859 59.0906 16.5615 59.1619 16.5615H61.6489C61.7758 16.5615 61.8749 16.6189 61.9462 16.732L65.1341 21.6833C65.1758 21.7685 65.2193 21.7685 65.261 21.6833L68.4697 16.732C68.541 16.6189 68.6402 16.5615 68.7671 16.5615Z" fill="currentColor"/>
83
+ <path d="M74.1764 31.3633C74.1259 31.3146 74.1016 31.2537 74.1016 31.1824V16.8172C74.1016 16.7459 74.1259 16.6868 74.1764 16.6363C74.2268 16.5859 74.2859 16.5615 74.3572 16.5615H76.8442C76.9155 16.5615 76.9746 16.5859 77.0251 16.6363C77.0737 16.6868 77.0998 16.7459 77.0998 16.8172V31.1824C77.0998 31.2537 77.0755 31.3128 77.0251 31.3633C76.9746 31.4137 76.9155 31.4381 76.8442 31.4381H74.3572C74.2859 31.4381 74.2268 31.4137 74.1764 31.3633Z" fill="currentColor"/>
84
+ <path d="M88.3066 16.6361C88.3553 16.5874 88.4162 16.5613 88.4875 16.5613H90.9744C91.0457 16.5613 91.1049 16.5857 91.1553 16.6361C91.204 16.6865 91.2301 16.7457 91.2301 16.817V31.1822C91.2301 31.2535 91.2057 31.3126 91.1553 31.363C91.1049 31.4135 91.0457 31.4378 90.9744 31.4378H88.5727C88.4301 31.4378 88.331 31.3822 88.2753 31.2674L82.771 22.1717C82.7431 22.13 82.7136 22.1109 82.6858 22.1178C82.6579 22.1248 82.644 22.1578 82.644 22.2135L82.6858 31.1805C82.6858 31.2518 82.6614 31.3109 82.611 31.3613C82.5606 31.4117 82.5014 31.4361 82.4301 31.4361H79.9431C79.8718 31.4361 79.8127 31.4117 79.7623 31.3613C79.7118 31.3126 79.6875 31.2518 79.6875 31.1805V16.8152C79.6875 16.7439 79.7118 16.6848 79.7623 16.6344C79.8127 16.5839 79.8718 16.5596 79.9431 16.5596H82.3449C82.4858 16.5596 82.5849 16.617 82.6423 16.73L88.124 25.7822C88.1518 25.8239 88.1797 25.8431 88.2092 25.8361C88.2371 25.8292 88.251 25.7978 88.251 25.7404L88.2301 16.8152C88.2301 16.7439 88.2545 16.6848 88.3049 16.6344L88.3066 16.6361Z" fill="currentColor"/>
85
+ <path d="M93.8951 31.3633C93.8446 31.3146 93.8203 31.2537 93.8203 31.1824V16.8172C93.8203 16.7459 93.8446 16.6868 93.8951 16.6363C93.9455 16.5859 94.0047 16.5615 94.076 16.5615H96.5629C96.6342 16.5615 96.6934 16.5859 96.7438 16.6363C96.7925 16.6868 96.8186 16.7459 96.8186 16.8172V31.1824C96.8186 31.2537 96.7942 31.3128 96.7438 31.3633C96.6934 31.4137 96.6342 31.4381 96.5629 31.4381H94.076C94.0047 31.4381 93.9455 31.4137 93.8951 31.3633Z" fill="currentColor"/>
86
+ <path d="M109.267 16.5615H111.754C111.825 16.5615 111.885 16.5859 111.935 16.6363C111.984 16.6868 112.01 16.7459 112.01 16.8172V31.1824C112.01 31.2537 111.985 31.3128 111.935 31.3633C111.885 31.4137 111.825 31.4381 111.754 31.4381H109.267C109.196 31.4381 109.137 31.4137 109.086 31.3633C109.036 31.3146 109.011 31.2537 109.011 31.1824V21.812C109.011 21.7563 108.998 21.7268 108.97 21.7268C108.942 21.7268 108.912 21.7476 108.885 21.7911L106.632 25.318C106.561 25.4311 106.462 25.4885 106.335 25.4885H105.081C104.954 25.4885 104.855 25.4328 104.784 25.318L102.531 21.7911C102.504 21.7494 102.474 21.7302 102.446 21.7372C102.418 21.7441 102.405 21.7772 102.405 21.8328V31.1824C102.405 31.2537 102.38 31.3128 102.33 31.3633C102.279 31.4137 102.22 31.4381 102.149 31.4381H99.6619C99.5906 31.4381 99.5315 31.4137 99.481 31.3633C99.4306 31.3146 99.4062 31.2537 99.4062 31.1824V16.8172C99.4062 16.7459 99.4306 16.6868 99.481 16.6363C99.5315 16.5859 99.5906 16.5615 99.6619 16.5615H102.149C102.276 16.5615 102.375 16.6189 102.446 16.732L105.634 21.6833C105.676 21.7685 105.719 21.7685 105.761 21.6833L108.97 16.732C109.041 16.6189 109.14 16.5615 109.267 16.5615Z" fill="currentColor"/>
87
+ <path d="M123.782 31.2241L123.144 29.1424C123.116 29.0867 123.079 29.0572 123.038 29.0572H117.81C117.768 29.0572 117.732 29.085 117.704 29.1424L117.088 31.2241C117.046 31.3668 116.954 31.4363 116.812 31.4363H114.112C114.027 31.4363 113.963 31.412 113.921 31.3615C113.879 31.3128 113.871 31.2381 113.9 31.1389L118.49 16.7737C118.532 16.6328 118.624 16.5615 118.766 16.5615H122.102C122.243 16.5615 122.335 16.6328 122.379 16.7737L126.968 31.1389C126.982 31.1668 126.989 31.2033 126.989 31.245C126.989 31.372 126.911 31.4363 126.756 31.4363H124.057C123.916 31.4363 123.824 31.365 123.78 31.2241H123.782ZM118.554 26.7407H122.295C122.38 26.7407 122.408 26.6989 122.38 26.6137L120.467 20.3024C120.453 20.2467 120.432 20.2207 120.403 20.2276C120.375 20.2346 120.352 20.2589 120.339 20.3024L118.469 26.6137C118.455 26.6989 118.483 26.7407 118.554 26.7407Z" fill="currentColor"/>
88
+ <path d="M128.222 31.353C128.18 31.2974 128.187 31.2261 128.243 31.1409L132.365 24.0643C132.393 24.0226 132.393 23.9791 132.365 23.9374L128.243 16.8609L128.201 16.7339C128.201 16.6209 128.28 16.5635 128.434 16.5635H131.133C131.274 16.5635 131.38 16.6209 131.452 16.7339L134.213 21.6C134.255 21.6852 134.299 21.6852 134.34 21.6L137.102 16.7339C137.173 16.6209 137.28 16.5635 137.42 16.5635H140.099C140.198 16.5635 140.269 16.5913 140.311 16.6487C140.353 16.7061 140.346 16.7756 140.29 16.8609L136.168 23.9374C136.154 23.9791 136.154 24.0226 136.168 24.0643L140.29 31.1409L140.332 31.2678C140.332 31.3809 140.253 31.4383 140.099 31.4383H137.42C137.278 31.4383 137.172 31.3826 137.102 31.2678L134.34 26.4226C134.299 26.3374 134.255 26.3374 134.213 26.4226L131.429 31.2678C131.358 31.3809 131.252 31.4383 131.111 31.4383H128.433C128.333 31.4383 128.262 31.4104 128.22 31.353H128.222Z" fill="currentColor"/>
89
+ <defs>
90
+ <linearGradient id="paint0_linear_17_483" x1="3.99826" y1="24" x2="51.6208" y2="24" gradientUnits="userSpaceOnUse">
91
+ <stop stop-color="#E21680"/>
92
+ <stop offset="1" stop-color="#FF633A"/>
93
+ </linearGradient>
94
+ </defs>
95
+ </svg>
96
+
97
+ </div>
98
+ <hr>
99
+
100
+ <div align="center" style="line-height: 1.4; font-size:16px; margin-top: 30px;">
101
+ Join Our
102
+ <a href="https://platform.minimaxi.com/docs/faq/contact-us" target="_blank" style="font-size:17px; margin: 2px;">
103
+ 💬 WeChat
104
+ </a> |
105
+ <a href="https://discord.com/invite/hvvt8hAye6" target="_blank" style="font-size:17px; margin: 2px;">
106
+ 🧩 Discord
107
+ </a>
108
+ community.
109
+ </div>
110
+ <div align="center" style="line-height: 1.2; font-size:16px;">
111
+ <a href="https://agent.minimax.io/" target="_blank" style="display: inline-block; margin: 4px;">
112
+ MiniMax Agent
113
+ </a> |
114
+ <a href="https://platform.minimax.io/docs/guides/text-generation" target="_blank" style="display: inline-block; margin: 4px;">
115
+ ⚡️ API
116
+ </a> |
117
+ <a href="https://github.com/MiniMax-AI/MiniMax-MCP" style="display: inline-block; margin: 4px;">
118
+ MCP
119
+ </a> |
120
+ <a href="https://www.minimax.io" target="_blank" style="display: inline-block; margin: 4px;">
121
+ MiniMax Website
122
+ </a>
123
+ </div>
124
+ <div align="center" style="line-height: 1.2; font-size:16px; margin-bottom: 30px;">
125
+ <a href="https://huggingface.co/MiniMaxAI" target="_blank" style="margin: 2px;">
126
+ 🤗 Hugging Face
127
+ </a> |
128
+ <a href="https://github.com/MiniMax-AI/MiniMax-M2.1" target="_blank" style="margin: 2px;">
129
+ 🐙 GitHub
130
+ </a> |
131
+ <a href="https://www.modelscope.cn/organization/MiniMax" target="_blank" style="margin: 2px;">
132
+ 🤖️ ModelScope
133
+ </a> |
134
+ <a href="https://github.com/MiniMax-AI/MiniMax-M2.1/blob/main/LICENSE" style="margin: 2px;">
135
+ 📄 License: Modified-MIT
136
+ </a>
137
+ </div>
138
+
139
+ # Meet MiniMax-M2.1
140
+
141
+ Today, we are handing **MiniMax-M2.1** over to the open-source community. This release is more than just a parameter update; it is a significant step toward democratizing top-tier agentic capabilities.
142
+
143
+ M2.1 was built to shatter the stereotype that high-performance agents must remain behind closed doors. We have optimized the model specifically for robustness in coding, tool use, instruction following, and long-horizon planning. From automating multilingual software development to executing complex, multi-step office workflows, MiniMax-M2.1 empowers developers to build the next generation of autonomous applications—all while being fully transparent, controllable, and accessible.
144
+
145
+ We believe true intelligence should be within reach. M2.1 is our commitment to the future, and a powerful new tool in your hands.
146
+
147
+ <p align="center">
148
+ <img width="100%" src="figures/bench.png">
149
+ </p>
150
+
151
+ ## How to Use
152
+
153
+ - The MiniMax-M2.1 API is now live on the MiniMax Open Platform: https://platform.minimax.io/docs/guides/text-generation
154
+ - Our product MiniMax Agent, built on MiniMax-M2.1, is now publicly available: https://agent.minimax.io/
155
+ - The MiniMax-M2.1 model weights are now open-source, allowing for local deployment and use: https://huggingface.co/MiniMaxAI/MiniMax-M2.1
156
+
157
+ ## Benchmarks
158
+
159
+ MiniMax-M2.1 delivers a significant leap over M2 on core software engineering leaderboards. It shines particularly bright in multilingual scenarios, where it outperforms Claude Sonnet 4.5 and closely approaches Claude Opus 4.5.
160
+
161
+ | Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro | GPT-5.2 (thinking) | DeepSeek V3.2 |
162
+ | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
163
+ | SWE-bench Verified | 74.0 | 69.4 | 77.2 | 80.9 | 78.0 | 80.0 | 73.1 |
164
+ | Multi-SWE-bench | 49.4 | 36.2 | 44.3 | 50.0 | 42.7 | x | 37.4 |
165
+ | SWE-bench Multilingual | 72.5 | 56.5 | 68 | 77.5 | 65.0 | 72.0 | 70.2 |
166
+ | Terminal-bench 2.0 | 47.9 | 30.0 | 50.0 | 57.8 | 54.2 | 54.0 | 46.4 |
167
+
168
+ We also evaluated MiniMax-M2.1 on SWE-bench Verified across a variety of coding agent frameworks. The results highlight the model's exceptional framework generalization and robust stability.
169
+
170
+ Furthermore, across specific benchmarks—including test case generation, code performance optimization, code review, and instruction following—MiniMax-M2.1 demonstrates comprehensive improvements over M2. In these specialized domains, it consistently matches or exceeds the performance of Claude Sonnet 4.5.
171
+
172
+ | Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro | GPT-5.2 (thinking) | DeepSeek V3.2 |
173
+ | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
174
+ | SWE-bench Verified (Droid) | 71.3 | 68.1 | 72.3 | 75.2 | x | x | 67.0 |
175
+ | SWE-bench Verified (mini-swe-agent) | 67.0 | 61.0 | 70.6 | 74.4 | 71.8 | 74.2 | 60.0 |
176
+ | SWT-bench | 69.3 | 32.8 | 69.5 | 80.2 | 79.7 | 80.7 | 62.0 |
177
+ | SWE-Perf | 3.1 | 1.4 | 3.0 | 4.7 | 6.5 | 3.6 | 0.9 |
178
+ | SWE-Review | 8.9 | 3.4 | 10.5 | 16.2 | x | x | 6.4 |
179
+ | OctoCodingbench | 26.1 | 13.3 | 22.8 | 36.2 | 22.9 | x | 26.0 |
180
+
181
+ To evaluate the model's full-stack capability to architect complete, functional applications "from zero to one," we established a novel benchmark: [VIBE (Visual & Interactive Benchmark for Execution in Application Development)](https://huggingface.co/datasets/MiniMaxAI/VIBE). This suite encompasses five core subsets: Web, Simulation, Android, iOS, and Backend. Distinguishing itself from traditional benchmarks, VIBE leverages an innovative Agent-as-a-Verifier (AaaV) paradigm to automatically assess the interactive logic and visual aesthetics of generated applications within a real runtime environment.
182
+
183
+ MiniMax-M2.1 delivers outstanding performance on the VIBE aggregate benchmark, achieving an average score of 88.6—demonstrating robust full-stack development capabilities. It excels particularly in the VIBE-Web (91.5) and VIBE-Android (89.7) subsets.
184
+
185
+ | Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro |
186
+ | ----- | ----- | ----- | ----- | ----- | ----- |
187
+ | VIBE (Average) | 88.6 | 67.5 | 85.2 | 90.7 | 82.4 |
188
+ | VIBE-Web | 91.5 | 80.4 | 87.3 | 89.1 | 89.5 |
189
+ | VIBE-Simulation | 87.1 | 77.0 | 79.1 | 84.0 | 89.2 |
190
+ | VIBE-Android | 89.7 | 69.2 | 87.5 | 92.2 | 78.7 |
191
+ | VIBE-iOS | 88.0 | 39.5 | 81.2 | 90.0 | 75.8 |
192
+ | VIBE-Backend | 86.7 | 67.8 | 90.8 | 98.0 | 78.7 |
193
+
194
+ MiniMax-M2.1 also demonstrates steady improvements over M2 in both long-horizon tool use and comprehensive intelligence metrics.
195
+
196
+ | Benchmark | MiniMax-M2.1 | MiniMax-M2 | Claude Sonnet 4.5 | Claude Opus 4.5 | Gemini 3 Pro | GPT-5.2 (thinking) | DeepSeek V3.2 |
197
+ | ----- | ----- | ----- | ----- | ----- | ----- | ----- | ----- |
198
+ | Toolathlon | 43.5 | 16.7 | 38.9 | 43.5 | 36.4 | 41.7 | 35.2 |
199
+ | BrowseComp | 47.4 | 44.0 | 19.6 | 37.0 | 37.8 | 65.8 | 51.4 |
200
+ | BrowseComp (context management) | 62.0 | 56.9 | 26.1 | 57.8 | 59.2 | 70.0 | 67.6 |
201
+ | AIME25 | 83.0 | 78.0 | 88.0 | 91.0 | 96.0 | 98.0 | 92.0 |
202
+ | MMLU-Pro | 88.0 | 82.0 | 88.0 | 90.0 | 90.0 | 87.0 | 86.0 |
203
+ | GPQA-D | 83.0 | 78.0 | 83.0 | 87.0 | 91.0 | 90.0 | 84.0 |
204
+ | HLE w/o tools | 22.2 | 12.5 | 17.3 | 28.4 | 37.2 | 31.4 | 22.2 |
205
+ | LCB | 81.0 | 83.0 | 71.0 | 87.0 | 92.0 | 89.0 | 86.0 |
206
+ | SciCode | 41.0 | 36.0 | 45.0 | 50.0 | 56.0 | 52.0 | 39.0 |
207
+ | IFBench | 70.0 | 72.0 | 57.0 | 58.0 | 70.0 | 75.0 | 61.0 |
208
+ | AA-LCR | 62.0 | 61.0 | 66.0 | 74.0 | 71.0 | 73.0 | 65.0 |
209
+ | 𝜏²-Bench Telecom | 87.0 | 87.0 | 78.0 | 90.0 | 87.0 | 85.0 | 91.0 |
210
+
211
+ > **Evaluation Methodology Notes**:
212
+ > - **SWE-bench Verified**: Tested on internal infrastructure using [Claude Code](https://github.com/anthropics/claude-code), [Droid](https://factory.ai/), or [mini-swe-agent](https://github.com/SWE-agent/mini-SWE-agent) as scaffolding. By default, we utilized Claude Code metrics. When using Claude Code, the default system prompt was overridden. Results represent the average of 4 runs.
213
+ > - **Multi-SWE-Bench & SWE-bench Multilingual & SWT-bench & SWE-Perf**: Tested on internal infrastructure using Claude Code as scaffolding, with the default system prompt overridden. Results represent the average of 4 runs.
214
+ > - **Terminal-bench 2.0**: Tested using Claude Code on our internal evaluation framework. We verified the full dataset and fixed environmental issues. Timeout limits were removed, while all other configurations remained consistent with official settings. Results represent the average of 4 runs.
215
+ > - **SWE Review**: Built upon the SWE framework, this internal benchmark for code defect review covers diverse languages and scenarios, evaluating both defect recall and hallucination rates. A review is deemed "correct" only if the model accurately identifies the target defect and ensures all other reported findings are valid and free of hallucinations. All evaluations are executed using Claude Code, with final results reflecting the average of four independent runs per test case. We plan to open-source this benchmark soon.
216
+ > - **OctoCodingbench**: An internal benchmark focused on long-horizon instruction following for Code Agents in complex development scenarios. It conducts end-to-end behavioral supervision within a dynamic environment spanning diverse tech stacks and scaffolding frameworks. The core objective is to evaluate the model's ability to integrate and execute "composite instruction constraints"—encompassing System Prompts (SP), User Queries, Memory, Tool Schemas, and specifications such as `Agents.md`, `Claude.md`, and `Skill.md`. Adopting a strict "single-violation-failure" scoring mechanism, the final result is the average pass rate across 4 runs, quantifying the model's robustness in translating static constraints into precise behaviors. We plan to open-source this benchmark soon.
217
+ > - **VIBE**: An internal benchmark that utilizes Claude Code as scaffolding to automatically verify a program's interactive logic and visual effects. Scores are calculated through a unified pipeline comprising requirement sets, containerized deployment, and dynamic interaction environments. Final results represent the average of 3 runs. We have open-sourced this benchmark at [VIBE](https://huggingface.co/datasets/MiniMaxAI/VIBE).
218
+ > - **Toolathlon**: The evaluation protocol remains consistent with the original paper.
219
+ > - **BrowseComp**: All scores were obtained using the same agent framework as [WebExplorer](https://arxiv.org/pdf/2509.06501) (Liu et al. 2025), with only minor fine-tuning of tool descriptions. We utilized the same 103-sample GAIA text-only validation subset as WebExplorer.
220
+ > - **BrowseComp (context management)**: When token usage exceeds 30% of the maximum context window, we retain the first AI response, the last five AI responses, and the tool outputs, discarding the remaining content.
221
+ > - **AIME25 ~ 𝜏²-Bench Telecom**: Derived from internal testing based on the evaluation datasets and methodology referenced in the [Artificial Analysis Intelligence Index](https://artificialanalysis.ai/).
222
+
223
+ ## Local Deployment Guide
224
+
225
+ Download the model from HuggingFace repository: https://huggingface.co/MiniMaxAI/MiniMax-M2.1
226
+
227
+ We recommend using the following inference frameworks (listed alphabetically) to serve the model:
228
+
229
+ ### SGLang
230
+
231
+ We recommend using [SGLang](https://docs.sglang.io/) to serve MiniMax-M2.1. Please refer to our [SGLang Deployment Guide](./docs/sglang_deploy_guide.md).
232
+
233
+ ### vLLM
234
+
235
+ We recommend using [vLLM](https://github.com/vllm-project/vllm) to serve MiniMax-M2.1. Please refer to our [vLLM Deployment Guide](./docs/vllm_deploy_guide.md).
236
+
237
+ ### Transformers
238
+
239
+ We recommend using [Transformers](https://github.com/huggingface/transformers) to serve MiniMax-M2.1. Please refer to our [Transformers Deployment Guide](./docs/transformers_deploy_guide.md).
240
+
241
+ ### Other Inference Engines
242
+
243
+ - [KTransformers](https://github.com/kvcache-ai/ktransformers/blob/main/doc/en/kt-kernel/MiniMax-M2.1-Tutorial.md)
244
+
245
+ ### Inference Parameters
246
+
247
+ We recommend using the following parameters for best performance: `temperature=1.0`, `top_p = 0.95`, `top_k = 40`. Default system prompt:
248
+
249
+ ```
250
+ You are a helpful assistant. Your name is MiniMax-M2.1 and is built by MiniMax.
251
+ ```
252
+
253
+ ## Tool Calling Guide
254
+
255
+ Please refer to our [Tool Calling Guide](./docs/tool_calling_guide.md).
256
+
257
+ ## Contact Us
258
+
259
+ Contact us at [model@minimax.io](mailto:model@minimax.io).
260
+
chat_template.jinja ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {# ----------‑‑‑ special token variables ‑‑‑---------- #}
2
+ {%- set toolcall_begin_token = '<minimax:tool_call>' -%}
3
+ {%- set toolcall_end_token = '</minimax:tool_call>' -%}
4
+ {#- Tool Rendering Functions ============================================== -#}
5
+ {%- macro render_tool_namespace(namespace_name, tool_list) -%}
6
+ {%- for tool in tool_list -%}
7
+ <tool>{{ tool.function | tojson(ensure_ascii=False) }}</tool>
8
+ {% endfor -%}
9
+ {%- endmacro -%}
10
+ {%- macro visible_text(content) -%}
11
+ {%- if content is string -%}
12
+ {{ content }}
13
+ {%- elif content is iterable and content is not mapping -%}
14
+ {%- for item in content -%}
15
+ {%- if item is mapping and item.type == 'text' -%}
16
+ {{- item.text }}
17
+ {%- elif item is string -%}
18
+ {{- item }}
19
+ {%- endif -%}
20
+ {%- endfor -%}
21
+ {%- elif content is none -%}
22
+ {{- '' }}
23
+ {%- else -%}
24
+ {{- content }}
25
+ {%- endif -%}
26
+ {%- endmacro -%}
27
+ {#- System Message Construction ============================================ -#}
28
+ {%- macro build_system_message(system_message) -%}
29
+ {%- if system_message and system_message.content -%}
30
+ {{- visible_text(system_message.content) }}
31
+ {%- else -%}
32
+ {%- if model_identity is not defined -%}
33
+ {%- set model_identity = "You are a helpful assistant. Your name is MiniMax-M2.1 and is built by MiniMax." -%}
34
+ {%- endif -%}
35
+ {{- model_identity }}
36
+ {%- endif -%}
37
+
38
+ {#- Handle current_date -#}
39
+ {%- if system_message and system_message.current_date -%}
40
+ {{- '\n' ~ 'Current date: ' + system_message.current_date }}
41
+ {%- endif -%}
42
+ {#- Handle current_location -#}
43
+ {%- if system_message and system_message.current_location -%}
44
+ {{- '\n' ~ 'Current location: ' + system_message.current_location }}
45
+ {%- endif -%}
46
+ {%- endmacro -%}
47
+ {#- Main Template Logic ================================================= -#}
48
+ {#- Extract system message (only first message if it's system) -#}
49
+ {%- set system_message = none -%}
50
+ {%- set conversation_messages = messages -%}
51
+ {%- if messages and messages[0].role == "system" -%}
52
+ {%- set system_message = messages[0] -%}
53
+ {%- set conversation_messages = messages[1:] -%}
54
+ {%- endif -%}
55
+ {#- Get the last user message turn, for interleved thinking -#}
56
+ {%- set ns = namespace(last_user_index=-1) %}
57
+ {% for m in conversation_messages %}
58
+ {%- if m.role == 'user' %}
59
+ {% set ns.last_user_index = loop.index0 -%}
60
+ {%- endif %}
61
+ {%- endfor %}
62
+ {#- Render system message -#}
63
+ {{- ']~!b[' ~ ']~b]system' ~ '\n' }}
64
+ {{- build_system_message(system_message) }}
65
+ {#- Render tools if available -#}
66
+ {%- if tools -%}
67
+ {{- '\n\n' ~ '# Tools' ~ '\n' ~ 'You may call one or more tools to assist with the user query.\nHere are the tools available in JSONSchema format:' ~ '\n' }}
68
+ {{- '\n' ~ '<tools>' ~ '\n' }}
69
+ {{- render_tool_namespace("functions", tools) }}
70
+ {{- '</tools>' ~ '\n\n' }}
71
+ {{- 'When making tool calls, use XML format to invoke tools and pass parameters:' ~ '\n' }}
72
+ {{- '\n' ~ toolcall_begin_token }}
73
+ <invoke name="tool-name-1">
74
+ <parameter name="param-key-1">param-value-1</parameter>
75
+ <parameter name="param-key-2">param-value-2</parameter>
76
+ ...
77
+ </invoke>
78
+ {{- '\n' ~ toolcall_end_token }}
79
+ {%- endif -%}
80
+ {{- '[e~[\n' }}
81
+
82
+ {#- Render messages -#}
83
+ {%- set last_tool_call = namespace(name=none) -%}
84
+ {%- for message in conversation_messages -%}
85
+ {%- if message.role == 'assistant' -%}
86
+ {#- Only render reasoning_content if no user message follows -#}
87
+ {{- ']~b]ai' ~ '\n' }}
88
+
89
+ {%- set reasoning_content = '' %}
90
+ {%- set content = visible_text(message.content) %}
91
+ {%- if message.reasoning_content is string %}
92
+ {%- set reasoning_content = message.reasoning_content %}
93
+ {%- else %}
94
+ {%- if '</think>' in content %}
95
+ {%- set reasoning_content = content.split('</think>')[0].strip('\n').split('<think>')[-1].strip('\n') %}
96
+ {%- set content = content.split('</think>')[-1].strip('\n') %}
97
+ {%- endif %}
98
+ {%- endif %}
99
+ {%- if reasoning_content and loop.index0 > ns.last_user_index -%}
100
+ {{- '<think>' ~ '\n' ~ reasoning_content ~ '\n' ~ '</think>' ~ '\n\n' }}
101
+ {%- endif -%}
102
+ {%- if content -%}
103
+ {{- content }}
104
+ {%- endif -%}
105
+ {%- if message.tool_calls -%}
106
+ {{- '\n' ~ toolcall_begin_token ~ '\n' }}
107
+
108
+ {%- for tool_call in message.tool_calls -%}
109
+ {%- if tool_call.function %}
110
+ {%- set tool_call = tool_call.function %}
111
+ {%- endif %}
112
+ {{- '<invoke name="' + tool_call.name + '">' }}
113
+ {% set _args = tool_call.arguments %}
114
+ {%- for k, v in _args.items() %}
115
+ {{- '<parameter name="' + k + '">' }}
116
+ {{- v | tojson(ensure_ascii=False) if v is not string else v }}
117
+ {{- '</parameter>' }}
118
+ {% endfor %}
119
+ {{- '</invoke>' ~ '\n' }}
120
+ {%- endfor -%}
121
+
122
+ {{- toolcall_end_token}}
123
+ {%- if message.tool_calls[-1].function -%}
124
+ {%- set last_tool_call.name = message.tool_calls[-1].function.name -%}
125
+ {%- else -%}
126
+ {%- set last_tool_call.name = message.tool_calls[-1].name -%}
127
+ {%- endif -%}
128
+ {%- else -%}
129
+ {%- set last_tool_call.name = none -%}
130
+ {%- endif -%}
131
+ {{- '[e~[' ~ '\n' }}
132
+
133
+ {%- elif message.role == 'tool' -%}
134
+ {%- if last_tool_call.name is none -%}
135
+ {{- raise_exception("Message has tool role, but there was no previous assistant message with a tool call!") }}
136
+ {%- endif -%}
137
+ {%- if loop.first or (conversation_messages[loop.index0 - 1].role != 'tool') -%}
138
+ {{- ']~b]tool' }}
139
+ {%- endif -%}
140
+ {%- if message.content is string -%}
141
+ {{- '\n<response>' }}
142
+ {{- message.content }}
143
+ {{- '</response>' }}
144
+ {%- else -%}
145
+ {%- for tr in message.content -%}
146
+ {{- '\n<response>' }}
147
+ {{- tr.output if tr.output is defined else (tr.text if tr.type == 'text' and tr.text is defined else tr) }}
148
+ {{- '\n</response>' }}
149
+ {%- endfor -%}
150
+ {%- endif -%}
151
+ {%- if loop.last or (conversation_messages[loop.index0 + 1].role != 'tool') -%}
152
+ {{- '[e~[\n' -}}
153
+ {%- endif -%}
154
+
155
+ {%- elif message.role == 'user' -%}
156
+ {{- ']~b]user' ~ '\n' }}
157
+ {{- visible_text(message.content) }}
158
+ {{- '[e~[' ~ '\n' }}
159
+ {%- endif -%}
160
+ {%- endfor -%}
161
+
162
+ {#- Generation prompt -#}
163
+ {%- if add_generation_prompt -%}
164
+ {{- ']~b]ai' ~ '\n' ~ '<think>' ~ '\n' }}
165
+ {%- endif -%}
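
A quick way to sanity-check the template above (a minimal sketch; it assumes the published MiniMaxAI/MiniMax-M2.1 tokenizer ships this exact template):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2.1", trust_remote_code=True)
messages = [
    {"role": "user", "content": "Hi"},
    {"role": "assistant", "content": "Hello!", "reasoning_content": "Greet the user."},
    {"role": "user", "content": "What is 2 + 2?"},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)

# reasoning_content from turns before the last user message is dropped
# (interleaved thinking), and the generation prompt ends with '<think>\n'.
assert "Greet the user." not in text
assert text.endswith("]~b]ai\n<think>\n")
```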
config.json ADDED
@@ -0,0 +1,111 @@
1
+ {
2
+ "name_or_path": "tclf90/MiniMax-M2.1-AWQ-FP16Mix2",
3
+ "architectures": [
4
+ "MiniMaxM2ForCausalLM"
5
+ ],
6
+ "attn_type_list": [
7
+ 1,
8
+ 1,
9
+ 1,
10
+ 1,
11
+ 1,
12
+ 1,
13
+ 1,
14
+ 1,
15
+ 1,
16
+ 1,
17
+ 1,
18
+ 1,
19
+ 1,
20
+ 1,
21
+ 1,
22
+ 1,
23
+ 1,
24
+ 1,
25
+ 1,
26
+ 1,
27
+ 1,
28
+ 1,
29
+ 1,
30
+ 1,
31
+ 1,
32
+ 1,
33
+ 1,
34
+ 1,
35
+ 1,
36
+ 1,
37
+ 1,
38
+ 1,
39
+ 1,
40
+ 1,
41
+ 1,
42
+ 1,
43
+ 1,
44
+ 1,
45
+ 1,
46
+ 1,
47
+ 1,
48
+ 1,
49
+ 1,
50
+ 1,
51
+ 1,
52
+ 1,
53
+ 1,
54
+ 1,
55
+ 1,
56
+ 1,
57
+ 1,
58
+ 1,
59
+ 1,
60
+ 1,
61
+ 1,
62
+ 1,
63
+ 1,
64
+ 1,
65
+ 1,
66
+ 1,
67
+ 1,
68
+ 1
69
+ ],
70
+ "auto_map": {
71
+ "AutoConfig": "configuration_minimax_m2.MiniMaxM2Config",
72
+ "AutoModelForCausalLM": "modeling_minimax_m2.MiniMaxM2ForCausalLM"
73
+ },
74
+ "head_dim": 128,
75
+ "hidden_act": "silu",
76
+ "hidden_size": 3072,
77
+ "intermediate_size": 1536,
78
+ "max_position_embeddings": 196608,
79
+ "model_type": "minimax_m2",
80
+ "mtp_transformer_layers": 1,
81
+ "num_attention_heads": 48,
82
+ "num_experts_per_tok": 8,
83
+ "num_hidden_layers": 62,
84
+ "num_key_value_heads": 8,
85
+ "num_local_experts": 256,
86
+ "num_mtp_modules": 3,
87
+ "qk_norm_type": "per_layer",
88
+ "quantization_config": {
89
+ "quant_method": "awq",
90
+ "bits": 4,
91
+ "group_size": 128,
92
+ "version": "gemm",
93
+ "zero_point": true,
94
+ "modules_to_not_convert": [
95
+ "self_attn"
96
+ ]
97
+ },
98
+ "rms_norm_eps": 1e-06,
99
+ "rope_theta": 5000000,
100
+ "rotary_dim": 64,
101
+ "scoring_func": "sigmoid",
102
+ "shared_intermediate_size": 0,
103
+ "tie_word_embeddings": false,
104
+ "transformers_version": "4.46.1",
105
+ "use_cache": true,
106
+ "use_mtp": true,
107
+ "use_qk_norm": true,
108
+ "use_routing_bias": true,
109
+ "vocab_size": 200064,
110
+ "torch_dtype": "float16"
111
+ }
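
The fields above can also be inspected programmatically (a sketch, not verified against this exact checkpoint; it assumes `transformers` with remote code enabled, since `auto_map` routes `AutoConfig` to `configuration_minimax_m2.MiniMaxM2Config`):

```python
from transformers import AutoConfig

# Load the config from this repo; trust_remote_code pulls in MiniMaxM2Config.
cfg = AutoConfig.from_pretrained("tclf90/MiniMax-M2.1-AWQ-FP16Mix2", trust_remote_code=True)
print(cfg.num_local_experts, cfg.num_experts_per_tok)  # 256 experts, top-8 routing
print(cfg.quantization_config["bits"])                 # 4-bit AWQ; self_attn is left unquantized
```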
configuration.json ADDED
@@ -0,0 +1 @@
1
+ {"framework": "pytorch", "task": "text-generation", "allow_remote": true}
configuration_minimax_m2.py ADDED
@@ -0,0 +1,200 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from transformers.configuration_utils import PretrainedConfig
24
+
25
+
26
+ class MiniMaxM2Config(PretrainedConfig):
27
+ r"""
28
+ This is the configuration class to store the configuration of a [`MiniMaxM2Model`]. It is used to instantiate an
29
+ MiniMaxM2 model according to the specified arguments, defining the model architecture. Instantiating a configuration
30
+ with the defaults will yield a similar configuration to that of the MiniMaxM2-7B-v0.1 or MiniMaxM2-7B-Instruct-v0.1.
31
+
32
+ [minimax_m2ai/MiniMaxM2-8x7B](https://huggingface.co/minimax_m2ai/MiniMaxM2-8x7B)
33
+ [minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1](https://huggingface.co/minimax_m2ai/MiniMaxM2-7B-Instruct-v0.1)
34
+
35
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
36
+ documentation from [`PretrainedConfig`] for more information.
37
+
38
+
39
+ Args:
40
+ vocab_size (`int`, *optional*, defaults to 32000):
41
+ Vocabulary size of the MiniMaxM2 model. Defines the number of different tokens that can be represented by the
42
+ `inputs_ids` passed when calling [`MiniMaxM2Model`]
43
+ hidden_size (`int`, *optional*, defaults to 4096):
44
+ Dimension of the hidden representations.
45
+ intermediate_size (`int`, *optional*, defaults to 14336):
46
+ Dimension of the MLP representations.
47
+ num_hidden_layers (`int`, *optional*, defaults to 32):
48
+ Number of hidden layers in the Transformer encoder.
49
+ num_attention_heads (`int`, *optional*, defaults to 32):
50
+ Number of attention heads for each attention layer in the Transformer encoder.
51
+ num_key_value_heads (`int`, *optional*, defaults to 8):
52
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
53
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
54
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
55
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
56
+ by meanpooling all the original heads within that group. For more details, check out [this
57
+ paper](https://huggingface.co/papers/2305.13245). If it is not specified, will default to `8`.
58
+ head_dim (`int`, *optional*, defaults to `hidden_size // num_attention_heads`):
59
+ The attention head dimension.
60
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
61
+ The non-linear activation function (function or string) in the decoder.
62
+ max_position_embeddings (`int`, *optional*, defaults to `4096*32`):
63
+ The maximum sequence length that this model might ever be used with. MiniMaxM2's sliding window attention
64
+ allows sequence of up to 4096*32 tokens.
65
+ initializer_range (`float`, *optional*, defaults to 0.02):
66
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
67
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
68
+ The epsilon used by the rms normalization layers.
69
+ use_cache (`bool`, *optional*, defaults to `True`):
70
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
71
+ relevant if `config.is_decoder=True`.
72
+ pad_token_id (`int`, *optional*):
73
+ The id of the padding token.
74
+ bos_token_id (`int`, *optional*, defaults to 1):
75
+ The id of the "beginning-of-sequence" token.
76
+ eos_token_id (`int`, *optional*, defaults to 2):
77
+ The id of the "end-of-sequence" token.
78
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
79
+ Whether the model's input and output word embeddings should be tied.
80
+ rope_theta (`float`, *optional*, defaults to 1000000.0):
81
+ The base period of the RoPE embeddings.
82
+ sliding_window (`int`, *optional*):
83
+ Sliding window attention window size. If not specified, will default to `4096`.
84
+ attention_dropout (`float`, *optional*, defaults to 0.0):
85
+ The dropout ratio for the attention probabilities.
86
+ num_experts_per_tok (`int`, *optional*, defaults to 2):
87
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
88
+ parameter
89
+ num_local_experts (`int`, *optional*, defaults to 8):
90
+ Number of experts per Sparse MLP layer.
91
+ output_router_logits (`bool`, *optional*, defaults to `False`):
92
+ Whether or not the router logits should be returned by the model. Enabling this will also
93
+ allow the model to output the auxiliary loss. See [here]() for more details
94
+ router_aux_loss_coef (`float`, *optional*, defaults to 0.001):
95
+ The aux loss factor for the total loss.
96
+ router_jitter_noise (`float`, *optional*, defaults to 0.0):
97
+ Amount of noise to add to the router.
98
+
99
+ ```python
100
+ >>> from transformers import MiniMaxM2Model, MiniMaxM2Config
101
+
102
+ >>> # Initializing a MiniMaxM2 7B style configuration
103
+ >>> configuration = MiniMaxM2Config()
104
+
105
+ >>> # Initializing a model from the MiniMaxM2 7B style configuration
106
+ >>> model = MiniMaxM2Model(configuration)
107
+
108
+ >>> # Accessing the model configuration
109
+ >>> configuration = model.config
110
+ ```"""
111
+
112
+ model_type = "minimax_m2"
113
+ keys_to_ignore_at_inference = ["past_key_values"]
114
+ base_model_tp_plan = {
115
+ "layers.*.self_attn.q_proj": "colwise",
116
+ "layers.*.self_attn.k_proj": "colwise",
117
+ "layers.*.self_attn.v_proj": "colwise",
118
+ "layers.*.self_attn.o_proj": "rowwise",
119
+ "layers.*.block_sparse_moe.gate": "colwise_rep", # we need to replicate here to correctly route experts
120
+ "layers.*.block_sparse_moe.experts.*.w1": "colwise",
121
+ "layers.*.block_sparse_moe.experts.*.w2": "rowwise",
122
+ "layers.*.block_sparse_moe.experts.*.w3": "colwise",
123
+ }
124
+ base_model_pp_plan = {
125
+ "embed_tokens": (["input_ids"], ["inputs_embeds"]),
126
+ "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
127
+ "norm": (["hidden_states"], ["hidden_states"]),
128
+ }
129
+
130
+ def __init__(
131
+ self,
132
+ vocab_size=32000,
133
+ hidden_size=4096,
134
+ intermediate_size=14336,
135
+ num_hidden_layers=32,
136
+ num_attention_heads=32,
137
+ num_key_value_heads=8,
138
+ head_dim=None,
139
+ hidden_act="silu",
140
+ max_position_embeddings=4096 * 32,
141
+ initializer_range=0.02,
142
+ rms_norm_eps=1e-5,
143
+ use_cache=True,
144
+ pad_token_id=None,
145
+ bos_token_id=1,
146
+ eos_token_id=2,
147
+ tie_word_embeddings=False,
148
+ rope_theta=1e6,
149
+ sliding_window=None,
150
+ attention_dropout=0.0,
151
+ num_experts_per_tok=2,
152
+ num_local_experts=8,
153
+ output_router_logits=False,
154
+ router_aux_loss_coef=0.001,
155
+ router_jitter_noise=0.0,
156
+ **kwargs,
157
+ ):
158
+ self.vocab_size = vocab_size
159
+ self.max_position_embeddings = max_position_embeddings
160
+ self.hidden_size = hidden_size
161
+ self.intermediate_size = intermediate_size
162
+ self.num_hidden_layers = num_hidden_layers
163
+ self.num_attention_heads = num_attention_heads
164
+ self.sliding_window = sliding_window
165
+
166
+ # for backward compatibility
167
+ if num_key_value_heads is None:
168
+ num_key_value_heads = num_attention_heads
169
+
170
+ self.num_key_value_heads = num_key_value_heads
171
+ self.hidden_act = hidden_act
172
+ self.initializer_range = initializer_range
173
+ self.rms_norm_eps = rms_norm_eps
174
+ self.use_cache = use_cache
175
+ self.rope_theta = rope_theta
176
+ self.attention_dropout = attention_dropout
177
+ self.head_dim = head_dim
178
+
179
+ self.num_experts_per_tok = num_experts_per_tok
180
+ self.num_local_experts = num_local_experts
181
+ self.output_router_logits = output_router_logits
182
+ self.router_aux_loss_coef = router_aux_loss_coef
183
+ self.router_jitter_noise = router_jitter_noise
184
+
185
+ self.use_qk_norm = kwargs.pop("use_qk_norm", False)
186
+ self.rotary_dim = kwargs.pop("rotary_dim", self.head_dim)
187
+ self.partial_rotary_factor = kwargs.pop("partial_rotary_factor", 1)
188
+ if self.head_dim is not None:
189
+ self.partial_rotary_factor = self.rotary_dim / self.head_dim
190
+
191
+ super().__init__(
192
+ pad_token_id=pad_token_id,
193
+ bos_token_id=bos_token_id,
194
+ eos_token_id=eos_token_id,
195
+ tie_word_embeddings=tie_word_embeddings,
196
+ **kwargs,
197
+ )
198
+
199
+
200
+ __all__ = ["MiniMaxM2Config"]
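
Note the tail of `__init__`: when `head_dim` is set, `partial_rotary_factor` is derived as `rotary_dim / head_dim`. With the values shipped in this repo's `config.json` (`head_dim=128`, `rotary_dim=64`) that resolves to 0.5, i.e. rotary embeddings cover half of each attention head. A small illustrative sketch, assuming this module is importable as `configuration_minimax_m2`:

```python
from configuration_minimax_m2 import MiniMaxM2Config

cfg = MiniMaxM2Config(head_dim=128, rotary_dim=64)
print(cfg.partial_rotary_factor)  # 0.5 -> RoPE spans 64 of the 128 head dims
```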
docs/sglang_deploy_guide.md ADDED
@@ -0,0 +1,110 @@
1
+ # MiniMax M2.1 Model SGLang Deployment Guide
2
+
3
+ [English Version](./sglang_deploy_guide.md) | [Chinese Version](./sglang_deploy_guide_cn.md)
4
+
5
+ We recommend using [SGLang](https://github.com/sgl-project/sglang) to deploy the [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) model. SGLang is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing capabilities, and deeply optimized underlying performance. We recommend reviewing SGLang's official documentation to check hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models. You only need to change the model name during deployment.
10
+
11
+ - [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
12
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
13
+
14
+ The deployment process is illustrated below using MiniMax-M2.1 as an example.
15
+
16
+ ## System Requirements
17
+
18
+ - OS: Linux
19
+
20
+ - Python: 3.9 - 3.12
21
+
22
+ - GPU:
23
+
24
+ - compute capability 7.0 or higher
25
+
26
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
27
+
28
+ The following are recommended configurations; actual requirements should be adjusted based on your use case:
29
+
30
+ - 4x 96GB GPUs: supports a context length of up to 400K tokens.
31
+
32
+ - 8x 144GB GPUs: supports a context length of up to 3M tokens.
33
+
34
+ ## Deployment with Python
35
+
36
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
37
+
38
+ We recommend installing SGLang in a fresh Python environment:
39
+
40
+ ```bash
41
+ uv venv
42
+ source .venv/bin/activate
43
+ git clone https://github.com/sgl-project/sglang
44
+ cd sglang
45
+ uv pip install -e "python" --prerelease=allow
46
+ ```
47
+
48
+ Run the following command to start the SGLang server. SGLang will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
49
+
50
+ 4-GPU deployment command:
51
+
52
+ ```bash
53
+ python -m sglang.launch_server \
54
+ --model-path MiniMaxAI/MiniMax-M2.1 \
55
+ --tp-size 4 \
56
+ --tool-call-parser minimax-m2 \
57
+ --reasoning-parser minimax-append-think \
58
+ --host 0.0.0.0 \
59
+ --trust-remote-code \
60
+ --port 8000 \
61
+ --mem-fraction-static 0.85
62
+ ```
63
+
64
+ 8-GPU deployment command:
65
+
66
+ ```bash
67
+ python -m sglang.launch_server \
68
+ --model-path MiniMaxAI/MiniMax-M2.1 \
69
+ --tp-size 8 \
70
+ --ep-size 8 \
71
+ --tool-call-parser minimax-m2 \
72
+ --trust-remote-code \
73
+ --host 0.0.0.0 \
74
+ --reasoning-parser minimax-append-think \
75
+ --port 8000 \
76
+ --mem-fraction-static 0.85
77
+ ```
78
+
79
+ ## Testing Deployment
80
+
81
+ After startup, you can test the SGLang OpenAI-compatible API with the following command:
82
+
83
+ ```bash
84
+ curl http://localhost:8000/v1/chat/completions \
85
+ -H "Content-Type: application/json" \
86
+ -d '{
87
+ "model": "MiniMaxAI/MiniMax-M2.1",
88
+ "messages": [
89
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
90
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
91
+ ]
92
+ }'
93
+ ```
94
+
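+ The same endpoint can also be exercised with the OpenAI Python SDK (a minimal sketch; it assumes the server started above is reachable at localhost:8000 — the `api_key` is unused but required by the client):
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+
+ response = client.chat.completions.create(
+     model="MiniMaxAI/MiniMax-M2.1",
+     messages=[{"role": "user", "content": "Who won the world series in 2020?"}],
+ )
+ print(response.choices[0].message.content)
+ ```
+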
95
+ ## Common Issues
96
+
97
+ ### MiniMax-M2 model is not currently supported
98
+
99
+ Upgrade SGLang to the latest stable release (>= v0.5.4.post1).
100
+
101
+ ## Getting Support
102
+
103
+ If you encounter any issues while deploying the MiniMax model:
104
+
105
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
106
+
107
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
108
+
109
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
110
+
docs/sglang_deploy_guide_cn.md ADDED
@@ -0,0 +1,119 @@
1
+ # MiniMax M2.1 Model SGLang Deployment Guide
2
+
3
+ [English Version](./sglang_deploy_guide.md) | [Chinese Version](./sglang_deploy_guide_cn.md)
4
+
5
+ We recommend using [SGLang](https://github.com/sgl-project/sglang) to deploy the [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) model. SGLang is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, strong batch request handling, and deeply optimized low-level performance. We recommend checking SGLang's official documentation for hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models; only the model name needs to be changed at deployment time.
10
+
11
+ - [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
12
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
13
+
14
+ The deployment process below uses MiniMax-M2.1 as an example.
15
+
16
+ ## System Requirements
17
+
18
+ - OS: Linux
19
+
20
+ - Python: 3.9 - 3.12
21
+
22
+ - GPU:
23
+
24
+ - compute capability 7.0 or higher
25
+
26
+ - Memory requirements: 220 GB for weights, plus 240 GB per 1M context tokens
27
+
28
+ The following are recommended configurations; adjust to your actual workload:
29
+
30
+ - 4x 96GB GPUs: supports a context length of up to 400K tokens.
31
+
32
+ - 8x 144GB GPUs: supports a context length of up to 3M tokens.
33
+
34
+ ## Deployment with Python
35
+
36
+ A virtual environment (such as **venv**, **conda**, or **uv**) is recommended to avoid dependency conflicts.
37
+
38
+ We recommend installing SGLang in a fresh Python environment:
39
+
40
+ ```bash
41
+ uv venv
42
+ source .venv/bin/activate
43
+ git clone https://github.com/sgl-project/sglang
44
+ cd sglang
45
+ uv pip install -e "python" --prerelease=allow
46
+ ```
47
+
48
+ Run the following command to start the SGLang server; SGLang will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
49
+
50
+ 4-GPU deployment command:
51
+
52
+ ```bash
53
+ python -m sglang.launch_server \
54
+ --model-path MiniMaxAI/MiniMax-M2.1 \
55
+ --tp-size 4 \
56
+ --tool-call-parser minimax-m2 \
57
+ --reasoning-parser minimax-append-think \
58
+ --host 0.0.0.0 \
59
+ --trust-remote-code \
60
+ --port 8000 \
61
+ --mem-fraction-static 0.85
62
+ ```
63
+
64
+ 8-GPU deployment command:
65
+
66
+ ```bash
67
+ python -m sglang.launch_server \
68
+ --model-path MiniMaxAI/MiniMax-M2.1 \
69
+ --tp-size 8 \
70
+ --ep-size 8 \
71
+ --tool-call-parser minimax-m2 \
72
+ --trust-remote-code \
73
+ --host 0.0.0.0 \
74
+ --reasoning-parser minimax-append-think \
75
+ --port 8000 \
76
+ --mem-fraction-static 0.85
77
+ ```
78
+
79
+ ## Testing the Deployment
80
+
81
+ After startup, you can test the SGLang OpenAI-compatible API with the following command:
82
+
83
+ ```bash
84
+ curl http://localhost:8000/v1/chat/completions \
85
+ -H "Content-Type: application/json" \
86
+ -d '{
87
+ "model": "MiniMaxAI/MiniMax-M2.1",
88
+ "messages": [
89
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
90
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
91
+ ]
92
+ }'
93
+ ```
94
+
95
+ ## Common Issues
96
+
97
+ ### Hugging Face Network Issues
98
+
99
+ If you run into network issues, set a mirror endpoint before pulling the model.
100
+
101
+ ```bash
102
+ export HF_ENDPOINT=https://hf-mirror.com
103
+ ```
104
+
105
+ ### MiniMax-M2 model is not currently supported
106
+
107
+ Upgrade SGLang to the latest stable release (>= v0.5.4.post1).
108
+
109
+ ## Getting Support
110
+
111
+ If you encounter any issues while deploying the MiniMax model:
112
+
113
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
114
+
115
+ - Submit an issue in our [GitHub](https://github.com/MiniMax-AI) repository
116
+
117
+ - Share feedback via our [official WeCom (WeChat Work) group](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg)
118
+
119
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
docs/tool_calling_guide.md ADDED
@@ -0,0 +1,487 @@
1
+ # MiniMax-M2.1 Tool Calling Guide
2
+
3
+ [English Version](./tool_calling_guide.md) | [Chinese Version](./tool_calling_guide_cn.md)
4
+
5
+ MiniMax-M2.1 supports the same toolcall syntax as MiniMax-M2.
6
+
7
+ ## Introduction
8
+
9
+ The MiniMax-M2.1 model supports tool calling capabilities, enabling the model to identify when external tools need to be called and output tool call parameters in a structured format. This document provides detailed instructions on how to use the tool calling features of MiniMax-M2.1.
10
+
11
+ ## Basic Example
12
+
13
+ The following Python script implements a weather query tool call example based on the OpenAI SDK:
14
+
15
+ ```python
16
+ from openai import OpenAI
17
+ import json
18
+
19
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
20
+
21
+ def get_weather(location: str, unit: str):
22
+ return f"Getting the weather for {location} in {unit}..."
23
+
24
+ tool_functions = {"get_weather": get_weather}
25
+
26
+ tools = [{
27
+ "type": "function",
28
+ "function": {
29
+ "name": "get_weather",
30
+ "description": "Get the current weather in a given location",
31
+ "parameters": {
32
+ "type": "object",
33
+ "properties": {
34
+ "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
35
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
36
+ },
37
+ "required": ["location", "unit"]
38
+ }
39
+ }
40
+ }]
41
+
42
+ response = client.chat.completions.create(
43
+ model=client.models.list().data[0].id,
44
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}],
45
+ tools=tools,
46
+ tool_choice="auto"
47
+ )
48
+
49
+ print(response)
50
+
51
+ tool_call = response.choices[0].message.tool_calls[0].function
52
+ print(f"Function called: {tool_call.name}")
53
+ print(f"Arguments: {tool_call.arguments}")
54
+ print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
55
+ ```
56
+
57
+ **Output Example:**
58
+ ```
59
+ Function called: get_weather
60
+ Arguments: {"location": "San Francisco, CA", "unit": "celsius"}
61
+ Result: Getting the weather for San Francisco, CA in celsius...
62
+ ```
63
+
64
+ ## Manually Parsing Model Output
65
+
66
+ **We strongly recommend using vLLM or SGLang to parse tool calls.** If you cannot use the built-in parsers of inference engines that support MiniMax-M2.1 (e.g., vLLM and SGLang), or need to use another inference framework (such as transformers or TGI), you can parse the model's raw output manually using the method below. This approach requires you to parse the XML tag format of the model output yourself.
67
+
68
+ ### Example Using Transformers
69
+
70
+ Here is a complete example using the transformers library:
71
+
72
+ ```python
73
+ from transformers import AutoTokenizer
74
+
75
+ def get_default_tools():
76
+ return [
77
+ {
78
+ "name": "get_current_weather",
79
+ "description": "Get the latest weather for a location",
80
+ "parameters": {
81
+ "type": "object",
82
+ "properties": {
83
+ "location": {
84
+ "type": "string",
85
+ "description": "A certain city, such as Beijing, Shanghai"
86
+ }
87
+ },
88
+ "required": ["location"]
89
+ }
90
+ }
92
+ ]
93
+
94
+ # Load model and tokenizer
95
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
96
+ prompt = "What's the weather like in Shanghai today?"
97
+ messages = [
98
+ {"role": "system", "content": "You are a helpful assistant."},
99
+ {"role": "user", "content": prompt},
100
+ ]
101
+
102
+ # Enable function calling tools
103
+ tools = get_default_tools()
104
+
105
+ # Apply chat template and include tool definitions
106
+ text = tokenizer.apply_chat_template(
107
+ messages,
108
+ tokenize=False,
109
+ add_generation_prompt=True,
110
+ tools=tools
111
+ )
112
+
113
+ # Send request (using any inference service)
114
+ import requests
115
+ payload = {
116
+ "model": "MiniMaxAI/MiniMax-M2.1",
117
+ "prompt": text,
118
+ "max_tokens": 4096
119
+ }
120
+ response = requests.post(
121
+ "http://localhost:8000/v1/completions",
122
+ headers={"Content-Type": "application/json"},
123
+ json=payload,
124
+ stream=False,
125
+ )
126
+
127
+ # Model output needs manual parsing
128
+ raw_output = response.json()["choices"][0]["text"]
129
+ print("Raw output:", raw_output)
130
+
131
+ # Use the parsing function below to process the output
132
+ tool_calls = parse_tool_calls(raw_output, tools)
133
+ ```
134
+
135
+ ## 🛠️ Tool Call Definition
136
+
137
+ ### Tool Structure
138
+
139
+ Tool calls need to define the `tools` field in the request body. Each tool consists of the following parts:
140
+
141
+ ```json
142
+ {
143
+ "tools": [
144
+ {
145
+ "name": "search_web",
146
+ "description": "Search function.",
147
+ "parameters": {
148
+ "properties": {
149
+ "query_list": {
150
+ "description": "Keywords for search, list should contain 1 element.",
151
+ "items": { "type": "string" },
152
+ "type": "array"
153
+ },
154
+ "query_tag": {
155
+ "description": "Category of query",
156
+ "items": { "type": "string" },
157
+ "type": "array"
158
+ }
159
+ },
160
+ "required": [ "query_list", "query_tag" ],
161
+ "type": "object"
162
+ }
163
+ }
164
+ ]
165
+ }
166
+ ```
167
+
168
+ **Field Descriptions:**
169
+ - `name`: Function name
170
+ - `description`: Function description
171
+ - `parameters`: Function parameter definition
172
+ - `properties`: Parameter property definition, where key is the parameter name and value contains detailed parameter description
173
+ - `required`: List of required parameters
174
+ - `type`: Parameter type (usually "object")
175
+
176
+ ### Internal Processing Format
177
+
178
+ When processing within the MiniMax-M2.1 model, tool definitions are converted to a special format and concatenated to the input text. Here is a complete example:
179
+
180
+ ```
181
+ ]~!b[]~b]system
182
+ You are a helpful assistant.
183
+
184
+ # Tools
185
+ You may call one or more tools to assist with the user query.
186
+ Here are the tools available in JSONSchema format:
187
+
188
+ <tools>
189
+ <tool>{"name": "search_web", "description": "Search function.", "parameters": {"type": "object", "properties": {"query_list": {"type": "array", "items": {"type": "string"}, "description": "Keywords for search, list should contain 1 element."}, "query_tag": {"type": "array", "items": {"type": "string"}, "description": "Category of query"}}, "required": ["query_list", "query_tag"]}}</tool>
190
+ </tools>
191
+
192
+ When making tool calls, use XML format to invoke tools and pass parameters:
193
+
194
+ <minimax:tool_call>
195
+ <invoke name="tool-name-1">
196
+ <parameter name="param-key-1">param-value-1</parameter>
197
+ <parameter name="param-key-2">param-value-2</parameter>
198
+ ...
199
+ </invoke>
200
+ [e~[
201
+ ]~b]user
202
+ When were the latest announcements from OpenAI and Gemini?[e~[
203
+ ]~b]ai
204
+ <think>
205
+ ```
206
+
207
+ **Format Description:**
208
+
209
+ - `]~!b[]~b]system`: System message start marker
210
+ - `[e~[`: Message end marker
211
+ - `]~b]user`: User message start marker
212
+ - `]~b]ai`: Assistant message start marker
213
+ - `]~b]tool`: Tool result message start marker
214
+ - `<tools>...</tools>`: Tool definition area, each tool is wrapped with `<tool>` tag, content is JSON Schema
215
+ - `<minimax:tool_call>...</minimax:tool_call>`: Tool call area
216
+ - `<think>...</think>`: Thinking process marker during generation
217
+
218
+ ### Model Output Format
219
+
220
+ MiniMax-M2.1 uses structured XML tag format:
221
+
222
+ ```xml
223
+ <minimax:tool_call>
224
+ <invoke name="search_web">
225
+ <parameter name="query_tag">["technology", "events"]</parameter>
226
+ <parameter name="query_list">["\"OpenAI\" \"latest\" \"release\""]</parameter>
227
+ </invoke>
228
+ <invoke name="search_web">
229
+ <parameter name="query_tag">["technology", "events"]</parameter>
230
+ <parameter name="query_list">["\"Gemini\" \"latest\" \"release\""]</parameter>
231
+ </invoke>
232
+ </minimax:tool_call>
233
+ ```
234
+
235
+ Each tool call uses the `<invoke name="function_name">` tag, and parameters use the `<parameter name="parameter_name">` tag wrapper.
236
+
237
+ ## Manually Parsing Tool Call Results
238
+
239
+ ### Parsing Tool Calls
240
+
241
+ MiniMax-M2.1 uses structured XML tags, which require a different parsing approach. The core function is as follows:
242
+
243
+ ```python
244
+ import re
245
+ import json
246
+ from typing import Any, Optional, List, Dict
247
+
248
+
249
+ def extract_name(name_str: str) -> str:
250
+ """Extract name from quoted string"""
251
+ name_str = name_str.strip()
252
+ if name_str.startswith('"') and name_str.endswith('"'):
253
+ return name_str[1:-1]
254
+ elif name_str.startswith("'") and name_str.endswith("'"):
255
+ return name_str[1:-1]
256
+ return name_str
257
+
258
+
259
+ def convert_param_value(value: str, param_type: str) -> Any:
260
+ """Convert parameter value based on parameter type"""
261
+ if value.lower() == "null":
262
+ return None
263
+
264
+ param_type = param_type.lower()
265
+
266
+ if param_type in ["string", "str", "text"]:
267
+ return value
268
+ elif param_type in ["integer", "int"]:
269
+ try:
270
+ return int(value)
271
+ except (ValueError, TypeError):
272
+ return value
273
+ elif param_type in ["number", "float"]:
274
+ try:
275
+ val = float(value)
276
+ return val if val != int(val) else int(val)
277
+ except (ValueError, TypeError):
278
+ return value
279
+ elif param_type in ["boolean", "bool"]:
280
+ return value.lower() in ["true", "1"]
281
+ elif param_type in ["object", "array"]:
282
+ try:
283
+ return json.loads(value)
284
+ except json.JSONDecodeError:
285
+ return value
286
+ else:
287
+ # Try JSON parsing, return string if failed
288
+ try:
289
+ return json.loads(value)
290
+ except json.JSONDecodeError:
291
+ return value
292
+
293
+
294
+ def parse_tool_calls(model_output: str, tools: Optional[List[Dict]] = None) -> List[Dict]:
295
+ """
296
+ Extract all tool calls from model output
297
+
298
+ Args:
299
+ model_output: Complete output text from the model
300
+ tools: Tool definition list for getting parameter type information, format can be:
301
+ - [{"name": "...", "parameters": {...}}]
302
+ - [{"type": "function", "function": {"name": "...", "parameters": {...}}}]
303
+
304
+ Returns:
305
+ Parsed tool call list, each element contains name and arguments fields
306
+
307
+ Example:
308
+ >>> tools = [{
309
+ ... "name": "get_weather",
310
+ ... "parameters": {
311
+ ... "type": "object",
312
+ ... "properties": {
313
+ ... "location": {"type": "string"},
314
+ ... "unit": {"type": "string"}
315
+ ... }
316
+ ... }
317
+ ... }]
318
+ >>> output = '''<minimax:tool_call>
319
+ ... <invoke name="get_weather">
320
+ ... <parameter name="location">San Francisco</parameter>
321
+ ... <parameter name="unit">celsius</parameter>
322
+ ... </invoke>
323
+ ... </minimax:tool_call>'''
324
+ >>> result = parse_tool_calls(output, tools)
325
+ >>> print(result)
326
+ [{'name': 'get_weather', 'arguments': {'location': 'San Francisco', 'unit': 'celsius'}}]
327
+ """
328
+ # Quick check if tool call marker is present
329
+ if "<minimax:tool_call>" not in model_output:
330
+ return []
331
+
332
+ tool_calls = []
333
+
334
+ try:
335
+ # Match all <minimax:tool_call> blocks
336
+ tool_call_regex = re.compile(r"<minimax:tool_call>(.*?)</minimax:tool_call>", re.DOTALL)
337
+ invoke_regex = re.compile(r"<invoke name=(.*?)</invoke>", re.DOTALL)
338
+ parameter_regex = re.compile(r"<parameter name=(.*?)</parameter>", re.DOTALL)
339
+
340
+ # Iterate through all tool_call blocks
341
+ for tool_call_match in tool_call_regex.findall(model_output):
342
+ # Iterate through all invokes in this block
343
+ for invoke_match in invoke_regex.findall(tool_call_match):
344
+ # Extract function name
345
+ name_match = re.search(r'^([^>]+)', invoke_match)
346
+ if not name_match:
347
+ continue
348
+
349
+ function_name = extract_name(name_match.group(1))
350
+
351
+ # Get parameter configuration
352
+ param_config = {}
353
+ if tools:
354
+ for tool in tools:
355
+ tool_name = tool.get("name") or tool.get("function", {}).get("name")
356
+ if tool_name == function_name:
357
+ params = tool.get("parameters") or tool.get("function", {}).get("parameters")
358
+ if isinstance(params, dict) and "properties" in params:
359
+ param_config = params["properties"]
360
+ break
361
+
362
+ # Extract parameters
363
+ param_dict = {}
364
+ for match in parameter_regex.findall(invoke_match):
365
+ param_match = re.search(r'^([^>]+)>(.*)', match, re.DOTALL)
366
+ if param_match:
367
+ param_name = extract_name(param_match.group(1))
368
+ param_value = param_match.group(2).strip()
369
+
370
+ # Remove leading and trailing newlines
371
+ if param_value.startswith('\n'):
372
+ param_value = param_value[1:]
373
+ if param_value.endswith('\n'):
374
+ param_value = param_value[:-1]
375
+
376
+ # Get parameter type and convert
377
+ param_type = "string"
378
+ if param_name in param_config:
379
+ if isinstance(param_config[param_name], dict) and "type" in param_config[param_name]:
380
+ param_type = param_config[param_name]["type"]
381
+
382
+ param_dict[param_name] = convert_param_value(param_value, param_type)
383
+
384
+ tool_calls.append({
385
+ "name": function_name,
386
+ "arguments": param_dict
387
+ })
388
+
389
+ except Exception as e:
390
+ print(f"Failed to parse tool calls: {e}")
391
+ return []
392
+
393
+ return tool_calls
394
+ ```
395
+
396
+ **Usage Example:**
397
+
398
+ ```python
399
+ # Define tools
400
+ tools = [
401
+ {
402
+ "name": "get_weather",
403
+ "parameters": {
404
+ "type": "object",
405
+ "properties": {
406
+ "location": {"type": "string"},
407
+ "unit": {"type": "string"}
408
+ },
409
+ "required": ["location", "unit"]
410
+ }
411
+ }
412
+ ]
413
+
414
+ # Model output
415
+ model_output = """Let me help you query the weather.
416
+ <minimax:tool_call>
417
+ <invoke name="get_weather">
418
+ <parameter name="location">San Francisco</parameter>
419
+ <parameter name="unit">celsius</parameter>
420
+ </invoke>
421
+ </minimax:tool_call>"""
422
+
423
+ # Parse tool calls
424
+ tool_calls = parse_tool_calls(model_output, tools)
425
+
426
+ # Output results
427
+ for call in tool_calls:
428
+ print(f"Function called: {call['name']}")
429
+ print(f"Arguments: {call['arguments']}")
430
+ # Output: Function called: get_weather
431
+ # Arguments: {'location': 'San Francisco', 'unit': 'celsius'}
432
+ ```
433
+
434
+ ### Executing Tool Calls
435
+
436
+ After parsing is complete, you can execute the corresponding tool and construct the return result:
437
+
438
+ ```python
439
+ def execute_function_call(function_name: str, arguments: dict):
440
+ """Execute function call and return result"""
441
+ if function_name == "get_weather":
442
+ location = arguments.get("location", "Unknown location")
443
+ unit = arguments.get("unit", "celsius")
444
+ # Build function execution result
445
+ return {
446
+ "role": "tool",
447
+ "content": [
448
+ {
449
+ "name": function_name,
450
+ "type": "text",
451
+ "text": json.dumps({
452
+ "location": location,
453
+ "temperature": "25",
454
+ "unit": unit,
455
+ "weather": "Sunny"
456
+ }, ensure_ascii=False)
457
+ }
458
+ ]
459
+ }
460
+ elif function_name == "search_web":
461
+ query_list = arguments.get("query_list", [])
462
+ query_tag = arguments.get("query_tag", [])
463
+ # Simulate search results
464
+ return {
465
+ "role": "tool",
466
+ "content": [
467
+ {
468
+ "name": function_name,
469
+ "type": "text",
470
+ "text": f"Search keywords: {query_list}, Category: {query_tag}\nSearch results: Relevant information found"
471
+ }
472
+ ]
473
+ }
474
+
475
+ return None
476
+ ```
477
+
478
+ ### Returning Tool Execution Results to the Model
479
+
480
+ After successfully parsing tool calls, you should add the tool execution results to the conversation history so that the model can access and utilize this information in subsequent interactions. Refer to [chat_template.jinja](https://huggingface.co/MiniMaxAI/MiniMax-M2.1/blob/main/chat_template.jinja) for concatenation format.
481
+
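+ As a minimal sketch of the full loop against an OpenAI-compatible endpoint (assuming the `tools` list and `get_weather` function from the basic example above, served by vLLM or SGLang on localhost:8000):
+
+ ```python
+ from openai import OpenAI
+ import json
+
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
+ model = client.models.list().data[0].id
+
+ messages = [{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}]
+
+ # Turn 1: the model decides to call the tool.
+ first = client.chat.completions.create(model=model, messages=messages, tools=tools)
+ assistant = first.choices[0].message
+ messages.append({
+     "role": "assistant",
+     "content": assistant.content or "",
+     "tool_calls": [tc.model_dump() for tc in assistant.tool_calls],
+ })
+
+ # Execute each call locally and append the result as a tool message.
+ for tc in assistant.tool_calls:
+     result = get_weather(**json.loads(tc.function.arguments))
+     messages.append({"role": "tool", "tool_call_id": tc.id, "content": result})
+
+ # Turn 2: the model answers using the tool output.
+ final = client.chat.completions.create(model=model, messages=messages, tools=tools)
+ print(final.choices[0].message.content)
+ ```
+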
482
+ ## References
483
+
484
+ - [MiniMax-M2.1 Model Repository](https://github.com/MiniMax-AI/MiniMax-M2.1)
485
+ - [vLLM Project Homepage](https://github.com/vllm-project/vllm)
486
+ - [SGLang Project Homepage](https://github.com/sgl-project/sglang)
487
+ - [OpenAI Python SDK](https://github.com/openai/openai-python)
docs/tool_calling_guide_cn.md ADDED
@@ -0,0 +1,499 @@
1
+ # MiniMax-M2.1 Tool Calling Guide
2
+
3
+ [English Version](./tool_calling_guide.md) | [Chinese Version](./tool_calling_guide_cn.md)
4
+
5
+ MiniMax-M2.1 supports the same tool-call syntax as MiniMax-M2.
6
+
7
+ ## Introduction
8
+
9
+ The MiniMax-M2.1 model supports tool calling, enabling the model to recognize when external tools need to be called and to emit tool-call parameters in a structured format. This document explains how to use the tool calling features of MiniMax-M2.1.
10
+
11
+ ## Basic Example
12
+
13
+ The following Python script implements a weather-query tool call example based on the OpenAI SDK:
14
+
15
+ ```python
16
+ from openai import OpenAI
17
+ import json
18
+
19
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="dummy")
20
+
21
+ def get_weather(location: str, unit: str):
22
+ return f"Getting the weather for {location} in {unit}..."
23
+
24
+ tool_functions = {"get_weather": get_weather}
25
+
26
+ tools = [{
27
+ "type": "function",
28
+ "function": {
29
+ "name": "get_weather",
30
+ "description": "Get the current weather in a given location",
31
+ "parameters": {
32
+ "type": "object",
33
+ "properties": {
34
+ "location": {"type": "string", "description": "City and state, e.g., 'San Francisco, CA'"},
35
+ "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
36
+ },
37
+ "required": ["location", "unit"]
38
+ }
39
+ }
40
+ }]
41
+
42
+ response = client.chat.completions.create(
43
+ model=client.models.list().data[0].id,
44
+ messages=[{"role": "user", "content": "What's the weather like in San Francisco? use celsius."}],
45
+ tools=tools,
46
+ tool_choice="auto"
47
+ )
48
+
49
+ print(response)
50
+
51
+ tool_call = response.choices[0].message.tool_calls[0].function
52
+ print(f"Function called: {tool_call.name}")
53
+ print(f"Arguments: {tool_call.arguments}")
54
+ print(f"Result: {get_weather(**json.loads(tool_call.arguments))}")
55
+ ```
56
+
57
+ **Output example:**
58
+ ```
59
+ Function called: get_weather
60
+ Arguments: {"location": "San Francisco, CA", "unit": "celsius"}
61
+ Result: Getting the weather for San Francisco, CA in celsius...
62
+ ```
63
+
64
+ ## Manually Parsing Model Output
65
+
66
+ **We strongly recommend using vLLM or SGLang to parse tool calls.** If you cannot use the built-in parsers of inference engines that support MiniMax-M2.1 (e.g., vLLM and SGLang), or need to use another inference framework (such as transformers or TGI), you can parse the model's raw output manually using the method below. This approach requires you to parse the XML tag format of the model output yourself.
67
+
68
+ ### Example Using Transformers
69
+
70
+ Here is a complete example using the transformers library:
71
+
72
+ ```python
73
+ from transformers import AutoTokenizer
74
+
75
+ def get_default_tools():
76
+ return [
77
+ {
78
+ "name": "get_current_weather",
79
+ "description": "Get the latest weather for a location",
80
+ "parameters": {
81
+ "type": "object",
82
+ "properties": {
83
+ "location": {
84
+ "type": "string",
85
+ "description": "A certain city, such as Beijing, Shanghai"
86
+ }
87
+ },
88
+ "required": ["location"]
89
+ }
90
+ }
92
+ ]
93
+
94
+ # Load model and tokenizer
95
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
96
+ prompt = "What's the weather like in Shanghai today?"
97
+ messages = [
98
+ {"role": "system", "content": "You are a helpful assistant."},
99
+ {"role": "user", "content": prompt},
100
+ ]
101
+
102
+ # Enable function calling tools
103
+ tools = get_default_tools()
104
+
105
+ # Apply chat template and include tool definitions
106
+ text = tokenizer.apply_chat_template(
107
+ messages,
108
+ tokenize=False,
109
+ add_generation_prompt=True,
110
+ tools=tools
111
+ )
112
+
113
+ # Send request (using any inference service)
114
+ import requests
115
+ payload = {
116
+ "model": "MiniMaxAI/MiniMax-M2.1",
117
+ "prompt": text,
118
+ "max_tokens": 4096
119
+ }
120
+ response = requests.post(
121
+ "http://localhost:8000/v1/completions",
122
+ headers={"Content-Type": "application/json"},
123
+ json=payload,
124
+ stream=False,
125
+ )
126
+
127
+ # Model output needs manual parsing
128
+ raw_output = response.json()["choices"][0]["text"]
129
+ print("Raw output:", raw_output)
130
+
131
+ # Use the parsing function below to process the output
132
+ tool_calls = parse_tool_calls(raw_output, tools)
133
+ ```
134
+
135
+ ## 🛠️ Tool Call Definition
136
+
137
+ ### Tool Structure
138
+
139
+ Tool calls are declared via the `tools` field in the request body. Each tool consists of the following parts:
140
+
141
+ ```json
142
+ {
143
+ "tools": [
144
+ {
145
+ "name": "search_web",
146
+ "description": "Search function.",
147
+ "parameters": {
148
+ "properties": {
149
+ "query_list": {
150
+ "description": "Keywords for search, list should contain 1 element.",
151
+ "items": { "type": "string" },
152
+ "type": "array"
153
+ },
154
+ "query_tag": {
155
+ "description": "Category of query",
156
+ "items": { "type": "string" },
157
+ "type": "array"
158
+ }
159
+ },
160
+ "required": [ "query_list", "query_tag" ],
161
+ "type": "object"
162
+ }
163
+ }
164
+ ]
165
+ }
166
+ ```
167
+
168
+ **Field descriptions:**
169
+ - `name`: function name
170
+ - `description`: function description
171
+ - `parameters`: function parameter definition
172
+ - `properties`: parameter property definitions, where the key is the parameter name and the value holds the detailed parameter description
173
+ - `required`: list of required parameters
174
+ - `type`: parameter type (usually "object")
175
+
176
+ ### Internal Processing Format
177
+
178
+ During internal processing by the MiniMax-M2.1 model, tool definitions are converted into a special format and concatenated onto the input text. Here is a complete example:
179
+
180
+ ```
181
+ ]~!b[]~b]system
182
+ You are a helpful assistant.
183
+
184
+ # Tools
185
+ You may call one or more tools to assist with the user query.
186
+ Here are the tools available in JSONSchema format:
187
+
188
+ <tools>
189
+ <tool>{"name": "search_web", "description": "Search function.", "parameters": {"type": "object", "properties": {"query_list": {"type": "array", "items": {"type": "string"}, "description": "Keywords for search, list should contain 1 element."}, "query_tag": {"type": "array", "items": {"type": "string"}, "description": "Category of query"}}, "required": ["query_list", "query_tag"]}}</tool>
190
+ </tools>
191
+
192
+ When making tool calls, use XML format to invoke tools and pass parameters:
193
+
194
+ <minimax:tool_call>
195
+ <invoke name="tool-name-1">
196
+ <parameter name="param-key-1">param-value-1</parameter>
197
+ <parameter name="param-key-2">param-value-2</parameter>
198
+ ...
199
+ </invoke>
200
+ [e~[
201
+ ]~b]user
202
+ When were the latest announcements from OpenAI and Gemini?[e~[
203
+ ]~b]ai
204
+ <think>
205
+ ```
206
+
207
+ **Format description:**
208
+
209
+ - `]~!b[]~b]system`: system message start marker
210
+ - `[e~[`: message end marker
211
+ - `]~b]user`: user message start marker
212
+ - `]~b]ai`: assistant message start marker
213
+ - `]~b]tool`: tool result message start marker
214
+ - `<tools>...</tools>`: tool definition area; each tool is wrapped in a `<tool>` tag whose content is a JSON Schema
215
+ - `<minimax:tool_call>...</minimax:tool_call>`: tool call area
216
+ - `<think>...</think>`: thinking-process marker during generation
217
+
218
+ ### Model Output Format
219
+
220
+ MiniMax-M2.1 uses a structured XML tag format:
221
+
222
+ ```xml
223
+ <minimax:tool_call>
224
+ <invoke name="search_web">
225
+ <parameter name="query_tag">["technology", "events"]</parameter>
226
+ <parameter name="query_list">["\"OpenAI\" \"latest\" \"release\""]</parameter>
227
+ </invoke>
228
+ <invoke name="search_web">
229
+ <parameter name="query_tag">["technology", "events"]</parameter>
230
+ <parameter name="query_list">["\"Gemini\" \"latest\" \"release\""]</parameter>
231
+ </invoke>
232
+ </minimax:tool_call>
233
+ ```
234
+
235
+ Each tool call uses an `<invoke name="function_name">` tag, and parameters are wrapped in `<parameter name="parameter_name">` tags.
236
+
237
+ ## Manually Parsing Tool Call Results
238
+
239
+ ### Parsing Tool Calls
240
+
241
+ MiniMax-M2.1 uses structured XML tags, which call for a different parsing approach. The core function is as follows:
242
+
243
+ ```python
244
+ import re
245
+ import json
246
+ from typing import Any, Optional, List, Dict
247
+
248
+
249
+ def extract_name(name_str: str) -> str:
250
+ """Extract name from quoted string"""
251
+ name_str = name_str.strip()
252
+ if name_str.startswith('"') and name_str.endswith('"'):
253
+ return name_str[1:-1]
254
+ elif name_str.startswith("'") and name_str.endswith("'"):
255
+ return name_str[1:-1]
256
+ return name_str
257
+
258
+
259
+ def convert_param_value(value: str, param_type: str) -> Any:
260
+ """Convert parameter value based on parameter type"""
261
+ if value.lower() == "null":
262
+ return None
263
+
264
+ param_type = param_type.lower()
265
+
266
+ if param_type in ["string", "str", "text"]:
267
+ return value
268
+ elif param_type in ["integer", "int"]:
269
+ try:
270
+ return int(value)
271
+ except (ValueError, TypeError):
272
+ return value
273
+ elif param_type in ["number", "float"]:
274
+ try:
275
+ val = float(value)
276
+ return val if val != int(val) else int(val)
277
+ except (ValueError, TypeError):
278
+ return value
279
+ elif param_type in ["boolean", "bool"]:
280
+ return value.lower() in ["true", "1"]
281
+ elif param_type in ["object", "array"]:
282
+ try:
283
+ return json.loads(value)
284
+ except json.JSONDecodeError:
285
+ return value
286
+ else:
287
+ # Try JSON parsing, return string if failed
288
+ try:
289
+ return json.loads(value)
290
+ except json.JSONDecodeError:
291
+ return value
292
+
293
+
294
+ def parse_tool_calls(model_output: str, tools: Optional[List[Dict]] = None) -> List[Dict]:
295
+ """
296
+ Extract all tool calls from model output
297
+
298
+ Args:
299
+ model_output: Complete output text from the model
300
+ tools: Tool definition list for getting parameter type information, format can be:
301
+ - [{"name": "...", "parameters": {...}}]
302
+ - [{"type": "function", "function": {"name": "...", "parameters": {...}}}]
303
+
304
+ Returns:
305
+ Parsed tool call list, each element contains name and arguments fields
306
+
307
+ Example:
308
+ >>> tools = [{
309
+ ... "name": "get_weather",
310
+ ... "parameters": {
311
+ ... "type": "object",
312
+ ... "properties": {
313
+ ... "location": {"type": "string"},
314
+ ... "unit": {"type": "string"}
315
+ ... }
316
+ ... }
317
+ ... }]
318
+ >>> output = '''<minimax:tool_call>
319
+ ... <invoke name="get_weather">
320
+ ... <parameter name="location">San Francisco</parameter>
321
+ ... <parameter name="unit">celsius</parameter>
322
+ ... </invoke>
323
+ ... </minimax:tool_call>'''
324
+ >>> result = parse_tool_calls(output, tools)
325
+ >>> print(result)
326
+ [{'name': 'get_weather', 'arguments': {'location': 'San Francisco', 'unit': 'celsius'}}]
327
+ """
328
+ # Quick check if tool call marker is present
329
+ if "<minimax:tool_call>" not in model_output:
330
+ return []
331
+
332
+ tool_calls = []
333
+
334
+ try:
335
+ # Match all <minimax:tool_call> blocks
336
+ tool_call_regex = re.compile(r"<minimax:tool_call>(.*?)</minimax:tool_call>", re.DOTALL)
337
+ invoke_regex = re.compile(r"<invoke name=(.*?)</invoke>", re.DOTALL)
338
+ parameter_regex = re.compile(r"<parameter name=(.*?)</parameter>", re.DOTALL)
339
+
340
+ # Iterate through all tool_call blocks
341
+ for tool_call_match in tool_call_regex.findall(model_output):
342
+ # Iterate through all invokes in this block
343
+ for invoke_match in invoke_regex.findall(tool_call_match):
344
+ # Extract function name
345
+ name_match = re.search(r'^([^>]+)', invoke_match)
346
+ if not name_match:
347
+ continue
348
+
349
+ function_name = extract_name(name_match.group(1))
350
+
351
+ # Get parameter configuration
352
+ param_config = {}
353
+ if tools:
354
+ for tool in tools:
355
+ tool_name = tool.get("name") or tool.get("function", {}).get("name")
356
+ if tool_name == function_name:
357
+ params = tool.get("parameters") or tool.get("function", {}).get("parameters")
358
+ if isinstance(params, dict) and "properties" in params:
359
+ param_config = params["properties"]
360
+ break
361
+
362
+ # Extract parameters
363
+ param_dict = {}
364
+ for match in parameter_regex.findall(invoke_match):
365
+ param_match = re.search(r'^([^>]+)>(.*)', match, re.DOTALL)
366
+ if param_match:
367
+ param_name = extract_name(param_match.group(1))
368
+ param_value = param_match.group(2).strip()
369
+
370
+ # Remove leading and trailing newlines
371
+ if param_value.startswith('\n'):
372
+ param_value = param_value[1:]
373
+ if param_value.endswith('\n'):
374
+ param_value = param_value[:-1]
375
+
376
+ # Get parameter type and convert
377
+ param_type = "string"
378
+ if param_name in param_config:
379
+ if isinstance(param_config[param_name], dict) and "type" in param_config[param_name]:
380
+ param_type = param_config[param_name]["type"]
381
+
382
+ param_dict[param_name] = convert_param_value(param_value, param_type)
383
+
384
+ tool_calls.append({
385
+ "name": function_name,
386
+ "arguments": param_dict
387
+ })
388
+
389
+ except Exception as e:
390
+ print(f"Failed to parse tool calls: {e}")
391
+ return []
392
+
393
+ return tool_calls
394
+ ```
395
+
396
+ **Usage example:**
397
+
398
+ ```python
399
+ # Define tools
400
+ tools = [
401
+ {
402
+ "name": "get_weather",
403
+ "parameters": {
404
+ "type": "object",
405
+ "properties": {
406
+ "location": {"type": "string"},
407
+ "unit": {"type": "string"}
408
+ },
409
+ "required": ["location", "unit"]
410
+ }
411
+ }
412
+ ]
413
+
414
+ # Model output
415
+ model_output = """Let me help you query the weather.
416
+ <minimax:tool_call>
417
+ <invoke name="get_weather">
418
+ <parameter name="location">San Francisco</parameter>
419
+ <parameter name="unit">celsius</parameter>
420
+ </invoke>
421
+ </minimax:tool_call>"""
422
+
423
+ # Parse tool calls
424
+ tool_calls = parse_tool_calls(model_output, tools)
425
+
426
+ # Output results
427
+ for call in tool_calls:
428
+ print(f"Function called: {call['name']}")
429
+ print(f"Arguments: {call['arguments']}")
430
+ # Output: Function called: get_weather
431
+ # Arguments: {'location': 'San Francisco', 'unit': 'celsius'}
432
+ ```
433
+
434
+ ### Executing Tool Calls
435
+
436
+ After parsing, you can execute the corresponding tool and construct the return result:
437
+
438
+ ```python
439
+ def execute_function_call(function_name: str, arguments: dict):
440
+ """Execute function call and return result"""
441
+ if function_name == "get_weather":
442
+ location = arguments.get("location", "Unknown location")
443
+ unit = arguments.get("unit", "celsius")
444
+ # Build function execution result
445
+ return {
446
+ "role": "tool",
447
+ "content": [
448
+ {
449
+ "name": function_name,
450
+ "type": "text",
451
+ "text": json.dumps({
452
+ "location": location,
453
+ "temperature": "25",
454
+ "unit": unit,
455
+ "weather": "Sunny"
456
+ }, ensure_ascii=False)
457
+ }
458
+ ]
459
+ }
460
+ elif function_name == "search_web":
461
+ query_list = arguments.get("query_list", [])
462
+ query_tag = arguments.get("query_tag", [])
463
+ # Simulate search results
464
+ return {
465
+ "role": "tool",
466
+ "content": [
467
+ {
468
+ "name": function_name,
469
+ "type": "text",
470
+ "text": f"Search keywords: {query_list}, Category: {query_tag}\nSearch results: Relevant information found"
471
+ }
472
+ ]
473
+ }
474
+
475
+ return None
476
+ ```
477
+
478
+ ### Returning Tool Execution Results to the Model
479
+
480
+ After successfully parsing tool calls, you should add the tool execution results to the conversation history so the model can access and use this information in subsequent turns. See [chat_template.jinja](https://huggingface.co/MiniMaxAI/MiniMax-M2.1/blob/main/chat_template.jinja) for the concatenation format.
481
+
482
+ ## References
483
+
484
+ - [MiniMax-M2.1 Model Repository](https://github.com/MiniMax-AI/MiniMax-M2.1)
485
+ - [vLLM Project Homepage](https://github.com/vllm-project/vllm)
486
+ - [SGLang Project Homepage](https://github.com/sgl-project/sglang)
487
+ - [OpenAI Python SDK](https://github.com/openai/openai-python)
488
+
489
+ ## Getting Support
490
+
491
+ If you encounter any issues:
492
+
493
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
494
+
495
+ - Submit an issue in our repository
496
+
497
+ - Share feedback via our [official WeCom (WeChat Work) group](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg)
498
+
499
+ We continuously optimize the model usage experience. Feedback is welcome!
docs/transformers_deploy_guide.md ADDED
@@ -0,0 +1,91 @@
1
+ # MiniMax M2.1 Model Transformers Deployment Guide
2
+
3
+ [English Version](./transformers_deploy_guide.md) | [Chinese Version](./transformers_deploy_guide_cn.md)
4
+
5
+ ## Applicable Models
6
+
7
+ This document applies to the following models. You only need to change the model name during deployment.
8
+
9
+ - [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
10
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
11
+
12
+ The deployment process is illustrated below using MiniMax-M2.1 as an example.
13
+
14
+ ## System Requirements
15
+
16
+ - OS: Linux
17
+
18
+ - Python: 3.9 - 3.12
19
+
20
+ - Transformers: 4.57.1
21
+
22
+ - GPU:
23
+
24
+ - compute capability 7.0 or higher
25
+
26
+ - Memory requirements: 220 GB for weights.
27
+
28
+ ## Deployment with Python
29
+
30
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
31
+
32
+ We recommend installing Transformers in a fresh Python environment:
33
+
34
+ ```bash
35
+ uv pip install transformers==4.57.1 torch accelerate --torch-backend=auto
36
+ ```
37
+
38
+ Run the following Python script to launch the model. Transformers will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
39
+
40
+ ```python
41
+ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
42
+ import torch
43
+
44
+ MODEL_PATH = "MiniMaxAI/MiniMax-M2.1"
45
+
46
+ model = AutoModelForCausalLM.from_pretrained(
47
+ MODEL_PATH,
48
+ device_map="auto",
49
+ trust_remote_code=True,
50
+ )
51
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
52
+
53
+ messages = [
54
+ {"role": "user", "content": [{"type": "text", "text": "What is your favourite condiment?"}]},
55
+ {"role": "assistant", "content": [{"type": "text", "text": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}]},
56
+ {"role": "user", "content": [{"type": "text", "text": "Do you have mayonnaise recipes?"}]}
57
+ ]
58
+
59
+ model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
60
+
61
+ generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
62
+
63
+ response = tokenizer.batch_decode(generated_ids)[0]
64
+
65
+ print(response)
66
+ ```
67
+
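+ Note that `batch_decode` above returns the prompt together with the completion. If you only want the newly generated text, one option (a small sketch reusing `model_inputs` and `generated_ids` from the script above) is to slice off the prompt tokens before decoding:
+
+ ```python
+ # Keep only the tokens produced after the prompt.
+ new_tokens = generated_ids[:, model_inputs.shape[1]:]
+ print(tokenizer.batch_decode(new_tokens, skip_special_tokens=True)[0])
+ ```
+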
68
+ ## Common Issues
69
+
70
+ ### Hugging Face Network Issues
71
+
72
+ If you encounter network issues, you can set up a proxy before pulling the model.
73
+
74
+ ```bash
75
+ export HF_ENDPOINT=https://hf-mirror.com
76
+ ```
77
+
78
+ ### MiniMax-M2 model is not currently supported
79
+
80
+ Please ensure that `trust_remote_code=True` is set when loading the model.
81
+
82
+ ## Getting Support
83
+
84
+ If you encounter any issues while deploying the MiniMax model:
85
+
86
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
87
+
88
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
89
+
90
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
91
+
docs/transformers_deploy_guide_cn.md ADDED
@@ -0,0 +1,92 @@
1
+ # MiniMax M2.1 Model Transformers Deployment Guide
2
+
3
+ [English Version](./transformers_deploy_guide.md) | [Chinese Version](./transformers_deploy_guide_cn.md)
4
+
5
+ ## Applicable Models
6
+
7
+ This document applies to the following models; you only need to change the model name at deployment time.
8
+
9
+ - [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
10
+ - [MiniMaxAI/MiniMax-M2](https://huggingface.co/MiniMaxAI/MiniMax-M2)
11
+
12
+ The deployment process is illustrated below using MiniMax-M2.1 as an example.
13
+
14
+ ## System Requirements
15
+
16
+ - OS: Linux
17
+
18
+ - Python: 3.9 - 3.12
19
+
20
+ - Transformers: 4.57.1
21
+
22
+ - GPU:
23
+
24
+ - compute capability 7.0 or higher
25
+
26
+ - Memory requirements: 220 GB for weights
27
+
28
+ ## Deployment with Python
29
+
30
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
31
+
32
+ We recommend installing Transformers in a fresh Python environment:
33
+
34
+ ```bash
35
+ uv pip install transformers==4.57.1 torch accelerate --torch-backend=auto
36
+ ```
37
+
38
+ Run the following Python script to launch the model. Transformers will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
39
+
40
+ ```python
41
+ from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig
42
+ import torch
43
+
44
+ MODEL_PATH = "MiniMaxAI/MiniMax-M2.1"
45
+
46
+ model = AutoModelForCausalLM.from_pretrained(
47
+ MODEL_PATH,
48
+ device_map="auto",
49
+ trust_remote_code=True,
50
+ )
51
+ tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
52
+
53
+ messages = [
54
+ {"role": "user", "content": [{"type": "text", "text": "What is your favourite condiment?"}]},
55
+ {"role": "assistant", "content": [{"type": "text", "text": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"}]},
56
+ {"role": "user", "content": [{"type": "text", "text": "Do you have mayonnaise recipes?"}]}
57
+ ]
58
+
59
+ model_inputs = tokenizer.apply_chat_template(messages, return_tensors="pt", add_generation_prompt=True).to("cuda")
60
+
61
+ generated_ids = model.generate(model_inputs, max_new_tokens=100, generation_config=model.generation_config)
62
+
63
+ response = tokenizer.batch_decode(generated_ids)[0]
64
+
65
+ print(response)
66
+ ```
67
+
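+ If you would rather see tokens as they are generated instead of waiting for the full completion, one option is the `TextStreamer` helper from Transformers; this is a minimal sketch reusing `model`, `tokenizer`, and `model_inputs` from the script above:
+
+ ```python
+ from transformers import TextStreamer
+
+ # Print decoded tokens to stdout as they are generated, skipping the prompt.
+ streamer = TextStreamer(tokenizer, skip_prompt=True)
+ model.generate(model_inputs, max_new_tokens=100, streamer=streamer)
+ ```
+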
68
+ ## Common Issues
69
+
70
+ ### Hugging Face Network Issues
71
+
72
+ If you encounter network issues, you can set up a proxy before pulling the model.
73
+
74
+ ```bash
75
+ export HF_ENDPOINT=https://hf-mirror.com
76
+ ```
77
+
78
+ ### MiniMax-M2 model is not currently supported
79
+
80
+ Please ensure that `trust_remote_code=True` is set when loading the model.
81
+
82
+ ## Getting Support
83
+
84
+ If you encounter any issues while deploying the MiniMax model:
85
+
86
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
87
+
88
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
89
+
90
+ - Give feedback via our [official WeCom (Enterprise WeChat) group](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg)
91
+
92
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
docs/vllm_deploy_guide.md ADDED
@@ -0,0 +1,113 @@
1
+ # MiniMax M2.1 Model vLLM Deployment Guide
2
+
3
+ [English Version](./vllm_deploy_guide.md) | [Chinese Version](./vllm_deploy_guide_cn.md)
4
+
5
+ We recommend using [vLLM](https://docs.vllm.ai/en/stable/) to deploy the [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) model. vLLM is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing capabilities, and deeply optimized underlying performance. We recommend reviewing vLLM's official documentation to check hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models. You only need to change the model name during deployment.
10
+
11
+ - [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
12
+
13
+ The deployment process is illustrated below using MiniMax-M2.1 as an example.
14
+
15
+ ## System Requirements
16
+
17
+ - OS: Linux
18
+
19
+ - Python: 3.9 - 3.12
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - Memory requirements: 220 GB for weights, 240 GB per 1M context tokens
26
+
27
+ The following are recommended configurations; actual requirements should be adjusted based on your use case:
28
+
29
+ - 4x 96GB GPUs: supports a total context length of up to 400K tokens.
30
+
31
+ - 8x 144GB GPUs: supports a total context length of up to 3M tokens.
32
+
33
+ ## Deployment with Python
34
+
35
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
36
+
37
+ We recommend installing vLLM in a fresh Python environment:
38
+
39
+ ```bash
40
+ uv venv
41
+ source .venv/bin/activate
42
+ uv pip install -U vllm --extra-index-url https://wheels.vllm.ai/nightly
43
+ ```
44
+
45
+ Run the following command to start the vLLM server. vLLM will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
46
+
47
+ 4-GPU deployment command:
48
+
49
+ ```bash
50
+ SAFETENSORS_FAST_GPU=1 vllm serve \
51
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
52
+ --tensor-parallel-size 4 \
53
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
54
+ --reasoning-parser minimax_m2_append_think
55
+ ```
56
+
57
+ 8-GPU deployment command:
58
+
59
+ ```bash
60
+ SAFETENSORS_FAST_GPU=1 vllm serve \
61
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
62
+ --enable_expert_parallel --tensor-parallel-size 8 \
63
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
64
+ --reasoning-parser minimax_m2_append_think
65
+ ```
66
+
67
+ ## Testing Deployment
68
+
69
+ After startup, you can test the vLLM OpenAI-compatible API with the following command:
70
+
71
+ ```bash
72
+ curl http://localhost:8000/v1/chat/completions \
73
+ -H "Content-Type: application/json" \
74
+ -d '{
75
+ "model": "MiniMaxAI/MiniMax-M2.1",
76
+ "messages": [
77
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
78
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
79
+ ]
80
+ }'
81
+ ```
82
+
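+ You can also exercise the same endpoint from Python with the [OpenAI Python SDK](https://github.com/openai/openai-python); this is a minimal sketch (the `"EMPTY"` API key is a placeholder, since vLLM does not require one by default):
+
+ ```python
+ from openai import OpenAI
+
+ # Point the OpenAI client at the local vLLM server.
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
+
+ response = client.chat.completions.create(
+     model="MiniMaxAI/MiniMax-M2.1",
+     messages=[{"role": "user", "content": "Who won the world series in 2020?"}],
+ )
+ print(response.choices[0].message.content)
+ ```
+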
83
+ ## Common Issues
84
+
85
+ ### MiniMax-M2 model is not currently supported
86
+
87
+ This vLLM version is outdated. Please upgrade to the latest version.
88
+
89
+ ### torch.AcceleratorError: CUDA error: an illegal memory access was encountered
90
+ Add `--compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"` to the startup parameters to resolve this issue. For example:
91
+
92
+ ```bash
93
+ SAFETENSORS_FAST_GPU=1 vllm serve \
94
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
95
+ --enable_expert_parallel --tensor-parallel-size 8 \
96
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
97
+ --reasoning-parser minimax_m2_append_think \
98
+ --compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"
99
+ ```
100
+
101
+ ### Output is garbled
102
+
103
+ If you encounter garbled output when serving these models with vLLM, upgrade to the latest nightly build (make sure the version is after commit [cf3eacfe58fa9e745c2854782ada884a9f992cf7](https://github.com/vllm-project/vllm/commit/cf3eacfe58fa9e745c2854782ada884a9f992cf7)).
104
+
105
+ ## Getting Support
106
+
107
+ If you encounter any issues while deploying the MiniMax model:
108
+
109
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
110
+
111
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
112
+
113
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
docs/vllm_deploy_guide_cn.md ADDED
@@ -0,0 +1,123 @@
1
+ # MiniMax M2.1 Model vLLM Deployment Guide
2
+
3
+ [English Version](./vllm_deploy_guide.md) | [Chinese Version](./vllm_deploy_guide_cn.md)
4
+
5
+ We recommend using [vLLM](https://docs.vllm.ai/en/stable/) to deploy the [MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1) model. vLLM is a high-performance inference engine with excellent serving throughput, efficient and intelligent memory management, powerful batch request processing, and deeply optimized low-level performance. We recommend reviewing vLLM's official documentation to check hardware compatibility before deployment.
6
+
7
+ ## Applicable Models
8
+
9
+ This document applies to the following models; you only need to change the model name at deployment time.
10
+
11
+ - [MiniMaxAI/MiniMax-M2.1](https://huggingface.co/MiniMaxAI/MiniMax-M2.1)
12
+
13
+ The deployment process is illustrated below using MiniMax-M2.1 as an example.
14
+
15
+ ## System Requirements
16
+
17
+ - OS: Linux
18
+
19
+ - Python: 3.9 - 3.12
20
+
21
+ - GPU:
22
+
23
+ - compute capability 7.0 or higher
24
+
25
+ - Memory requirements: 220 GB for weights, plus 240 GB per 1M context tokens
26
+
27
+ The following are recommended configurations; adjust to your actual workload:
28
+
29
+ - 4x 96GB GPUs: supports a total context length of up to 400K tokens.
30
+
31
+ - 8x 144GB GPUs: supports a total context length of up to 3M tokens.
32
+
33
+ ## Deployment with Python
34
+
35
+ It is recommended to use a virtual environment (such as **venv**, **conda**, or **uv**) to avoid dependency conflicts.
36
+
37
+ We recommend installing vLLM in a fresh Python environment:
38
+
39
+ ```bash
40
+ uv venv
41
+ source .venv/bin/activate
42
+ uv pip install -U vllm --extra-index-url https://wheels.vllm.ai/nightly
43
+ ```
44
+
45
+ Run the following command to start the vLLM server. vLLM will automatically download and cache the MiniMax-M2.1 model from Hugging Face.
46
+
47
+ 4-GPU deployment command:
48
+
49
+ ```bash
50
+ SAFETENSORS_FAST_GPU=1 vllm serve \
51
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
52
+ --tensor-parallel-size 4 \
53
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
54
+ --reasoning-parser minimax_m2_append_think
55
+ ```
56
+
57
+ 8-GPU deployment command:
58
+
59
+ ```bash
60
+ SAFETENSORS_FAST_GPU=1 vllm serve \
61
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
62
+ --enable_expert_parallel --tensor-parallel-size 8 \
63
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
64
+ --reasoning-parser minimax_m2_append_think
65
+ ```
66
+
67
+ ## Testing Deployment
68
+
69
+ After startup, you can test the vLLM OpenAI-compatible API with the following command:
70
+
71
+ ```bash
72
+ curl http://localhost:8000/v1/chat/completions \
73
+ -H "Content-Type: application/json" \
74
+ -d '{
75
+ "model": "MiniMaxAI/MiniMax-M2.1",
76
+ "messages": [
77
+ {"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
78
+ {"role": "user", "content": [{"type": "text", "text": "Who won the world series in 2020?"}]}
79
+ ]
80
+ }'
81
+ ```
82
+
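+ Since the server is started with `--enable-auto-tool-choice --tool-call-parser minimax_m2`, you can also check that tool calls come back as structured output. This is a minimal sketch using the OpenAI SDK, with `get_weather` as a stand-in tool definition:
+
+ ```python
+ from openai import OpenAI
+
+ client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
+
+ # A stand-in tool definition; the server should return structured tool_calls.
+ tools = [{
+     "type": "function",
+     "function": {
+         "name": "get_weather",
+         "parameters": {
+             "type": "object",
+             "properties": {"location": {"type": "string"}},
+             "required": ["location"],
+         },
+     },
+ }]
+
+ response = client.chat.completions.create(
+     model="MiniMaxAI/MiniMax-M2.1",
+     messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
+     tools=tools,
+ )
+ print(response.choices[0].message.tool_calls)
+ ```
+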
83
+ ## Common Issues
84
+
85
+ ### Hugging Face Network Issues
86
+
87
+ If you encounter network issues, you can set up a proxy before pulling the model.
88
+
89
+ ```bash
90
+ export HF_ENDPOINT=https://hf-mirror.com
91
+ ```
92
+
93
+ ### MiniMax-M2 model is not currently supported
94
+
95
+ This vLLM version is outdated. Please upgrade to the latest version.
96
+
97
+ ### torch.AcceleratorError: CUDA error: an illegal memory access was encountered
98
+ Add `--compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"` to the startup parameters to resolve this issue. For example:
99
+
100
+ ```bash
101
+ SAFETENSORS_FAST_GPU=1 vllm serve \
102
+ MiniMaxAI/MiniMax-M2.1 --trust-remote-code \
103
+ --enable_expert_parallel --tensor-parallel-size 8 \
104
+ --enable-auto-tool-choice --tool-call-parser minimax_m2 \
105
+ --reasoning-parser minimax_m2_append_think \
106
+ --compilation-config "{\"cudagraph_mode\": \"PIECEWISE\"}"
107
+ ```
108
+
109
+ ### Garbled Output
110
+
111
+ If you encounter garbled output when serving these models with vLLM, upgrade to the latest nightly build (make sure the version is after commit [cf3eacfe58fa9e745c2854782ada884a9f992cf7](https://github.com/vllm-project/vllm/commit/cf3eacfe58fa9e745c2854782ada884a9f992cf7)).
112
+
113
+ ## Getting Support
114
+
115
+ If you encounter any issues while deploying the MiniMax model:
116
+
117
+ - Contact our technical support team through official channels such as email at [model@minimax.io](mailto:model@minimax.io)
118
+
119
+ - Submit an issue on our [GitHub](https://github.com/MiniMax-AI) repository
120
+
121
+ - Give feedback via our [official WeCom (Enterprise WeChat) group](https://github.com/MiniMax-AI/MiniMax-AI.github.io/blob/main/images/wechat-qrcode.jpeg)
122
+
123
+ We continuously optimize the deployment experience for our models. Feedback is welcome!
generation_config.json ADDED
@@ -0,0 +1,9 @@
1
+ {
2
+ "bos_token_id": 200019,
3
+ "do_sample": true,
4
+ "eos_token_id": 200020,
5
+ "temperature": 1.0,
6
+ "top_p": 0.95,
7
+ "top_k": 40,
8
+ "transformers_version": "4.46.1"
9
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:70e0bb86318c2b37c69723bb9d68beebffa6f35f69c2d02e4fad7aef5c8aedb6
3
+ size 2999958832
model-00003-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:510b79c82e02a3484ce8cca8cd62df62e6d0ae520ff4edb5fd25cde30629b02b
3
+ size 2999673952
model-00005-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:50a86292245e33bcd0f61db0d5991bc69ed1cefba8b08d32d8f4d91e7c7a2f14
3
+ size 2999673912
model-00006-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:defdab2df72efb1da10320740b08c7df501f815263516291718ad838dfb5084b
3
+ size 2999139880
model-00007-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a18d1c5848998931b0cb1e924dccc5b6fec3ae1f7cca489e1f512001df65d35
3
+ size 2999675080
model-00014-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9eaadc9b09fc628a4f8567a51a29c486e230802a108babda6edcf8410f6d4f12
3
+ size 2986527544
model-00015-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6d238a02c7a7e57790102631526b2c01b0dffbc14aa6d92d6d5d3c3241fae204
3
+ size 3000034144
model-00016-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:884ee6bb812491575cc0da20aba7e7adc5fa1b53d8fa387d3c0d1a9e4d3ffdb7
3
+ size 2998289632
model-00017-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38bcabc14982ccdfeb10c46a4cbfbeeaf7706232dedcfc5550d51e024d44bc2d
3
+ size 3000457424
model-00019-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:553e556f0a5db5f01e76a311dc3a1d5f6c1f189f30ffe71b2907eec43b8ee07d
3
+ size 2999143416
model-00020-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:11e7a87ccc359f5e100ed903c075531bc1d6fcf0e47c67bd85f0a97438c7bc67
3
+ size 2999677504
model-00021-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:113152ca7edbe9bfc3506e70f10978d0a7937780f279d2d5e7386ab86db10e4c
3
+ size 2999143416
model-00022-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ddc1ed87576d68a798bd69c2c3bae55025ba7d747c1caeec8e64297d9fb8912
3
+ size 2999677496
model-00023-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2014235134f5f3b4aa14234c1bab1aaf695f8bcd8d641d63926b19af5af9d336
3
+ size 2999143400
model-00024-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ab9b6613a5609c6ff4317605850ac6c1e7be40ace26c0b7779ea3307ba4d0b9
3
+ size 2999677512
model-00027-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb876f238a506efd166ceb83071656c1985337657b9fb633cb990e7fedcc64d6
3
+ size 2999143408
model-00028-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5cc4a7fbf2438abf1fbc60536e56f19fc3d6bc7e8a1aead46f503f3b82d9e735
3
+ size 2999677480
model-00029-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32363ad64e4abfab2fd71fa7aedefe658dfac4673f3b126937cbdaeddad6c8a6
3
+ size 2999143440
model-00030-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:052443f4d11596d17c065ab244b10b177b846fc416fa2b8bf4ad10f9a6cc59f2
3
+ size 2999677384
model-00032-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c36800ca8f2136d0bac22542409143fbd315a3297c8d619bf58188189ac20d59
3
+ size 2999677376
model-00033-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d95d3ba9b8c1d389b0aa813944d98e563e68b9b6a09349c861886fd7979bd2e7
3
+ size 2999143448
model-00035-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35e7609753285a066748d1b33ffcc8a5e0f5177384f46534fbdf1b2079ca4393
3
+ size 2999142272
model-00036-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f8f02dec73dd55fb897a4d987cc6d1f275ca69f06ed31f4e10c8293d0ba0dd80
3
+ size 2999678648
model-00039-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e0d8c4d3cb11901a35645de29c34967913a60e4c067f672165dd481f30272d3c
3
+ size 2999142080
model-00042-of-00042.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29abd4085e54c50427bd1770b91626c201bdbdea2d925aee17249aec28fc295c
3
+ size 1900911296
model.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79e6cafdf5f56ec1a2d44eb4acd407931650faf7470ae6d19d3b1946921ca9bf
3
+ size 14015722
modeling_minimax_m2.py ADDED
@@ -0,0 +1,706 @@
1
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
2
+ # This file was automatically generated from src/transformers/models/minimax_m2/modular_minimax_m2.py.
3
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
4
+ # the file from the modular. If any change should be done, please apply the change to the
5
+ # modular_minimax_m2.py file directly. One of our CI enforces this.
6
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
7
+ # coding=utf-8
8
+ # Copyright 2025 the HuggingFace Team. All rights reserved.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+
23
+ from collections.abc import Callable
24
+ from typing import Optional, Union, Unpack
25
+
26
+ import torch
27
+ from torch import nn
28
+
29
+ from transformers.activations import ACT2FN
30
+ from transformers.cache_utils import Cache, DynamicCache
31
+ from transformers.generation import GenerationMixin
32
+ from transformers.integrations import use_kernel_forward_from_hub
33
+ from transformers.masking_utils import create_causal_mask, create_sliding_window_causal_mask
34
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
35
+ from transformers.modeling_layers import (
36
+ GenericForQuestionAnswering,
37
+ GenericForSequenceClassification,
38
+ GenericForTokenClassification,
39
+ GradientCheckpointingLayer,
40
+ )
41
+ from transformers.modeling_outputs import MoeCausalLMOutputWithPast, MoeModelOutputWithPast
42
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
43
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
44
+ from transformers.utils import TransformersKwargs, auto_docstring, can_return_tuple
45
+ from transformers.utils.deprecation import deprecate_kwarg
46
+ from transformers.utils.generic import OutputRecorder, check_model_inputs
47
+ from .configuration_minimax_m2 import MiniMaxM2Config
48
+
49
+
50
+ class MiniMaxM2MLP(nn.Module):
51
+ def __init__(self, config: MiniMaxM2Config):
52
+ super().__init__()
53
+ self.ffn_dim = config.intermediate_size
54
+ self.hidden_dim = config.hidden_size
55
+
56
+ self.w1 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
57
+ self.w2 = nn.Linear(self.ffn_dim, self.hidden_dim, bias=False)
58
+ self.w3 = nn.Linear(self.hidden_dim, self.ffn_dim, bias=False)
59
+
60
+ self.act_fn = ACT2FN[config.hidden_act]
61
+
62
+ def forward(self, hidden_states):
63
+ current_hidden_states = self.act_fn(self.w1(hidden_states)) * self.w3(hidden_states)
64
+ current_hidden_states = self.w2(current_hidden_states)
65
+ return current_hidden_states
66
+
67
+
68
+ class MiniMaxM2Experts(nn.ModuleList):
69
+ """
70
+ ModuleList of experts.
71
+ """
72
+
73
+ def __init__(self, config: MiniMaxM2Config):
74
+ super().__init__()
75
+ self.top_k = config.num_experts_per_tok
76
+ self.num_experts = config.num_local_experts
77
+ for _ in range(self.num_experts):
78
+ self.append(MiniMaxM2MLP(config))
79
+
80
+ def forward(
81
+ self, hidden_states: torch.Tensor, top_k_index: torch.Tensor, top_k_weights: torch.Tensor
82
+ ) -> torch.Tensor:
83
+ """
84
+ Args:
85
+ hidden_states: (batch_size * sequence_length, hidden_dim)
86
+ top_k_index: (batch_size * sequence_length, top_k)
87
+ top_k_weights: (batch_size * sequence_length, top_k)
88
+ Returns:
89
+ (batch_size * sequence_length, hidden_dim)
90
+ """
91
+ final_hidden_states = torch.zeros_like(hidden_states)
92
+ expert_mask = torch.nn.functional.one_hot(top_k_index, num_classes=self.num_experts).permute(2, 1, 0)
93
+
94
+ expert_hit = torch.greater(expert_mask.sum(dim=(-1, -2)), 0).nonzero()
95
+ for expert_idx in expert_hit:
96
+ idx, top_x = torch.where(expert_mask[expert_idx].squeeze(0))
97
+ current_state = hidden_states[None, top_x].reshape(-1, hidden_states.shape[-1])
98
+ current_hidden_states = self[expert_idx](current_state) * top_k_weights[top_x, idx, None]
99
+ final_hidden_states.index_add_(0, top_x, current_hidden_states.to(hidden_states.dtype))
100
+ return final_hidden_states
101
+
102
+
103
+ class MiniMaxM2SparseMoeBlock(nn.Module):
104
+ def __init__(self, config):
105
+ super().__init__()
106
+ self.top_k = config.num_experts_per_tok
107
+ self.jitter_noise = config.router_jitter_noise
108
+ self.gate = nn.Linear(config.hidden_size, config.num_local_experts, bias=False)
109
+ self.experts = MiniMaxM2Experts(config)
110
+ self.register_buffer("e_score_correction_bias", torch.zeros(config.num_local_experts))
111
+
112
+ def route_tokens_to_experts(self, router_logits):
113
+ routing_weights = torch.nn.functional.sigmoid(router_logits.float())
114
+ scores_for_choice = routing_weights + self.e_score_correction_bias
115
+ _, top_k_index = torch.topk(scores_for_choice, self.top_k, dim=-1, sorted=False)
116
+ top_k_weights = routing_weights.gather(1, top_k_index)
117
+ top_k_weights /= top_k_weights.sum(dim=-1, keepdim=True)
118
+ return top_k_index, top_k_weights.to(router_logits.dtype)
119
+
120
+ def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
121
+ batch_size, sequence_length, hidden_dim = hidden_states.shape
122
+ if self.training and self.jitter_noise > 0:
123
+ hidden_states *= torch.empty_like(hidden_states).uniform_(1.0 - self.jitter_noise, 1.0 + self.jitter_noise)
124
+ hidden_states = hidden_states.view(-1, hidden_states.shape[-1])
125
+ router_logits = self.gate(hidden_states)
126
+ top_k_index, top_k_weights = self.route_tokens_to_experts(router_logits)
127
+ hidden_states = self.experts(hidden_states, top_k_index, top_k_weights.to(hidden_states.dtype))
128
+ hidden_states = hidden_states.reshape(batch_size, sequence_length, hidden_dim)
129
+ return hidden_states, router_logits
130
+
131
+
132
+ @use_kernel_forward_from_hub("RMSNorm")
133
+ class MiniMaxM2RMSNorm(nn.Module):
134
+ def __init__(self, hidden_size, eps=1e-6):
135
+ """
136
+ MiniMaxM2RMSNorm is equivalent to T5LayerNorm
137
+ """
138
+ super().__init__()
139
+ self.weight = nn.Parameter(torch.ones(hidden_size))
140
+ self.variance_epsilon = eps
141
+
142
+ def forward(self, hidden_states):
143
+ input_dtype = hidden_states.dtype
144
+ hidden_states = hidden_states.to(torch.float32)
145
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
146
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
147
+ return self.weight * hidden_states.to(input_dtype)
148
+
149
+ def extra_repr(self):
150
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
151
+
152
+
153
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
154
+ """
155
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
156
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
157
+ """
158
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
159
+ if n_rep == 1:
160
+ return hidden_states
161
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
162
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
163
+
164
+
165
+ def eager_attention_forward(
166
+ module: nn.Module,
167
+ query: torch.Tensor,
168
+ key: torch.Tensor,
169
+ value: torch.Tensor,
170
+ attention_mask: Optional[torch.Tensor],
171
+ scaling: float,
172
+ dropout: float = 0.0,
173
+ **kwargs: Unpack[TransformersKwargs],
174
+ ):
175
+ key_states = repeat_kv(key, module.num_key_value_groups)
176
+ value_states = repeat_kv(value, module.num_key_value_groups)
177
+
178
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
179
+ if attention_mask is not None:
180
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
181
+ attn_weights = attn_weights + causal_mask
182
+
183
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
184
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
185
+ attn_output = torch.matmul(attn_weights, value_states)
186
+ attn_output = attn_output.transpose(1, 2).contiguous()
187
+
188
+ return attn_output, attn_weights
189
+
190
+
191
+ def rotate_half(x):
192
+ """Rotates half the hidden dims of the input."""
193
+ x1 = x[..., : x.shape[-1] // 2]
194
+ x2 = x[..., x.shape[-1] // 2 :]
195
+ return torch.cat((-x2, x1), dim=-1)
196
+
197
+
198
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
199
+ """Applies Rotary Position Embedding to the query and key tensors.
200
+
201
+ Args:
202
+ q (`torch.Tensor`): The query tensor.
203
+ k (`torch.Tensor`): The key tensor.
204
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
205
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
206
+ position_ids (`torch.Tensor`, *optional*):
207
+ Deprecated and unused.
208
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
209
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
210
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
211
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
212
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
213
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
214
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
215
+ Returns:
216
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
217
+ """
218
+ cos = cos.unsqueeze(unsqueeze_dim)
219
+ sin = sin.unsqueeze(unsqueeze_dim)
220
+
221
+ # Keep half or full tensor for later concatenation
222
+ rotary_dim = cos.shape[-1]
223
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
224
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
225
+
226
+ # Apply rotary embeddings on the first half or full tensor
227
+ q_embed = (q_rot * cos) + (rotate_half(q_rot) * sin)
228
+ k_embed = (k_rot * cos) + (rotate_half(k_rot) * sin)
229
+
230
+ # Concatenate back to full shape
231
+ q_embed = torch.cat([q_embed, q_pass], dim=-1)
232
+ k_embed = torch.cat([k_embed, k_pass], dim=-1)
233
+ return q_embed, k_embed
234
+
235
+
236
+ class MiniMaxM2Attention(nn.Module):
237
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
238
+
239
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
240
+ super().__init__()
241
+ self.config = config
242
+ self.layer_idx = layer_idx
243
+ self.head_dim = getattr(config, "head_dim", None) or config.hidden_size // config.num_attention_heads
244
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
245
+ self.scaling = self.head_dim**-0.5
246
+ self.attention_dropout = config.attention_dropout
247
+ self.is_causal = True
248
+ self.q_proj = nn.Linear(config.hidden_size, config.num_attention_heads * self.head_dim, bias=False)
249
+ self.k_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
250
+ self.v_proj = nn.Linear(config.hidden_size, config.num_key_value_heads * self.head_dim, bias=False)
251
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
252
+
253
+ self.use_qk_norm = config.use_qk_norm
254
+ if self.use_qk_norm:
255
+ self.q_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_attention_heads, eps=config.rms_norm_eps)
256
+ self.k_norm = MiniMaxM2RMSNorm(self.head_dim * config.num_key_value_heads, eps=config.rms_norm_eps)
257
+
258
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
259
+ def forward(
260
+ self,
261
+ hidden_states: torch.Tensor,
262
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
263
+ attention_mask: Optional[torch.Tensor],
264
+ past_key_values: Optional[Cache] = None,
265
+ cache_position: Optional[torch.LongTensor] = None,
266
+ **kwargs: Unpack[FlashAttentionKwargs],
267
+ ) -> tuple[torch.Tensor, Optional[torch.Tensor]]:
268
+ input_shape = hidden_states.shape[:-1]
269
+ hidden_shape = (*input_shape, -1, self.head_dim)
270
+
271
+ query_states = self.q_proj(hidden_states)
272
+ key_states = self.k_proj(hidden_states)
273
+ value_states = self.v_proj(hidden_states)
274
+
275
+ if self.use_qk_norm: # main diff from Llama
276
+ query_states = self.q_norm(query_states)
277
+ key_states = self.k_norm(key_states)
278
+
279
+ key_states = key_states.view(hidden_shape)
280
+ query_states = query_states.view(hidden_shape)
281
+ value_states = value_states.view(hidden_shape)
282
+
283
+ query_states = query_states.transpose(1, 2)
284
+ key_states = key_states.transpose(1, 2)
285
+ value_states = value_states.transpose(1, 2)
286
+
287
+ cos, sin = position_embeddings
288
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
289
+
290
+ if past_key_values is not None:
291
+ # sin and cos are specific to RoPE models; position_ids needed for the static cache
292
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
293
+ key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
294
+
295
+ attention_interface: Callable = eager_attention_forward
296
+ if self.config._attn_implementation != "eager":
297
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
298
+
299
+ attn_output, attn_weights = attention_interface(
300
+ self,
301
+ query_states,
302
+ key_states,
303
+ value_states,
304
+ attention_mask,
305
+ dropout=0.0 if not self.training else self.attention_dropout,
306
+ scaling=self.scaling,
307
+ **kwargs,
308
+ )
309
+
310
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
311
+ attn_output = self.o_proj(attn_output)
312
+ return attn_output, attn_weights
313
+
314
+
315
+ class MiniMaxM2DecoderLayer(GradientCheckpointingLayer):
316
+ def __init__(self, config: MiniMaxM2Config, layer_idx: int):
317
+ super().__init__()
318
+ self.hidden_size = config.hidden_size
319
+
320
+ self.self_attn = MiniMaxM2Attention(config, layer_idx)
321
+
322
+ self.block_sparse_moe = MiniMaxM2SparseMoeBlock(config)
323
+ self.input_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
324
+ self.post_attention_layernorm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
325
+
326
+ @deprecate_kwarg("past_key_value", new_name="past_key_values", version="4.58")
327
+ def forward(
328
+ self,
329
+ hidden_states: torch.Tensor,
330
+ position_embeddings: tuple[torch.Tensor, torch.Tensor],
331
+ attention_mask: Optional[torch.Tensor] = None,
332
+ position_ids: Optional[torch.LongTensor] = None,
333
+ past_key_values: Optional[Cache] = None,
334
+ cache_position: Optional[torch.LongTensor] = None,
335
+ **kwargs: Unpack[TransformersKwargs],
336
+ ) -> torch.FloatTensor:
337
+ residual = hidden_states
338
+
339
+ hidden_states = self.input_layernorm(hidden_states)
340
+
341
+ # Self Attention
342
+ hidden_states, _ = self.self_attn(
343
+ hidden_states=hidden_states,
344
+ position_embeddings=position_embeddings,
345
+ attention_mask=attention_mask,
346
+ position_ids=position_ids,
347
+ past_key_values=past_key_values,
348
+ cache_position=cache_position,
349
+ **kwargs,
350
+ )
351
+ hidden_states = residual + hidden_states
352
+
353
+ # Fully Connected
354
+ residual = hidden_states
355
+ hidden_states = self.post_attention_layernorm(hidden_states)
356
+ hidden_states, _ = self.block_sparse_moe(hidden_states)
357
+ hidden_states = residual + hidden_states
358
+
359
+ return hidden_states
360
+
361
+
362
+ class MiniMaxM2RotaryEmbedding(nn.Module):
363
+ inv_freq: torch.Tensor # fix linting for `register_buffer`
364
+
365
+ def __init__(self, config: MiniMaxM2Config, device=None):
366
+ super().__init__()
367
+ # BC: "rope_type" was originally "type"
368
+ if hasattr(config, "rope_scaling") and isinstance(config.rope_scaling, dict):
369
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
370
+ else:
371
+ self.rope_type = "default"
372
+ self.max_seq_len_cached = config.max_position_embeddings
373
+ self.original_max_seq_len = config.max_position_embeddings
374
+
375
+ self.config = config
376
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
377
+
378
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
379
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
380
+ self.original_inv_freq = self.inv_freq
381
+
382
+ @torch.no_grad()
383
+ @dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
384
+ def forward(self, x, position_ids):
385
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
386
+ position_ids_expanded = position_ids[:, None, :].float()
387
+
388
+ device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
389
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
390
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
391
+ emb = torch.cat((freqs, freqs), dim=-1)
392
+ cos = emb.cos() * self.attention_scaling
393
+ sin = emb.sin() * self.attention_scaling
394
+
395
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
396
+
397
+
398
+ @auto_docstring
399
+ class MiniMaxM2PreTrainedModel(PreTrainedModel):
400
+ config: MiniMaxM2Config
401
+ base_model_prefix = "model"
402
+ supports_gradient_checkpointing = True
403
+ _no_split_modules = ["MiniMaxM2DecoderLayer"]
404
+ _skip_keys_device_placement = ["past_key_values"]
405
+ _supports_flash_attn = True
406
+ _supports_sdpa = True
407
+ _supports_flex_attn = True
408
+ _can_compile_fullgraph = False # MoE models don't work with torch.compile (`torch.where(condition)` not supported)
409
+ _supports_attention_backend = True
410
+ _can_record_outputs = {
411
+ "router_logits": OutputRecorder(MiniMaxM2SparseMoeBlock, index=1),
412
+ "hidden_states": MiniMaxM2DecoderLayer,
413
+ "attentions": MiniMaxM2Attention,
414
+ }
415
+
416
+
417
+ @auto_docstring
418
+ class MiniMaxM2Model(MiniMaxM2PreTrainedModel):
419
+ def __init__(self, config: MiniMaxM2Config):
420
+ super().__init__(config)
421
+ self.padding_idx = config.pad_token_id
422
+ self.vocab_size = config.vocab_size
423
+
424
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
425
+ self.layers = nn.ModuleList(
426
+ [MiniMaxM2DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
427
+ )
428
+ self.norm = MiniMaxM2RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
429
+ self.rotary_emb = MiniMaxM2RotaryEmbedding(config=config)
430
+ self.gradient_checkpointing = False
431
+
432
+ # Initialize weights and apply final processing
433
+ self.post_init()
434
+
435
+ @check_model_inputs
436
+ @auto_docstring
437
+ def forward(
438
+ self,
439
+ input_ids: Optional[torch.LongTensor] = None,
440
+ attention_mask: Optional[torch.Tensor] = None,
441
+ position_ids: Optional[torch.LongTensor] = None,
442
+ past_key_values: Optional[Cache] = None,
443
+ inputs_embeds: Optional[torch.FloatTensor] = None,
444
+ use_cache: Optional[bool] = None,
445
+ cache_position: Optional[torch.LongTensor] = None,
446
+ **kwargs: Unpack[TransformersKwargs],
447
+ ) -> MoeModelOutputWithPast:
448
+ if (input_ids is None) ^ (inputs_embeds is not None):
449
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
450
+
451
+ if use_cache and past_key_values is None:
452
+ past_key_values = DynamicCache(config=self.config)
453
+
454
+ if inputs_embeds is None:
455
+ inputs_embeds = self.embed_tokens(input_ids)
456
+
457
+ if cache_position is None:
458
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
459
+ cache_position = torch.arange(
460
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
461
+ )
462
+ if position_ids is None:
463
+ position_ids = cache_position.unsqueeze(0)
464
+
465
+ mask_function = create_causal_mask if self.config.sliding_window is None else create_sliding_window_causal_mask
466
+ causal_mask = mask_function(
467
+ config=self.config,
468
+ input_embeds=inputs_embeds,
469
+ attention_mask=attention_mask,
470
+ cache_position=cache_position,
471
+ past_key_values=past_key_values,
472
+ position_ids=position_ids,
473
+ )
474
+
475
+ hidden_states = inputs_embeds
476
+
477
+ # create position embeddings to be shared across the decoder layers
478
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
479
+
480
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
481
+ hidden_states = decoder_layer(
482
+ hidden_states,
483
+ position_embeddings=position_embeddings,
484
+ attention_mask=causal_mask,
485
+ position_ids=position_ids,
486
+ past_key_values=past_key_values,
487
+ use_cache=use_cache,
488
+ cache_position=cache_position,
489
+ **kwargs,
490
+ )
491
+
492
+ hidden_states = self.norm(hidden_states)
493
+
494
+ return MoeModelOutputWithPast( # only diff with Mistral is the output type, we need MoE
495
+ last_hidden_state=hidden_states,
496
+ past_key_values=past_key_values,
497
+ )
498
+
499
+
500
+ def load_balancing_loss_func(
501
+ gate_logits: Union[torch.Tensor, tuple[torch.Tensor], None],
502
+ num_experts: Optional[int] = None,
503
+ top_k=2,
504
+ attention_mask: Optional[torch.Tensor] = None,
505
+ ) -> Union[torch.Tensor, int]:
506
+ r"""
507
+ Computes auxiliary load balancing loss as in Switch Transformer - implemented in Pytorch.
508
+
509
+ See Switch Transformer (https://huggingface.co/papers/2101.03961) for more details. This function implements the loss
510
+ function presented in equations (4) - (6) of the paper. It aims at penalizing cases where the routing between
511
+ experts is too unbalanced.
512
+
513
+ Args:
514
+ gate_logits:
515
+ Logits from the `gate`, should be a tuple of model.config.num_hidden_layers tensors of
516
+ shape [batch_size X sequence_length, num_experts].
517
+ num_experts:
518
+ Number of experts
519
+ top_k:
520
+ The number of experts to route per-token, can be also interpreted as the `top-k` routing
521
+ parameter.
522
+ attention_mask (`torch.Tensor`, *optional*):
523
+ The attention_mask used in forward function
524
+ shape [batch_size X sequence_length] if not None.
525
+
526
+ Returns:
527
+ The auxiliary loss.
528
+ """
529
+ if gate_logits is None or not isinstance(gate_logits, tuple):
530
+ return 0
531
+
532
+ if isinstance(gate_logits, tuple):
533
+ compute_device = gate_logits[0].device
534
+ concatenated_gate_logits = torch.cat([layer_gate.to(compute_device) for layer_gate in gate_logits], dim=0)
535
+
536
+ routing_weights = torch.nn.functional.softmax(concatenated_gate_logits, dim=-1)
537
+
538
+ _, selected_experts = torch.topk(routing_weights, top_k, dim=-1)
539
+
540
+ expert_mask = torch.nn.functional.one_hot(selected_experts, num_experts)
541
+
542
+ if attention_mask is None:
543
+ # Compute the percentage of tokens routed to each experts
544
+ tokens_per_expert = torch.mean(expert_mask.float(), dim=0)
545
+
546
+ # Compute the average probability of routing to these experts
547
+ router_prob_per_expert = torch.mean(routing_weights, dim=0)
548
+ else:
549
+ batch_size, sequence_length = attention_mask.shape
550
+ num_hidden_layers = concatenated_gate_logits.shape[0] // (batch_size * sequence_length)
551
+
552
+ # Compute the mask that masks all padding tokens as 0 with the same shape of expert_mask
553
+ expert_attention_mask = (
554
+ attention_mask[None, :, :, None, None]
555
+ .expand((num_hidden_layers, batch_size, sequence_length, top_k, num_experts))
556
+ .reshape(-1, top_k, num_experts)
557
+ .to(compute_device)
558
+ )
559
+
560
+ # Compute the percentage of tokens routed to each experts
561
+ tokens_per_expert = torch.sum(expert_mask.float() * expert_attention_mask, dim=0) / torch.sum(
562
+ expert_attention_mask, dim=0
563
+ )
564
+
565
+ # Compute the mask that masks all padding tokens as 0 with the same shape of tokens_per_expert
566
+ router_per_expert_attention_mask = (
567
+ attention_mask[None, :, :, None]
568
+ .expand((num_hidden_layers, batch_size, sequence_length, num_experts))
569
+ .reshape(-1, num_experts)
570
+ .to(compute_device)
571
+ )
572
+
573
+ # Compute the average probability of routing to these experts
574
+ router_prob_per_expert = torch.sum(routing_weights * router_per_expert_attention_mask, dim=0) / torch.sum(
575
+ router_per_expert_attention_mask, dim=0
576
+ )
577
+
578
+ overall_loss = torch.sum(tokens_per_expert * router_prob_per_expert.unsqueeze(0))
579
+ return overall_loss * num_experts
580
+
581
+
582
+ @auto_docstring
583
+ class MiniMaxM2ForCausalLM(MiniMaxM2PreTrainedModel, GenerationMixin):
584
+ _tied_weights_keys = ["lm_head.weight"]
585
+ _tp_plan = {"lm_head": "colwise_rep"}
586
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
587
+
588
+ def __init__(self, config):
589
+ super().__init__(config)
590
+ self.model = MiniMaxM2Model(config)
591
+ self.vocab_size = config.vocab_size
592
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
593
+ self.router_aux_loss_coef = config.router_aux_loss_coef
594
+ self.num_experts = config.num_local_experts
595
+ self.num_experts_per_tok = config.num_experts_per_tok
596
+
597
+ # Initialize weights and apply final processing
598
+ self.post_init()
599
+
600
+ @can_return_tuple
601
+ @auto_docstring
602
+ def forward(
603
+ self,
604
+ input_ids: Optional[torch.LongTensor] = None,
605
+ attention_mask: Optional[torch.Tensor] = None,
606
+ position_ids: Optional[torch.LongTensor] = None,
607
+ past_key_values: Optional[Cache] = None,
608
+ inputs_embeds: Optional[torch.FloatTensor] = None,
609
+ labels: Optional[torch.LongTensor] = None,
610
+ use_cache: Optional[bool] = None,
611
+ output_router_logits: Optional[bool] = None,
612
+ cache_position: Optional[torch.LongTensor] = None,
613
+ logits_to_keep: Union[int, torch.Tensor] = 0,
614
+ **kwargs: Unpack[TransformersKwargs],
615
+ ) -> MoeCausalLMOutputWithPast:
616
+ r"""
617
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
618
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
619
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
620
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
621
+
622
+ Example:
623
+
624
+ ```python
625
+ >>> from transformers import AutoTokenizer, MiniMaxM2ForCausalLM
626
+
627
+ >>> model = MiniMaxM2ForCausalLM.from_pretrained("MiniMaxAI/MiniMax-M2.1")
628
+ >>> tokenizer = AutoTokenizer.from_pretrained("MiniMaxAI/MiniMax-M2.1")
629
+
630
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
631
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
632
+
633
+ >>> # Generate
634
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
635
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
636
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
637
+ ```"""
638
+
639
+ output_router_logits = (
640
+ output_router_logits if output_router_logits is not None else self.config.output_router_logits
641
+ )
642
+
643
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
644
+ outputs: MoeModelOutputWithPast = self.model(
645
+ input_ids=input_ids,
646
+ attention_mask=attention_mask,
647
+ position_ids=position_ids,
648
+ past_key_values=past_key_values,
649
+ inputs_embeds=inputs_embeds,
650
+ use_cache=use_cache,
651
+ output_router_logits=output_router_logits,
652
+ cache_position=cache_position,
653
+ **kwargs,
654
+ )
655
+
656
+ hidden_states = outputs.last_hidden_state
657
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
658
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
659
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
660
+
661
+ loss = None
662
+ if labels is not None:
663
+ loss = self.loss_function(logits, labels, self.vocab_size, **kwargs)
664
+
665
+ aux_loss = None
666
+ if output_router_logits:
667
+ aux_loss = load_balancing_loss_func(
668
+ outputs.router_logits,
669
+ self.num_experts,
670
+ self.num_experts_per_tok,
671
+ attention_mask,
672
+ )
673
+ if labels is not None:
674
+ loss += self.router_aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
675
+
676
+ return MoeCausalLMOutputWithPast(
677
+ loss=loss,
678
+ aux_loss=aux_loss,
679
+ logits=logits,
680
+ past_key_values=outputs.past_key_values,
681
+ hidden_states=outputs.hidden_states,
682
+ attentions=outputs.attentions,
683
+ router_logits=outputs.router_logits,
684
+ )
685
+
686
+
687
+ class MiniMaxM2ForSequenceClassification(GenericForSequenceClassification, MiniMaxM2PreTrainedModel):
688
+ pass
689
+
690
+
691
+ class MiniMaxM2ForTokenClassification(GenericForTokenClassification, MiniMaxM2PreTrainedModel):
692
+ pass
693
+
694
+
695
+ class MiniMaxM2ForQuestionAnswering(GenericForQuestionAnswering, MiniMaxM2PreTrainedModel):
696
+ pass
697
+
698
+
699
+ __all__ = [
700
+ "MiniMaxM2ForCausalLM",
701
+ "MiniMaxM2ForQuestionAnswering",
702
+ "MiniMaxM2Model",
703
+ "MiniMaxM2PreTrainedModel",
704
+ "MiniMaxM2ForSequenceClassification",
705
+ "MiniMaxM2ForTokenClassification",
706
+ ]
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,495 @@
+ {
+   "added_tokens_decoder": {
+     "200000": {
+       "content": "]!p~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200001": {
+       "content": "<fim_prefix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200002": {
+       "content": "<fim_middle>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200003": {
+       "content": "<fim_suffix>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200004": {
+       "content": "<fim_pad>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200005": {
+       "content": "<reponame>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200006": {
+       "content": "<filename>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200007": {
+       "content": "<gh_stars>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200008": {
+       "content": "<issue_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200009": {
+       "content": "<issue_comment>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200010": {
+       "content": "<issue_closed>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200011": {
+       "content": "<jupyter_start>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200012": {
+       "content": "<jupyter_text>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200013": {
+       "content": "<jupyter_code>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200014": {
+       "content": "<jupyter_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200015": {
+       "content": "<empty_output>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200016": {
+       "content": "<commit_before>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200017": {
+       "content": "<commit_msg>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200018": {
+       "content": "<commit_after>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200019": {
+       "content": "]~b]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200020": {
+       "content": "[e~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200021": {
+       "content": "]!d~[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200022": {
+       "content": "<function_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200023": {
+       "content": "<code_interpreter>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200024": {
+       "content": "]<]speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200025": {
+       "content": "]<]image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200026": {
+       "content": "]<]video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200027": {
+       "content": "]<]start of speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200028": {
+       "content": "]<]end of speech[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200029": {
+       "content": "]<]start of image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200030": {
+       "content": "]<]end of image[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200031": {
+       "content": "]<]start of video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200032": {
+       "content": "]<]end of video[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200033": {
+       "content": "]<]vision pad[>[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200034": {
+       "content": "]~!b[",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200035": {
+       "content": "<jupyter_error>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200036": {
+       "content": "<add_file>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200037": {
+       "content": "<delete_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200038": {
+       "content": "<rename_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200039": {
+       "content": "<edit_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200040": {
+       "content": "<commit_message>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200041": {
+       "content": "<empty_source_file>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200042": {
+       "content": "<repo_struct>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200043": {
+       "content": "<code_context>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200044": {
+       "content": "<file_content>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200045": {
+       "content": "<source_files>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200046": {
+       "content": "<pr_start>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200047": {
+       "content": "<review_comment>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200048": {
+       "content": "<filepath>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200049": {
+       "content": "<file_sep>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": true
+     },
+     "200050": {
+       "content": "<think>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "200051": {
+       "content": "</think>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "200052": {
+       "content": "<minimax:tool_call>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     },
+     "200053": {
+       "content": "</minimax:tool_call>",
+       "single_word": false,
+       "lstrip": false,
+       "rstrip": false,
+       "normalized": false,
+       "special": false
+     }
+   },
+   "additional_special_tokens": [
+     "<code_interpreter>",
+     "<commit_after>",
+     "<commit_before>",
+     "<commit_msg>",
+     "<empty_output>",
+     "<filename>",
+     "<fim_middle>",
+     "<fim_pad>",
+     "<fim_prefix>",
+     "<fim_suffix>",
+     "<function_call>",
+     "<gh_stars>",
+     "]<]speech[>[",
+     "]<]image[>[",
+     "]<]video[>[",
+     "]<]start of speech[>[",
+     "]<]end of speech[>[",
+     "]<]start of image[>[",
+     "]<]end of image[>[",
+     "]<]start of video[>[",
+     "]<]end of video[>[",
+     "]<]vision pad[>[",
+     "]~!b[",
+     "<issue_closed>",
+     "<issue_comment>",
+     "<issue_start>",
+     "<jupyter_code>",
+     "<jupyter_output>",
+     "<jupyter_start>",
+     "<jupyter_text>",
+     "<reponame>",
+     "[e~[",
+     "]!d~[",
+     "]!p~[",
+     "]~b]",
+     "<jupyter_error>",
+     "<add_file>",
+     "<delete_file>",
+     "<rename_file>",
+     "<edit_file>",
+     "<commit_message>",
+     "<empty_source_file>",
+     "<repo_struct>",
+     "<code_context>",
+     "<file_content>",
+     "<source_files>",
+     "<pr_start>",
+     "<review_comment>",
+     "<filepath>",
+     "<file_sep>"
+   ],
+   "add_prefix_space": false,
+   "bos_token": "]~!b[",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[e~[",
+   "model_max_length": 40960000,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "]!d~["
+ }
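
For quick verification after downloading the repo, the snippet below loads the tokenizer and checks the special tokens declared in this config. It is a minimal sketch, not part of the commit: the local path `./MiniMax-M2.1-AWQ` is a placeholder assumption for wherever the repository was fetched; all asserted values come straight from the config above.

```python
# Sanity-check sketch for the tokenizer config above (assumed local path).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("./MiniMax-M2.1-AWQ")

# bos/eos/unk are the bracket-style sentinel tokens defined in this config,
# not the usual <s>/</s> pair.
assert tok.bos_token == "]~!b["
assert tok.eos_token == "[e~["
assert tok.unk_token == "]!d~["

# <think> and <minimax:tool_call> are registered with "special": false,
# so they encode to single added-token ids but are NOT stripped by
# skip_special_tokens when decoding.
ids = tok.encode("<think>plan</think>", add_special_tokens=False)
print(tok.decode(ids, skip_special_tokens=True))  # -> <think>plan</think>
```

Keeping the reasoning and tool-call delimiters non-special means downstream parsers can still see them in decoded output, which matters for extracting `<think>` blocks and `<minimax:tool_call>` payloads from generations.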