wangrongsheng commited on
Commit
ea3cdf0
·
verified ·
1 Parent(s): f195c78

Add files using upload-large-folder tool

Browse files
Files changed (49) hide show
  1. .gitattributes +176 -0
  2. LICENSE +34 -0
  3. OPEN SOURCE SOFTWARE NOTICE +218 -0
  4. README.md +91 -6
  5. README_EN.md +91 -0
  6. config.json +146 -0
  7. configuration_openpangu_moe.py +82 -0
  8. doc/omniinfer_for_openPangu-Ultra-MoE-718B-V1.1-Int8.md +86 -0
  9. doc/omniinfer_for_openPangu-Ultra-MoE-718B-V1.1-Int8_EN.md +90 -0
  10. generation_config.json +11 -0
  11. output_metadata.json +19 -0
  12. quant_model_description.json +0 -0
  13. quant_model_weight_w8a8_dynamic-00011-of-00170.safetensors +3 -0
  14. quant_model_weight_w8a8_dynamic-00013-of-00170.safetensors +3 -0
  15. quant_model_weight_w8a8_dynamic-00016-of-00170.safetensors +3 -0
  16. quant_model_weight_w8a8_dynamic-00019-of-00170.safetensors +3 -0
  17. quant_model_weight_w8a8_dynamic-00023-of-00170.safetensors +3 -0
  18. quant_model_weight_w8a8_dynamic-00026-of-00170.safetensors +3 -0
  19. quant_model_weight_w8a8_dynamic-00033-of-00170.safetensors +3 -0
  20. quant_model_weight_w8a8_dynamic-00037-of-00170.safetensors +3 -0
  21. quant_model_weight_w8a8_dynamic-00043-of-00170.safetensors +3 -0
  22. quant_model_weight_w8a8_dynamic-00049-of-00170.safetensors +3 -0
  23. quant_model_weight_w8a8_dynamic-00055-of-00170.safetensors +3 -0
  24. quant_model_weight_w8a8_dynamic-00056-of-00170.safetensors +3 -0
  25. quant_model_weight_w8a8_dynamic-00057-of-00170.safetensors +3 -0
  26. quant_model_weight_w8a8_dynamic-00068-of-00170.safetensors +3 -0
  27. quant_model_weight_w8a8_dynamic-00069-of-00170.safetensors +0 -0
  28. quant_model_weight_w8a8_dynamic-00071-of-00170.safetensors +3 -0
  29. quant_model_weight_w8a8_dynamic-00084-of-00170.safetensors +3 -0
  30. quant_model_weight_w8a8_dynamic-00087-of-00170.safetensors +3 -0
  31. quant_model_weight_w8a8_dynamic-00093-of-00170.safetensors +3 -0
  32. quant_model_weight_w8a8_dynamic-00094-of-00170.safetensors +3 -0
  33. quant_model_weight_w8a8_dynamic-00098-of-00170.safetensors +3 -0
  34. quant_model_weight_w8a8_dynamic-00112-of-00170.safetensors +0 -0
  35. quant_model_weight_w8a8_dynamic-00114-of-00170.safetensors +3 -0
  36. quant_model_weight_w8a8_dynamic-00117-of-00170.safetensors +3 -0
  37. quant_model_weight_w8a8_dynamic-00118-of-00170.safetensors +3 -0
  38. quant_model_weight_w8a8_dynamic-00121-of-00170.safetensors +0 -0
  39. quant_model_weight_w8a8_dynamic-00128-of-00170.safetensors +3 -0
  40. quant_model_weight_w8a8_dynamic-00131-of-00170.safetensors +3 -0
  41. quant_model_weight_w8a8_dynamic-00139-of-00170.safetensors +3 -0
  42. quant_model_weight_w8a8_dynamic-00147-of-00170.safetensors +3 -0
  43. quant_model_weight_w8a8_dynamic-00148-of-00170.safetensors +3 -0
  44. quant_model_weight_w8a8_dynamic-00160-of-00170.safetensors +3 -0
  45. quant_model_weight_w8a8_dynamic-00161-of-00170.safetensors +3 -0
  46. quant_model_weight_w8a8_dynamic.safetensors.index.json +3 -0
  47. special_tokens_map.json +30 -0
  48. tokenizer.model +3 -0
  49. tokenizer_config.json +337 -0
.gitattributes CHANGED
@@ -33,3 +33,179 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ quant_model_weight_w8a8_dynamic-00170-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
37
+ quant_model_weight_w8a8_dynamic-00065-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
38
+ quant_model_weight_w8a8_dynamic-00154-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
39
+ quant_model_weight_w8a8_dynamic-00011-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
40
+ quant_model_weight_w8a8_dynamic-00036-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
41
+ quant_model_weight_w8a8_dynamic-00137-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
42
+ quant_model_weight_w8a8_dynamic-00167-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
43
+ quant_model_weight_w8a8_dynamic-00163-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
44
+ quant_model_weight_w8a8_dynamic-00052-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
45
+ quant_model_weight_w8a8_dynamic-00055-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
46
+ quant_model_weight_w8a8_dynamic-00116-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
47
+ quant_model_weight_w8a8_dynamic-00152-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
48
+ quant_model_weight_w8a8_dynamic-00103-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
49
+ quant_model_weight_w8a8_dynamic-00064-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
50
+ quant_model_weight_w8a8_dynamic-00082-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
51
+ quant_model_weight_w8a8_dynamic-00067-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
52
+ quant_model_weight_w8a8_dynamic-00026-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
53
+ quant_model_weight_w8a8_dynamic-00110-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
54
+ quant_model_weight_w8a8_dynamic-00161-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
55
+ quant_model_weight_w8a8_dynamic-00051-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
56
+ quant_model_weight_w8a8_dynamic-00125-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
57
+ quant_model_weight_w8a8_dynamic-00060-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
58
+ quant_model_weight_w8a8_dynamic-00160-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
59
+ mtp_float-2.safetensors filter=lfs diff=lfs merge=lfs -text
60
+ quant_model_weight_w8a8_dynamic-00095-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
61
+ quant_model_weight_w8a8_dynamic-00169-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
62
+ quant_model_weight_w8a8_dynamic-00104-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
63
+ quant_model_weight_w8a8_dynamic-00157-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
64
+ quant_model_weight_w8a8_dynamic-00010-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
65
+ quant_model_weight_w8a8_dynamic-00158-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
66
+ quant_model_weight_w8a8_dynamic-00032-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
67
+ quant_model_weight_w8a8_dynamic-00046-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
68
+ quant_model_weight_w8a8_dynamic-00100-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
69
+ quant_model_weight_w8a8_dynamic-00069-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
70
+ mtp_float-3.safetensors filter=lfs diff=lfs merge=lfs -text
71
+ quant_model_weight_w8a8_dynamic-00168-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
72
+ quant_model_weight_w8a8_dynamic-00115-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
73
+ quant_model_weight_w8a8_dynamic-00144-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
74
+ quant_model_weight_w8a8_dynamic-00121-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
75
+ quant_model_weight_w8a8_dynamic-00080-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
76
+ quant_model_weight_w8a8_dynamic-00096-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
77
+ quant_model_weight_w8a8_dynamic-00134-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
78
+ quant_model_weight_w8a8_dynamic-00056-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
79
+ quant_model_weight_w8a8_dynamic.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
80
+ quant_model_weight_w8a8_dynamic-00111-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
81
+ quant_model_weight_w8a8_dynamic-00087-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
82
+ quant_model_weight_w8a8_dynamic-00120-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
83
+ quant_model_weight_w8a8_dynamic-00090-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
84
+ quant_model_weight_w8a8_dynamic-00085-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
85
+ quant_model_weight_w8a8_dynamic-00041-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
86
+ quant_model_weight_w8a8_dynamic-00156-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
87
+ quant_model_weight_w8a8_dynamic-00138-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
88
+ quant_model_weight_w8a8_dynamic-00047-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
89
+ quant_model_weight_w8a8_dynamic-00123-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
90
+ mtp_float-1.safetensors filter=lfs diff=lfs merge=lfs -text
91
+ quant_model_weight_w8a8_dynamic-00146-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
92
+ quant_model_weight_w8a8_dynamic-00030-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
93
+ quant_model_weight_w8a8_dynamic-00070-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
94
+ quant_model_weight_w8a8_dynamic-00098-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
95
+ quant_model_weight_w8a8_dynamic-00021-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
96
+ quant_model_weight_w8a8_dynamic-00094-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
97
+ quant_model_weight_w8a8_dynamic-00013-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
98
+ quant_model_weight_w8a8_dynamic-00140-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
99
+ quant_model_weight_w8a8_dynamic-00155-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
100
+ quant_model_weight_w8a8_dynamic-00006-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
101
+ quant_model_weight_w8a8_dynamic-00017-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
102
+ quant_model_weight_w8a8_dynamic-00061-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
103
+ mtp_float-4.safetensors filter=lfs diff=lfs merge=lfs -text
104
+ quant_model_weight_w8a8_dynamic-00129-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
105
+ quant_model_weight_w8a8_dynamic-00097-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
106
+ quant_model_weight_w8a8_dynamic-00009-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
107
+ quant_model_weight_w8a8_dynamic-00050-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
108
+ quant_model_weight_w8a8_dynamic-00019-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
109
+ quant_model_weight_w8a8_dynamic-00002-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
110
+ quant_model_weight_w8a8_dynamic-00038-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
111
+ quant_model_weight_w8a8_dynamic-00031-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
112
+ quant_model_weight_w8a8_dynamic-00166-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
113
+ quant_model_weight_w8a8_dynamic-00126-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
114
+ quant_model_weight_w8a8_dynamic-00054-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
115
+ quant_model_weight_w8a8_dynamic-00025-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
116
+ quant_model_weight_w8a8_dynamic-00063-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
117
+ quant_model_weight_w8a8_dynamic-00049-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
118
+ quant_model_weight_w8a8_dynamic-00027-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
119
+ quant_model_weight_w8a8_dynamic-00127-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
120
+ quant_model_weight_w8a8_dynamic-00062-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
121
+ output_metadata.json filter=lfs diff=lfs merge=lfs -text
122
+ quant_model_weight_w8a8_dynamic-00117-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
123
+ quant_model_weight_w8a8_dynamic-00034-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
124
+ quant_model_weight_w8a8_dynamic-00153-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
125
+ quant_model_weight_w8a8_dynamic-00108-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
126
+ quant_model_weight_w8a8_dynamic-00112-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
127
+ quant_model_weight_w8a8_dynamic-00072-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
128
+ quant_model_weight_w8a8_dynamic-00149-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
129
+ quant_model_weight_w8a8_dynamic-00147-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
130
+ quant_model_weight_w8a8_dynamic-00114-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
131
+ quant_model_weight_w8a8_dynamic-00020-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
132
+ quant_model_weight_w8a8_dynamic-00143-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
133
+ quant_model_weight_w8a8_dynamic-00148-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
134
+ quant_model_weight_w8a8_dynamic-00136-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
135
+ quant_model_weight_w8a8_dynamic-00044-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
136
+ quant_model_weight_w8a8_dynamic-00128-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
137
+ quant_model_weight_w8a8_dynamic-00091-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
138
+ quant_model_weight_w8a8_dynamic-00073-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
139
+ quant_model_weight_w8a8_dynamic-00018-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
140
+ quant_model_weight_w8a8_dynamic-00164-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
141
+ quant_model_weight_w8a8_dynamic-00029-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
142
+ quant_model_weight_w8a8_dynamic-00033-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
143
+ quant_model_weight_w8a8_dynamic-00075-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
144
+ quant_model_weight_w8a8_dynamic-00101-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
145
+ quant_model_weight_w8a8_dynamic-00135-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
146
+ quant_model_weight_w8a8_dynamic-00106-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
147
+ quant_model_weight_w8a8_dynamic-00088-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
148
+ quant_model_weight_w8a8_dynamic-00093-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
149
+ quant_model_weight_w8a8_dynamic-00083-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
150
+ quant_model_weight_w8a8_dynamic-00066-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
151
+ quant_model_weight_w8a8_dynamic-00053-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
152
+ quant_model_weight_w8a8_dynamic-00023-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
153
+ quant_model_weight_w8a8_dynamic-00102-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
154
+ quant_model_weight_w8a8_dynamic-00105-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
155
+ quant_model_weight_w8a8_dynamic-00004-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
156
+ quant_model_weight_w8a8_dynamic-00037-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
157
+ quant_model_weight_w8a8_dynamic-00024-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
158
+ quant_model_weight_w8a8_dynamic-00005-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
159
+ quant_model_weight_w8a8_dynamic-00130-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
160
+ quant_model_weight_w8a8_dynamic-00109-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
161
+ quant_model_weight_w8a8_dynamic-00099-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
162
+ quant_model_weight_w8a8_dynamic-00007-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
163
+ quant_model_weight_w8a8_dynamic-00132-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
164
+ quant_model_weight_w8a8_dynamic-00076-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
165
+ quant_model_weight_w8a8_dynamic-00074-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
166
+ quant_model_weight_w8a8_dynamic-00142-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
167
+ quant_model_weight_w8a8_dynamic-00122-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
168
+ quant_model_weight_w8a8_dynamic-00022-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
169
+ quant_model_weight_w8a8_dynamic-00040-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
170
+ quant_model_weight_w8a8_dynamic-00071-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
171
+ quant_model_weight_w8a8_dynamic-00058-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
172
+ quant_model_weight_w8a8_dynamic-00015-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
173
+ quant_model_weight_w8a8_dynamic-00079-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
174
+ quant_model_weight_w8a8_dynamic-00150-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
175
+ quant_model_weight_w8a8_dynamic-00059-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
176
+ quant_model_weight_w8a8_dynamic-00012-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
177
+ quant_model_weight_w8a8_dynamic-00003-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
178
+ quant_model_weight_w8a8_dynamic-00078-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
179
+ quant_model_weight_w8a8_dynamic-00107-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
180
+ quant_model_weight_w8a8_dynamic-00119-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
181
+ quant_model_weight_w8a8_dynamic-00089-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
182
+ quant_model_weight_w8a8_dynamic-00124-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
183
+ quant_model_weight_w8a8_dynamic-00035-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
184
+ quant_model_weight_w8a8_dynamic-00118-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
185
+ quant_model_weight_w8a8_dynamic-00001-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
186
+ quant_model_weight_w8a8_dynamic-00092-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
187
+ quant_model_weight_w8a8_dynamic-00162-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
188
+ quant_model_weight_w8a8_dynamic-00028-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
189
+ quant_model_weight_w8a8_dynamic-00084-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
190
+ quant_model_weight_w8a8_dynamic-00159-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
191
+ quant_model_weight_w8a8_dynamic-00139-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
192
+ quant_model_weight_w8a8_dynamic-00086-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
193
+ quant_model_weight_w8a8_dynamic-00077-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
194
+ quant_model_weight_w8a8_dynamic-00016-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
195
+ quant_model_weight_w8a8_dynamic-00068-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
196
+ quant_model_weight_w8a8_dynamic-00145-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
197
+ quant_model_weight_w8a8_dynamic-00008-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
198
+ quant_model_weight_w8a8_dynamic-00141-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
199
+ quant_model_weight_w8a8_dynamic-00048-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
200
+ quant_model_weight_w8a8_dynamic-00081-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
201
+ quant_model_weight_w8a8_dynamic-00042-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
202
+ quant_model_weight_w8a8_dynamic-00133-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
203
+ quant_model_weight_w8a8_dynamic-00045-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
204
+ quant_model_weight_w8a8_dynamic-00113-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
205
+ quant_model_weight_w8a8_dynamic-00131-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
206
+ quant_model_weight_w8a8_dynamic-00014-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
207
+ quant_model_weight_w8a8_dynamic-00039-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
208
+ quant_model_weight_w8a8_dynamic-00151-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
209
+ quant_model_weight_w8a8_dynamic-00057-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
210
+ quant_model_weight_w8a8_dynamic-00165-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
211
+ quant_model_weight_w8a8_dynamic-00043-of-00170.safetensors filter=lfs diff=lfs merge=lfs -text
LICENSE ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0
2
+
3
+ This OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0 (the "Agreement") is a legal agreement between You and Huawei Technologies Co., Ltd. ("Huawei", "We" or "Us"), and it governs Your reproducing, use, modification, and distribution of openPangu as made available by Huawei under this Agreement.
4
+
5
+ By using, reproducing, modifying, distributing, performing or displaying any portion or element of openPangu, or otherwise accepting the terms of this Agreement, You agree to be bound by this Agreement.
6
+
7
+ 1. Definitions.
8
+ 1.1. “openPangu” or “Model” means openPangu large language models and software, including trained model weights, parameters (including optimizer states), accompanying source code and scripts released under this Agreement.
9
+ 1.2. “Derivative Model” means all (1) modifications to the Model, (2) works based on the Model, and (3) any other derivative works of the Model. For clarity, information or content results from operating or otherwise using the Model is not a Derivative Model.
10
+ 1.3. “You” or “Your” means an individual or Legal Entity exercising permissions granted by this Agreement and/or using the Model for any purpose.
11
+ 1.4. “Third Party” or “Third Parties” means individuals or legal entities that are not under common control with Us or You.
12
+
13
+ 2. License Grant. Subject to Your full compliance with the terms and conditions of this Agreement, We hereby grant to You a perpetual, worldwide, non-exclusive, non-transferable, no-charge, royalty-free license (except as stated in Section 3) to use, reproduce, modify, and distribute the Model.
14
+
15
+ 3. Conditions for License Grant. You represent and warrant that You will not, access, download, install, run, deploy, integrate, modify, or otherwise use the Model, directly or indirectly, within the European Union.
16
+
17
+
18
+ 4. Redistribution.
19
+ 4.1. If You distribute the Model or Derivative Model, You shall retain in Your distribution (1) a copy of this agreement, and (2) all copyright notices and other notices of origin included in the Model that are applicable to Your distribution.
20
+ 4.2. Further, if You distribute or make available to Third Parties a product or service (including another AI model) based on the Model, You are required to (1) display the acknowledgement “Powered by openPangu” and (2) include a trademark notice “openPangu is a trademark of Huawei Technologies Co., Ltd.” on related webpages, user manuals, product documentations or other advertising materials mentioning features of the Model.
21
+ 4.3. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for Derivative Model made by You as a whole, provided Your use, reproduction, and distribution of the Model otherwise complies with the terms and conditions of this Agreement.
22
+
23
+ 5. Ownership. We do not claim ownership to any information or content generated using the Model or Derivative Model that are made by You. You are solely responsible for evaluating the accuracy and appropriateness of such information or content for Your use case.
24
+
25
+ 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of Huawei, except as required for complying with Section 4.2.
26
+
27
+ 7. Indemnity. You will indemnify and hold harmless Huawei from and against any claim by any third party arising out of or related to Your use or distribution of the Model or Derivative Model made by You (e.g. a violation against Section 3). For avoidance of doubt, “third party” in this clause include supervisory authorities.
28
+
29
+ 8. THE MODEL IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NONINFRINGEMENT, ACCURACY, OR THE ABSENCE OF LATENT OR OTHER DEFECTS OR ERRORS, WHETHER OR NOT DISCOVERABLE, ALL TO THE GREATEST EXTENT PERMISSIBLE UNDER APPLICABLE LAW.
30
+
31
+ 9. IN NO EVENT SHALL WE BE LIABLE TO YOU FOR ANY DAMAGES, INCLUDING, BUT NOT LIMITED TO ANY DIRECT, OR INDIRECT, SPECIAL OR CONSEQUENTIAL DAMAGES ARISING FROM YOUR USE OR INABILITY TO USE THE MODEL, IN WHOLE OR IN PART, NO MATTER HOW IT’S CAUSED OR THE LEGAL THEORY IT IS BASED ON, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
32
+
33
+
34
+ END OF THE TERMS AND CONDITIONS
OPEN SOURCE SOFTWARE NOTICE ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ OPEN SOURCE SOFTWARE NOTICE
2
+
3
+ Please note we provide an open source software notice along with this product and/or this product firmware (in the following just “this product”). The open source software licenses are granted by the respective right holders. And the open source licenses prevail all other license information with regard to the respective open source software contained in the product, including but not limited to End User Software Licensing Agreement. This notice is provided on behalf of Huawei Technologies Co. Ltd. and any of its local subsidiaries which may have provided this product to you in your local country.
4
+
5
+ Warranty Disclaimer
6
+ THE OPEN SOURCE SOFTWARE IN THIS PRODUCT IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL, BUT WITHOUT ANY WARRANTY, WITHOUT EVEN THE IMPLIED WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. SEE THE APPLICABLE LICENSES FOR MORE DETAILS.
7
+
8
+ Copyright Notice and License Texts
9
+
10
+ Software: transformers 4.48.2
11
+ Copyright notice:
12
+ Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
13
+
14
+ License Text:
15
+ ----------------------------------------
16
+
17
+ Apache License
18
+ Version 2.0, January 2004
19
+ http://www.apache.org/licenses/
20
+
21
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
22
+
23
+ 1. Definitions.
24
+
25
+ "License" shall mean the terms and conditions for use, reproduction,
26
+ and distribution as defined by Sections 1 through 9 of this document.
27
+
28
+ "Licensor" shall mean the copyright owner or entity authorized by
29
+ the copyright owner that is granting the License.
30
+
31
+ "Legal Entity" shall mean the union of the acting entity and all
32
+ other entities that control, are controlled by, or are under common
33
+ control with that entity. For the purposes of this definition,
34
+ "control" means (i) the power, direct or indirect, to cause the
35
+ direction or management of such entity, whether by contract or
36
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
37
+ outstanding shares, or (iii) beneficial ownership of such entity.
38
+
39
+ "You" (or "Your") shall mean an individual or Legal Entity
40
+ exercising permissions granted by this License.
41
+
42
+ "Source" form shall mean the preferred form for making modifications,
43
+ including but not limited to software source code, documentation
44
+ source, and configuration files.
45
+
46
+ "Object" form shall mean any form resulting from mechanical
47
+ transformation or translation of a Source form, including but
48
+ not limited to compiled object code, generated documentation,
49
+ and conversions to other media types.
50
+
51
+ "Work" shall mean the work of authorship, whether in Source or
52
+ Object form, made available under the License, as indicated by a
53
+ copyright notice that is included in or attached to the work
54
+ (an example is provided in the Appendix below).
55
+
56
+ "Derivative Works" shall mean any work, whether in Source or Object
57
+ form, that is based on (or derived from) the Work and for which the
58
+ editorial revisions, annotations, elaborations, or other modifications
59
+ represent, as a whole, an original work of authorship. For the purposes
60
+ of this License, Derivative Works shall not include works that remain
61
+ separable from, or merely link (or bind by name) to the interfaces of,
62
+ the Work and Derivative Works thereof.
63
+
64
+ "Contribution" shall mean any work of authorship, including
65
+ the original version of the Work and any modifications or additions
66
+ to that Work or Derivative Works thereof, that is intentionally
67
+ submitted to Licensor for inclusion in the Work by the copyright owner
68
+ or by an individual or Legal Entity authorized to submit on behalf of
69
+ the copyright owner. For the purposes of this definition, "submitted"
70
+ means any form of electronic, verbal, or written communication sent
71
+ to the Licensor or its representatives, including but not limited to
72
+ communication on electronic mailing lists, source code control systems,
73
+ and issue tracking systems that are managed by, or on behalf of, the
74
+ Licensor for the purpose of discussing and improving the Work, but
75
+ excluding communication that is conspicuously marked or otherwise
76
+ designated in writing by the copyright owner as "Not a Contribution."
77
+
78
+ "Contributor" shall mean Licensor and any individual or Legal Entity
79
+ on behalf of whom a Contribution has been received by Licensor and
80
+ subsequently incorporated within the Work.
81
+
82
+ 2. Grant of Copyright License. Subject to the terms and conditions of
83
+ this License, each Contributor hereby grants to You a perpetual,
84
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
85
+ copyright license to reproduce, prepare Derivative Works of,
86
+ publicly display, publicly perform, sublicense, and distribute the
87
+ Work and such Derivative Works in Source or Object form.
88
+
89
+ 3. Grant of Patent License. Subject to the terms and conditions of
90
+ this License, each Contributor hereby grants to You a perpetual,
91
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
92
+ (except as stated in this section) patent license to make, have made,
93
+ use, offer to sell, sell, import, and otherwise transfer the Work,
94
+ where such license applies only to those patent claims licensable
95
+ by such Contributor that are necessarily infringed by their
96
+ Contribution(s) alone or by combination of their Contribution(s)
97
+ with the Work to which such Contribution(s) was submitted. If You
98
+ institute patent litigation against any entity (including a
99
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
100
+ or a Contribution incorporated within the Work constitutes direct
101
+ or contributory patent infringement, then any patent licenses
102
+ granted to You under this License for that Work shall terminate
103
+ as of the date such litigation is filed.
104
+
105
+ 4. Redistribution. You may reproduce and distribute copies of the
106
+ Work or Derivative Works thereof in any medium, with or without
107
+ modifications, and in Source or Object form, provided that You
108
+ meet the following conditions:
109
+
110
+ (a) You must give any other recipients of the Work or
111
+ Derivative Works a copy of this License; and
112
+
113
+ (b) You must cause any modified files to carry prominent notices
114
+ stating that You changed the files; and
115
+
116
+ (c) You must retain, in the Source form of any Derivative Works
117
+ that You distribute, all copyright, patent, trademark, and
118
+ attribution notices from the Source form of the Work,
119
+ excluding those notices that do not pertain to any part of
120
+ the Derivative Works; and
121
+
122
+ (d) If the Work includes a "NOTICE" text file as part of its
123
+ distribution, then any Derivative Works that You distribute must
124
+ include a readable copy of the attribution notices contained
125
+ within such NOTICE file, excluding those notices that do not
126
+ pertain to any part of the Derivative Works, in at least one
127
+ of the following places: within a NOTICE text file distributed
128
+ as part of the Derivative Works; within the Source form or
129
+ documentation, if provided along with the Derivative Works; or,
130
+ within a display generated by the Derivative Works, if and
131
+ wherever such third-party notices normally appear. The contents
132
+ of the NOTICE file are for informational purposes only and
133
+ do not modify the License. You may add Your own attribution
134
+ notices within Derivative Works that You distribute, alongside
135
+ or as an addendum to the NOTICE text from the Work, provided
136
+ that such additional attribution notices cannot be construed
137
+ as modifying the License.
138
+
139
+ You may add Your own copyright statement to Your modifications and
140
+ may provide additional or different license terms and conditions
141
+ for use, reproduction, or distribution of Your modifications, or
142
+ for any such Derivative Works as a whole, provided Your use,
143
+ reproduction, and distribution of the Work otherwise complies with
144
+ the conditions stated in this License.
145
+
146
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
147
+ any Contribution intentionally submitted for inclusion in the Work
148
+ by You to the Licensor shall be under the terms and conditions of
149
+ this License, without any additional terms or conditions.
150
+ Notwithstanding the above, nothing herein shall supersede or modify
151
+ the terms of any separate license agreement you may have executed
152
+ with Licensor regarding such Contributions.
153
+
154
+ 6. Trademarks. This License does not grant permission to use the trade
155
+ names, trademarks, service marks, or product names of the Licensor,
156
+ except as required for reasonable and customary use in describing the
157
+ origin of the Work and reproducing the content of the NOTICE file.
158
+
159
+ 7. Disclaimer of Warranty. Unless required by applicable law or
160
+ agreed to in writing, Licensor provides the Work (and each
161
+ Contributor provides its Contributions) on an "AS IS" BASIS,
162
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
163
+ implied, including, without limitation, any warranties or conditions
164
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
165
+ PARTICULAR PURPOSE. You are solely responsible for determining the
166
+ appropriateness of using or redistributing the Work and assume any
167
+ risks associated with Your exercise of permissions under this License.
168
+
169
+ 8. Limitation of Liability. In no event and under no legal theory,
170
+ whether in tort (including negligence), contract, or otherwise,
171
+ unless required by applicable law (such as deliberate and grossly
172
+ negligent acts) or agreed to in writing, shall any Contributor be
173
+ liable to You for damages, including any direct, indirect, special,
174
+ incidental, or consequential damages of any character arising as a
175
+ result of this License or out of the use or inability to use the
176
+ Work (including but not limited to damages for loss of goodwill,
177
+ work stoppage, computer failure or malfunction, or any and all
178
+ other commercial damages or losses), even if such Contributor
179
+ has been advised of the possibility of such damages.
180
+
181
+ 9. Accepting Warranty or Additional Liability. While redistributing
182
+ the Work or Derivative Works thereof, You may choose to offer,
183
+ and charge a fee for, acceptance of support, warranty, indemnity,
184
+ or other liability obligations and/or rights consistent with this
185
+ License. However, in accepting such obligations, You may act only
186
+ on Your own behalf and on Your sole responsibility, not on behalf
187
+ of any other Contributor, and only if You agree to indemnify,
188
+ defend, and hold each Contributor harmless for any liability
189
+ incurred by, or claims asserted against, such Contributor by reason
190
+ of your accepting any such warranty or additional liability.
191
+
192
+ END OF TERMS AND CONDITIONS
193
+
194
+ APPENDIX: How to apply the Apache License to your work.
195
+
196
+ To apply the Apache License to your work, attach the following
197
+ boilerplate notice, with the fields enclosed by brackets "[]"
198
+ replaced with your own identifying information. (Don't include
199
+ the brackets!) The text should be enclosed in the appropriate
200
+ comment syntax for the file format. We also recommend that a
201
+ file or class name and description of purpose be included on the
202
+ same "printed page" as the copyright notice for easier
203
+ identification within third-party archives.
204
+
205
+ Copyright [yyyy] [name of copyright owner]
206
+
207
+ Licensed under the Apache License, Version 2.0 (the "License");
208
+ you may not use this file except in compliance with the License.
209
+ You may obtain a copy of the License at
210
+
211
+ http://www.apache.org/licenses/LICENSE-2.0
212
+
213
+ Unless required by applicable law or agreed to in writing, software
214
+ distributed under the License is distributed on an "AS IS" BASIS,
215
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
216
+ See the License for the specific language governing permissions and
217
+ limitations under the License.
218
+
README.md CHANGED
@@ -1,6 +1,91 @@
1
- ---
2
- license: other
3
- license_name: openpangu-model-license-agreement-version-1.0
4
- license_link: >-
5
- https://ai.gitcode.com/ascend-tribe/openPangu-Ultra-MoE-718B-V1.1-Int8/blob/main/LICENSE
6
- ---
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 开源盘古 Ultra-MoE-718B-V1.1-Int8
2
+ 中文 | [English](README_EN.md)
3
+
4
+ ## 1. 简介
5
+ openPangu-Ultra-MoE-718B-V1.1 是基于昇腾 NPU 训练的大规模混合专家语言模型,总参数量为718B,激活参数量为39B,同一个模型具备快思考和慢思考两种能力。
6
+ 相较 [[openPangu-Ultra-MoE-718B-V1.0](https://ai.gitcode.com/ascend-tribe/openpangu-ultra-moe-718b-model)] 版本,V1.1版本主要提升了Agent工具调用能力,降低了幻觉率,其他综合能力也进一步增强。
7
+
8
+ **openPangu-Ultra-MoE-718B-V1.1-Int8 是 [[openPangu-Ultra-MoE-718B-V1.1](https://ai.gitcode.com/ascend-tribe/openPangu-Ultra-MoE-718B-V1.1)] 的量化版本,使用动态 per-token 量化方法,能够减少约一半的显存占用,提升20%吞吐,综合精度损失小于1%。**
9
+
10
+ ## 2. 模型架构
11
+ openPangu-Ultra-MoE-718B-V1.1-Int8 的模型架构采用了业界主流的 Multi-head Latent Attention (MLA)、Multi-Token Prediction (MTP)、大稀疏比等架构,以及一些特有的设计:
12
+ - Depth-Scaled Sandwich-Norm 和 TinyInit:通过调整层归一化结构与参数初始化,提升训练稳定性。
13
+ - 基于 EP-Group 的负载均衡策略:通过优化负载均衡损失函数,改善专家特化效果。
14
+
15
+ ## 3. 推理说明
16
+ 使用Omni-Infer推理openPangu-Ultra-MoE-718B-V1.1-Int8的方式请参考[[Omni-Infer推理部署指南](doc/omniinfer_for_openPangu-Ultra-MoE-718B-V1.1-Int8.md)]。
17
+
18
+ ## 4. Function Call 调用示例
19
+ 当前开源的 Omni-Infer 推理引擎已支持 Function Call 调用,vllm_ascend 版本将很快更新。
20
+ ```
21
+ import requests,json
22
+
23
+ # 定义工具函数,Json列表格式,支持MCP协议规格
24
+ tools = [
25
+ {
26
+ "type": "function",
27
+ "function": {
28
+ "name": "get_current_weather",
29
+ "description": "获取指定城市的当前天气信息,包括温度、湿度、风速等数据。",
30
+ "parameters": {
31
+ "type": "object",
32
+ "properties": {
33
+ "location": {
34
+ "type": "string",
35
+ "description": "城市名称,例如:'北京'、'深圳'。支持中文或拼音输入。"
36
+ },
37
+ "date": {
38
+ "type": "string",
39
+ "description": "查询日期,格式为 YYYY-MM-DD(遵循 ISO 8601 标准)。例如:'2023-10-01'。"
40
+ }
41
+ },
42
+ "required": ["location", "date"],
43
+ "additionalProperties": False
44
+ }
45
+ }
46
+ }
47
+ ]
48
+
49
+ messages = [
50
+ {"role": "system", "content": "你是华为公司开发的盘古模型。\n现在是2025年10月13日"}, # 自定义system prompt,不需要使用时置空
51
+ {"role": "user", "content": "深圳后天的天气如何?"}
52
+ ]
53
+
54
+ headers = {'Content-Type': 'application/json'}
55
+ api_url = "xxxxxxxx"
56
+
57
+ payload = {
58
+ "model": "pangu_ultra_moe",
59
+ "messages": messages,
60
+ "tools": tools,
61
+ "chat_template_kwargs":{
62
+ "think": False, # 控制快慢思考,False快思考,默认True(慢思考)
63
+ "mcp_prompt": True # 控制是否使用默认的工具调用system prompt。默认True(使用)
64
+ }
65
+ }
66
+
67
+ api_response = requests.post(api_url, headers=headers, json=payload)
68
+
69
+ # 处理模型响应返回值
70
+ choice = api_response.json()["choices"][0]
71
+ reasoning_response = choice['message']['reasoning_content']
72
+ response = choice['message']['content']
73
+ tool_calls = choice['message']['tool_calls']
74
+
75
+ ```
76
+
77
+ **chat_template_kwargs 快慢切换和工具相关参数说明**
78
+ - think: 慢思考模式开关,默认 True,慢思考模式;
79
+ - mcp_prompt: Function Call 模式是否使用内置的默认工具调用指令,默认 True(使用)。如果为 True 且传入了 tools ,会在自定义 system prompt 之后插入内置的默认工具调用指令。
80
+
81
+ ## 5. 模型许可证
82
+ 除文件中对开源许可证另有约定外,openPangu-Ultra-MoE-718B-V1.1-Int8 模型根据 OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0 授权,旨在允许使用并促进人工智能技术的进一步发展。有关详细信息,请参阅模型存储库根目录中的 [LICENSE](LICENSE) 文件。
83
+
84
+ ## 6. 免责声明
85
+ 由于 openPangu-Ultra-MoE-718B-V1.1-Int8 (“模型”)所依赖的技术固有的限制,以及人工智能生成的内容是由盘古自动生成的,华为无法对以下事项做出任何保证:
86
+ - 该模型的输出通过AI算法自动生成,不能排除某些信息可能存在缺陷、不合理或引起不适的可能性,生成的内容不代表华为的态度或立场;
87
+ - 无法保证该模型100%准确、可靠、功能齐全、及时、安全、无错误、不间断、持续稳定或无任何故障;
88
+ - 该模型的输出内容不构成任何建议或决策,也不保证生成的内容的真实性、完整性、准确性、及时性、合法性、功能性或实用性。生成的内容不能替代医疗、法律等领域的专业人士回答您的问题。生成的内容仅供参考,不代表华为的任何态度、立场或观点。您需要根据实际情况做出独立判断,华为不承担任何责任。
89
+
90
+ ## 7. 反馈
91
+ 如果有任何意见和建议,请提交issue或联系[openPangu@huawei.com](mailto:openPangu@huawei.com)。
README_EN.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # openPangu-Ultra-MoE-718B-V1.1-Int8
2
+ English | [中文](README.md)
3
+
4
+ ## 1. Introduction
5
+ The openPangu-Ultra-MoE-718B-V1.1 is a large-scale mixture-of-experts language model trained on Ascend NPU, with a total parameter count of 718B and 39B activated parameters per token. It is equipped with the capability to switch between fast and slow thinking. Compared to the earlier [[openPangu-Ultra-MoE-718B-V1.0](https://ai.gitcode.com/ascend-tribe/openpangu-ultra-moe-718b-model/blob/main/README_EN.md)] version, V1.1 greatly improves agentic tool-use ability and lowers the hallucination rate, and it also strengthens other general and reasoning capabilities.
6
+
7
+ **openPangu-Ultra-MoE-718B-V1.1-Int8 is a quantized version of [[openPangu-Ultra-MoE-718B-V1.1](https://ai.gitcode.com/ascend-tribe/openPangu-Ultra-MoE-718B-V1.1/blob/main/README_EN.md)] using dynamic per-token quantization, which reduces GPU memory usage by approximately half, increases throughput by 20%, and achieves an accuracy loss of less than 1%.**
8
+
9
+ ## 2. Model Architecture
10
+ The architecture of the openPangu-Ultra-MoE-718B-V1.1-Int8 adopts the mainstream Multi-head Latent Attention (MLA), Multi-Token Prediction (MTP), high MoE sparsity, and features several different designs:
11
+ - Depth-Scaled Sandwich-Norm and TinyInit: These techniques adjust the layer normalization structure and parameter initialization for improved training stability.
12
+ - EP-Group load balancing loss: This technique optimizes the load balancing loss, achieving better expert specialization.
13
+
14
+ ## 3. Inference Usage
15
+ The method for inferring openPangu-Ultra-MoE-718B-V1.1-Int8 using Omni-Infer is referenced as follows: [[Omni-Infer User Guide](doc/omniinfer_for_openPangu-Ultra-MoE-718B-V1.1-Int8_EN.md)].
16
+
17
+ ## 4. Function Call Usage Examples
18
+ So far the open-sourced Omni-Infer engine supports Function Call; support on the vllm_ascend engine will be released soon.
19
+ ```
20
+ import requests,json
21
+
22
+ # Define tools as a list in JSON format; it also supports the MCP protocol
23
+ tools = [
24
+ {
25
+ "type": "function",
26
+ "function": {
27
+ "name": "get_current_weather",
28
+ "description": "Get the current weather information of the specified city, including temperature, humidity, wind speed and other data.",
29
+ "parameters": {
30
+ "type": "object",
31
+ "properties": {
32
+ "location": {
33
+ "type": "string",
34
+ "description": "City name, for example: 'Beijing', 'Shenzhen'. Supports Chinese or Pinyin input."
35
+ },
36
+ "date": {
37
+ "type": "string",
38
+ "description": "Query date in the format of YYYY-MM-DD (compliant with ISO 8601). For example: '2023-10-01'."
39
+ }
40
+ },
41
+ "required": ["location", "date"],
42
+ "additionalProperties": False
43
+ }
44
+ }
45
+ }
46
+ ]
47
+
48
+ messages = [
49
+ {"role": "system", "content": "You are the Pangu model developed by Huawei. \nIt is October 13, 2025"}, # customized system prompt; set it to empty when not needed.
50
+ {"role": "user", "content": "What is the weather in Shenzhen the day after tomorrow?"}
51
+ ]
52
+
53
+ headers = {'Content-Type': 'application/json'}
54
+ api_url = "xxxxxxxx"
55
+
56
+ payload = {
57
+ "model": "pangu_ultra_moe",
58
+ "messages": messages,
59
+ "tools": tools,
60
+ "chat_template_kwargs":{
61
+ "think": False, # controls the switch between fast and slow thinking. Set False for fast thinking; it is True by default (slow thinking)
62
+ "mcp_prompt": True # controls whether to use the default system prompt for tool usage; it is set to True by default.
63
+ }
64
+ }
65
+
66
+ api_response = requests.post(api_url, headers=headers, json=payload)
67
+
68
+ # dealing with returned messages
69
+ choice = api_response.json()["choices"][0]
70
+ reasoning_response = choice['message']['reasoning_content']
71
+ response = choice['message']['content']
72
+ tool_calls = choice['message']['tool_calls']
73
+
74
+ ```
75
+
76
+ **chat_template_kwargs parameters for switching fast and slow thinking and tool usage.**
77
+ - think: whether to turn on slow-thinking, and it's set to True by default;
78
+ - mcp_prompt: whether to use the default system prompt for Function Call mode; it is set to True by default. If it is True and a list of tools is passed, the built-in default tool-calling instructions are appended after the customized system prompt.
79
+
80
+ ## 5. Model License
81
+ Unless otherwise noted, the openPangu-Ultra-MoE-718B-V1.1-Int8 model is licensed under the terms and conditions of OPENPANGU MODEL LICENSE AGREEMENT VERSION 1.0, which is intended to be used permissively and enable the further development of artificial intelligence technologies. Please refer to the [LICENSE](LICENSE) file located in the root directory of the model repository for details.
82
+
83
+ ## 6. Disclaimer
84
+ Due to the technical limitations inherent in the technology on which the openPangu-Ultra-MoE-718B-V1.1-Int8 (“Model”) relies and the fact that the artificial intelligence generated content is automatically produced by Model, Huawei cannot make any guarantees regarding the following matters:
85
+
86
+ - The output of this Model is automatically generated via AI algorithms, it does not rule out the possibility that some of the information may be flawed, unreasonable, or cause discomfort, and the generated content does not represent Huawei's attitude or standpoint;
87
+ - There is no guarantee that this Model is 100% accurate, reliable, functional, timely, secure and safe, error-free, uninterrupted, continuously stable, or free of any faults;
88
+ - The output of this Model does not constitute any advices or decisions for you, and it does not guarantee the authenticity, completeness, accuracy, timeliness, legality, functionality, or practicality of the generated content. The generated content cannot replace professionals in medical, legal, and other fields in answering your questions. The generated content is for your reference only and does not represent any attitude, standpoint, or position of Huawei. You need to make independent judgments based on your actual situation, and Huawei does not assume any responsibilities.
89
+
90
+ ## 7. Contact
91
+ If you have any questions, please raise an issue or contact us at [openPangu@huawei.com](mailto:openPangu@huawei.com).
config.json ADDED
@@ -0,0 +1,146 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "PanguUltraMoEForCausalLM"
4
+ ],
5
+ "attention_bias": false,
6
+ "auto_map": {
7
+ "AutoConfig": "configuration_openpangu_moe.PanguUltraMoEConfig",
8
+ "AutoModel": "modeling_openpangu_moe.PanguUltraMoEModel",
9
+ "AutoModelForCausalLM": "modeling_openpangu_moe.PanguUltraMoEForCausalLM"
10
+ },
11
+ "num_dense_layers": 3,
12
+ "hidden_act": "silu",
13
+ "hidden_size": 7680,
14
+ "initializer_range": 0.02,
15
+ "intermediate_size": 18432,
16
+ "attention_kv_lora_dim": 512,
17
+ "max_position_embeddings": 131072,
18
+ "model_type": "pangu_ultra_moe",
19
+ "moe_intermediate_size": 2048,
20
+ "num_routed_experts": 256,
21
+ "num_shared_experts": 1,
22
+ "num_attention_heads": 128,
23
+ "num_experts_per_tok": 8,
24
+ "num_hidden_layers": 61,
25
+ "num_key_value_heads": 128,
26
+ "num_mtp_layers": 1,
27
+ "attention_q_lora_dim": 1536,
28
+ "attention_qk_dim": 128,
29
+ "attention_qk_rope_dim": 64,
30
+ "rms_norm_eps": 1e-05,
31
+ "rope_theta": 25600000,
32
+ "routed_scaling_factor": 2.5,
33
+ "sandwich_norm": true,
34
+ "tie_word_embeddings": false,
35
+ "torch_dtype": "bfloat16",
36
+ "transformers_version": "4.48.2",
37
+ "use_cache": true,
38
+ "attention_v_dim": 128,
39
+ "vocab_size": 153600,
40
+ "quantize": "w8a8_dynamic",
41
+ "quantization_config": {
42
+ "config_groups": {
43
+ "group_0": {
44
+ "input_activations": {
45
+ "actorder": null,
46
+ "block_structure": null,
47
+ "dynamic": true,
48
+ "group_size": null,
49
+ "num_bits": 8,
50
+ "observer": "memoryless",
51
+ "observer_kwargs": {},
52
+ "strategy": "token",
53
+ "symmetric": true,
54
+ "type": "int"
55
+ },
56
+ "output_activations": null,
57
+ "targets": [
58
+ "Linear"
59
+ ],
60
+ "weights": {
61
+ "actorder": null,
62
+ "block_structure": null,
63
+ "dynamic": false,
64
+ "group_size": null,
65
+ "num_bits": 8,
66
+ "observer": "minmax",
67
+ "observer_kwargs": {},
68
+ "strategy": "channel",
69
+ "symmetric": true,
70
+ "type": "int"
71
+ }
72
+ }
73
+ },
74
+ "format": "int-quantized",
75
+ "global_compression_ratio": 1.5943962512751308,
76
+ "ignore": [
77
+ "model.layers.0.self_attn.kv_b_proj",
78
+ "model.layers.1.self_attn.kv_b_proj",
79
+ "model.layers.2.self_attn.kv_b_proj",
80
+ "model.layers.3.self_attn.kv_b_proj",
81
+ "model.layers.4.self_attn.kv_b_proj",
82
+ "model.layers.5.self_attn.kv_b_proj",
83
+ "model.layers.6.self_attn.kv_b_proj",
84
+ "model.layers.7.self_attn.kv_b_proj",
85
+ "model.layers.8.self_attn.kv_b_proj",
86
+ "model.layers.9.self_attn.kv_b_proj",
87
+ "model.layers.10.self_attn.kv_b_proj",
88
+ "model.layers.11.self_attn.kv_b_proj",
89
+ "model.layers.12.self_attn.kv_b_proj",
90
+ "model.layers.13.self_attn.kv_b_proj",
91
+ "model.layers.14.self_attn.kv_b_proj",
92
+ "model.layers.15.self_attn.kv_b_proj",
93
+ "model.layers.16.self_attn.kv_b_proj",
94
+ "model.layers.17.self_attn.kv_b_proj",
95
+ "model.layers.18.self_attn.kv_b_proj",
96
+ "model.layers.19.self_attn.kv_b_proj",
97
+ "model.layers.20.self_attn.kv_b_proj",
98
+ "model.layers.21.self_attn.kv_b_proj",
99
+ "model.layers.22.self_attn.kv_b_proj",
100
+ "model.layers.23.self_attn.kv_b_proj",
101
+ "model.layers.24.self_attn.kv_b_proj",
102
+ "model.layers.25.self_attn.kv_b_proj",
103
+ "model.layers.26.self_attn.kv_b_proj",
104
+ "model.layers.27.self_attn.kv_b_proj",
105
+ "model.layers.28.self_attn.kv_b_proj",
106
+ "model.layers.29.self_attn.kv_b_proj",
107
+ "model.layers.30.self_attn.kv_b_proj",
108
+ "model.layers.31.self_attn.kv_b_proj",
109
+ "model.layers.32.self_attn.kv_b_proj",
110
+ "model.layers.33.self_attn.kv_b_proj",
111
+ "model.layers.34.self_attn.kv_b_proj",
112
+ "model.layers.35.self_attn.kv_b_proj",
113
+ "model.layers.36.self_attn.kv_b_proj",
114
+ "model.layers.37.self_attn.kv_b_proj",
115
+ "model.layers.38.self_attn.kv_b_proj",
116
+ "model.layers.39.self_attn.kv_b_proj",
117
+ "model.layers.40.self_attn.kv_b_proj",
118
+ "model.layers.41.self_attn.kv_b_proj",
119
+ "model.layers.42.self_attn.kv_b_proj",
120
+ "model.layers.43.self_attn.kv_b_proj",
121
+ "model.layers.44.self_attn.kv_b_proj",
122
+ "model.layers.45.self_attn.kv_b_proj",
123
+ "model.layers.46.self_attn.kv_b_proj",
124
+ "model.layers.47.self_attn.kv_b_proj",
125
+ "model.layers.48.self_attn.kv_b_proj",
126
+ "model.layers.49.self_attn.kv_b_proj",
127
+ "model.layers.50.self_attn.kv_b_proj",
128
+ "model.layers.51.self_attn.kv_b_proj",
129
+ "model.layers.52.self_attn.kv_b_proj",
130
+ "model.layers.53.self_attn.kv_b_proj",
131
+ "model.layers.54.self_attn.kv_b_proj",
132
+ "model.layers.55.self_attn.kv_b_proj",
133
+ "model.layers.56.self_attn.kv_b_proj",
134
+ "model.layers.57.self_attn.kv_b_proj",
135
+ "model.layers.58.self_attn.kv_b_proj",
136
+ "model.layers.59.self_attn.kv_b_proj",
137
+ "model.layers.60.self_attn.kv_b_proj",
138
+ "lm_head",
139
+ "model.layers.61.self_attn.kv_b_proj",
140
+ "model.layers.61.shared_head.head"
141
+ ],
142
+ "kv_cache_scheme": null,
143
+ "quant_method": "compressed-tensors",
144
+ "quantization_status": "compressed"
145
+ }
146
+ }
configuration_openpangu_moe.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright (c) 2025 Huawei Technologies Co., Ltd. All rights reserved.
3
+
4
+ """openPanguUltraMoE 718B model configuration"""
5
+
6
+ from transformers.configuration_utils import PretrainedConfig
7
+
8
+
9
+ class PanguUltraMoEConfig(PretrainedConfig):
10
+
11
+ model_type = "pangu_ultra_moe"
12
+ keys_to_ignore_at_inference = ["past_key_values"]
13
+
14
+ def __init__(
15
+ self,
16
+ vocab_size=153600,
17
+ hidden_size=7680,
18
+ intermediate_size=18432,
19
+ moe_intermediate_size=2048,
20
+ num_hidden_layers=61,
21
+ num_mtp_layers=1,
22
+ num_attention_heads=128,
23
+ num_key_value_heads=128,
24
+ num_shared_experts=1,
25
+ num_routed_experts=256,
26
+ routed_scaling_factor=2.5,
27
+ attention_kv_lora_dim=512,
28
+ attention_q_lora_dim=1536,
29
+ attention_qk_rope_dim=64,
30
+ attention_v_dim=128,
31
+ attention_qk_dim=128,
32
+ num_experts_per_tok=8,
33
+ num_dense_layers=3,
34
+ norm_topk_prob=True,
35
+ hidden_act="silu",
36
+ max_position_embeddings=131072,
37
+ initializer_range=0.02,
38
+ rms_norm_eps=1e-5,
39
+ use_cache=True,
40
+ pad_token_id=None,
41
+ bos_token_id=0,
42
+ eos_token_id=1,
43
+ tie_word_embeddings=False,
44
+ rope_theta=25600000,
45
+ attention_dropout=0.0,
46
+ **kwargs,
47
+ ):
48
+ self.vocab_size = vocab_size
49
+ self.max_position_embeddings = max_position_embeddings
50
+ self.hidden_size = hidden_size
51
+ self.num_hidden_layers = num_hidden_layers
52
+ self.num_attention_heads = num_attention_heads
53
+ self.num_key_value_heads = num_key_value_heads
54
+ self.hidden_act = hidden_act
55
+ self.initializer_range = initializer_range
56
+ self.rms_norm_eps = rms_norm_eps
57
+ self.use_cache = use_cache
58
+ self.rope_theta = rope_theta
59
+
60
+ self.num_dense_layers = num_dense_layers
61
+ self.intermediate_size = intermediate_size
62
+ self.moe_intermediate_size = moe_intermediate_size
63
+ self.num_shared_experts = num_shared_experts
64
+ self.num_routed_experts = num_routed_experts
65
+ self.routed_scaling_factor = routed_scaling_factor
66
+ self.num_experts_per_tok = num_experts_per_tok
67
+ self.norm_topk_prob = norm_topk_prob
68
+ self.attention_kv_lora_dim = attention_kv_lora_dim
69
+ self.attention_q_lora_dim = attention_q_lora_dim
70
+ self.attention_qk_rope_dim = attention_qk_rope_dim
71
+ self.attention_v_dim = attention_v_dim
72
+ self.attention_qk_dim = attention_qk_dim
73
+ self.attention_dropout = attention_dropout
74
+ self.num_mtp_layers = num_mtp_layers
75
+
76
+ super().__init__(
77
+ pad_token_id=pad_token_id,
78
+ bos_token_id=bos_token_id,
79
+ eos_token_id=eos_token_id,
80
+ tie_word_embeddings=tie_word_embeddings,
81
+ **kwargs,
82
+ )
doc/omniinfer_for_openPangu-Ultra-MoE-718B-V1.1-Int8.md ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # openPangu-Ultra-MoE-718B-V1.1-Int8在Omni-Infer部署指导文档
2
+
3
+ ## 硬件环境和部署方式
4
+ 部署方式为4P1D,需要8台Atlas 800T A3机器。4个P实例中,每个P实例对应1台A3机器,1个D实例由4台A3机器组成。
5
+
6
+ ## 代码和镜像
7
+ - Omni-Infer代码版本:v0.4.1
8
+ - 配套镜像:参考 https://gitee.com/omniai/omniinfer/releases 中v0.4.1镜像,以A3硬件和arm架构为例,使用“docker pull swr.cn-east-4.myhuaweicloud.com/omni/omni_infer-a3-arm:release_v0.4.1”。
9
+
10
+ ## 拉起步骤
11
+ 参考 https://gitee.com/omniai/omniinfer/blob/v0.4.1/tools/ansible/template/README.md, 对于openPangu-Ultra-MoE-718B-V1.1-Int8拉起方式如下:
12
+ ### 1. 环境准备
13
+ 在执行机安装ansible-playbook和sshpass,准备密钥文件
14
+ ```
15
+ yum install ansible
16
+ # 执行机安装 ansible-playbook
17
+
18
+ yum install openssh-server
19
+ # 执行机安装 sshpass
20
+
21
+ ssh-keygen -t ed25519 -C "Your SSH key comment" -f ~/.ssh/id_ed25519 # -t 指定密钥类型(推荐ed25519), -f 指定文件名(需与后续chmod/ssh-copy-id使用的文件名一致)
22
+ # 在执行机生成密钥对
23
+ chmod 700 ~/.ssh
24
+ chmod 600 ~/.ssh/id_ed25519 # 私钥必须设为 600
25
+ chmod 644 ~/.ssh/id_ed25519.pub
26
+ # 设置密钥文件权限
27
+ ssh-copy-id -i ~/.ssh/id_ed25519.pub user@remote-host
28
+ # 部署公钥到远程目标机
29
+ ```
30
+ ### 2. 修改配置
31
+ 修改omni_infer_inventory_used_for_4P1D.yml和omni_infer_server_template.yml
32
+
33
+ (1) tools/ansible/template/omni_infer_inventory_used_for_4P1D.yml
34
+
35
+ 修改ansible_host和host_ip。以4P1D为例:ansible_host即为机器对应的ip。host_ip的具体规则是,p节点的host_ip和ansible_host保持一致;d节点的host_ip和d0的ansible_host保持一致。c0的ansible_host与p0的ansible_host一般保持一致,即在同一台机器上。
36
+
37
+ (2) tools/ansible/template/omni_infer_server_template.yml
38
+
39
+ 修改 MODEL_PATH、DOCKER_IMAGE_ID、 CODE_PATH、DOCKER_NAME_P、DOCKER_NAME_D和DOCKER_NAME_C。建议修改 LOG_PATH、LOG_PATH_IN_EXECUTOR、SCRIPTS_PATH 和 ranktable_save_path,防止路径下的文件被其他人覆盖。
40
+
41
+ (3) 除(2)外,openPangu-Ultra-MoE-718B-V1.1-Int8在tools/ansible/template/omni_infer_server_template.yml上的其它改动,如下所示:
42
+ ```
43
+ MODEL_EXTRA_CFG_PATH="/workspace/omniinfer/tests/test_config/test_config_prefill_pangu_ultra_moe.json"
44
+ # 179行
45
+
46
+ EXTRA_ARGS='--max-num-batched-tokens 30000 --enforce-eager --enable-expert-parallel --disable-log-requests --max-num-seqs 16 --no-enable-prefix-caching --enable-reasoning --reasoning-parser pangu --enable-auto-tool-choice --tool-call-parser pangu'
47
+ # 180行
48
+
49
+ PROFILING_NAMELIST=/workspace/omniinfer/omni/adaptors/vllm/patches/profiler_patches/proc_bind/proc_marker_namelist.yml bash /workspace/omniinfer/tools/scripts/pd_run_pangu_ultra_moe.sh \
50
+ # 215行
51
+
52
+ MODEL_EXTRA_CFG_PATH="/workspace/omniinfer/tests/test_config/test_config_decode_pangu_ultra_moe.json"
53
+ # 266行
54
+
55
+ EXTRA_ARGS='--enable-expert-parallel --disable-log-requests --max-num-seqs 32 --no-enable-prefix-caching --enable-reasoning --reasoning-parser pangu --enable-auto-tool-choice --tool-call-parser pangu'
56
+ # 267行
57
+
58
+ PROFILING_NAMELIST=/workspace/omniinfer/omni/adaptors/vllm/patches/profiler_patches/proc_bind/proc_marker_namelist.yml bash /workspace/omniinfer/tools/scripts/pd_run_pangu_ultra_moe.sh \
59
+ # 283行
60
+ ```
61
+ ### 3. 执行命令
62
+ 拉取对应代码,包括omniinfer和vllm,之后使用ansible执行,如下所示:
63
+ ```
64
+ cd /data/local_code_path
65
+ git clone -b v0.4.1 https://gitee.com/omniai/omniinfer.git
66
+ cd omniinfer/infer_engines/
67
+ git clone https://github.com/vllm-project/vllm.git 或者 git clone https://gitee.com/mirrors/vllm.git
68
+ cd omniinfer/tools/ansible/template
69
+ # 拉取omniinfer和vllm代码并进入ansible文件路径
70
+
71
+ ansible-playbook -i omni_infer_inventory_used_for_4P1D.yml omni_infer_server_template.yml
72
+ # 一键式拉起服务
73
+
74
+ ansible-playbook -i omni_infer_inventory_used_for_4P1D.yml omni_infer_server_template.yml --tags clean_up
75
+ # 一键式关闭服务并删除容器
76
+ ```
77
+ ### 4. 测试
78
+ 在c0对应机器上测试(或使用c0的ip,端口默认7000)
79
+ ```
80
+ curl --location 'http://0.0.0.0:7000/v1/chat/completions' --header 'Content-Type: application/json' --data '{
81
+ "model": "pangu_ultra_moe",
82
+ "messages": [{"role": "user", "content": "世界上有几个大洲?"}],
83
+ "temperature": 0,
84
+ "stream": false
85
+ }'
86
+ ```
doc/omniinfer_for_openPangu-Ultra-MoE-718B-V1.1-Int8_EN.md ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Deployment Guide for openPangu-Ultra-MoE-718B-V1.1-Int8 on Omni-Infer
2
+
3
+ ## Hardware Environment and Deployment Method
4
+ The deployment method is 4P1D, requiring 8 Atlas 800T A3 machines. Each of the 4 P instances corresponds to 1 A3 machine, and the 1 D instance is composed of 4 A3 machines.
5
+
6
+ ## Codes and Image
7
+ - Omni-Infer code version: v0.4.1
8
+ - Docker Image: Refer to the v0.4.1 image in https://gitee.com/omniai/omniinfer/releases. For example, for A3 hardware and ARM architecture, use "docker pull swr.cn-east-4.myhuaweicloud.com/omni/omni_infer-a3-arm:release_v0.4.1".
9
+
10
+ ## Steps to Start
11
+ Refer to https://gitee.com/omniai/omniinfer/blob/v0.4.1/tools/ansible/template/README.md. For openPangu-Ultra-MoE-718B-V1.1-Int8, the steps are as follows:
12
+
13
+ ### 1. Environment Preparation
14
+ Install ansible-playbook and sshpass on the execution machine, and prepare the key file
15
+ ```
16
+ yum install ansible
17
+ # Install ansible-playbook on the execution machine
18
+
19
+ yum install openssh-server
20
+ # Install sshpass on the execution machine
21
+
22
+ ssh-keygen -t ed25519 -C "Your SSH key comment" -f ~/.ssh/id_ed25519 # -t specifies the key type (recommended ed25519), -f specifies the filename (must match the filename used by the chmod/ssh-copy-id steps below)
23
+ # Generate key pair on the execution machine
24
+ chmod 700 ~/.ssh
25
+ chmod 600 ~/.ssh/id_ed25519 # The private key must be set to 600
26
+ chmod 644 ~/.ssh/id_ed25519.pub
27
+ # Set key file permissions
28
+ ssh-copy-id -i ~/.ssh/id_ed25519.pub user@remote-host
29
+ # Deploy the public key to the remote target machine
30
+ ```
31
+
32
+ ### 2. Modify Configurations
33
+ Edit omni_infer_inventory_used_for_4P1D.yml and omni_infer_server_template.yml
34
+
35
+ (1) tools/ansible/template/omni_infer_inventory_used_for_4P1D.yml
36
+
37
+ Modify ansible_host and host_ip. For example, for 4P1D: ansible_host is the IP of the corresponding machine. The specific rule for host_ip is that the host_ip of p nodes is consistent with ansible_host; the host_ip of d nodes is consistent with the ansible_host of d0. The ansible_host of c0 is generally consistent with the ansible_host of p0, i.e., on the same machine.
38
+
39
+ (2) tools/ansible/template/omni_infer_server_template.yml
40
+
41
+ Modify MODEL_PATH, DOCKER_IMAGE_ID, CODE_PATH, DOCKER_NAME_P, DOCKER_NAME_D, and DOCKER_NAME_C. It is recommended to modify LOG_PATH, LOG_PATH_IN_EXECUTOR, SCRIPTS_PATH, and ranktable_save_path to prevent files under these paths from being overwritten by others.
42
+
43
+ (3) For openPangu-Ultra-MoE-718B-V1.1-Int8, in addition to (2), the following changes should also be made in tools/ansible/template/omni_infer_server_template.yml:
44
+ ```
45
+ MODEL_EXTRA_CFG_PATH="/workspace/omniinfer/tests/test_config/test_config_prefill_pangu_ultra_moe.json"
46
+ # Line 179
47
+
48
+ EXTRA_ARGS='--max-num-batched-tokens 30000 --enforce-eager --enable-expert-parallel --disable-log-requests --max-num-seqs 16 --no-enable-prefix-caching --enable-reasoning --reasoning-parser pangu --enable-auto-tool-choice --tool-call-parser pangu'
49
+ # Line 180
50
+
51
+ PROFILING_NAMELIST=/workspace/omniinfer/omni/adaptors/vllm/patches/profiler_patches/proc_bind/proc_marker_namelist.yml bash /workspace/omniinfer/tools/scripts/pd_run_pangu_ultra_moe.sh \
52
+ # Line 215
53
+
54
+ MODEL_EXTRA_CFG_PATH="/workspace/omniinfer/tests/test_config/test_config_decode_pangu_ultra_moe.json"
55
+ # Line 266
56
+
57
+ EXTRA_ARGS='--enable-expert-parallel --disable-log-requests --max-num-seqs 32 --no-enable-prefix-caching --enable-reasoning --reasoning-parser pangu --enable-auto-tool-choice --tool-call-parser pangu'
58
+ # Line 267
59
+
60
+ PROFILING_NAMELIST=/workspace/omniinfer/omni/adaptors/vllm/patches/profiler_patches/proc_bind/proc_marker_namelist.yml bash /workspace/omniinfer/tools/scripts/pd_run_pangu_ultra_moe.sh \
61
+ # Line 283
62
+ ```
63
+
64
+ ### 3. Execute Commands
65
+ Pull the corresponding code, including omniinfer and vllm, and then execute using ansible, as shown below:
66
+ ```
67
+ cd /data/local_code_path
68
+ git clone -b v0.4.1 https://gitee.com/omniai/omniinfer.git
69
+ cd omniinfer/infer_engines/
70
+ git clone https://github.com/vllm-project/vllm.git or git clone https://gitee.com/mirrors/vllm.git
71
+ cd omniinfer/tools/ansible/template
72
+ # Pull `omniinfer` and `vllm` code and navigate to the ansible file path
73
+
74
+ ansible-playbook -i omni_infer_inventory_used_for_4P1D.yml omni_infer_server_template.yml
75
+ # One-click service startup
76
+
77
+ ansible-playbook -i omni_infer_inventory_used_for_4P1D.yml omni_infer_server_template.yml --tags clean_up
78
+ # One-click service shutdown and container deletion
79
+ ```
80
+
81
+ ### 4. Test
82
+ Test on the machine corresponding to c0 (or use the IP of c0, default port 7000)
83
+ ```
84
+ curl --location 'http://0.0.0.0:7000/v1/chat/completions' --header 'Content-Type: application/json' --data '{
85
+ "model": "pangu_ultra_moe",
86
+ "messages": [{"role": "user", "content": "How many continents are there in the world?"}],
87
+ "temperature": 0,
88
+ "stream": false
89
+ }'
90
+ ```
generation_config.json ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 0,
4
+ "eos_token_id": 45892,
5
+ "do_sample": true,
6
+ "temperature": 0.7,
7
+ "top_p": 1.0,
8
+ "top_n_sigma": 0.05,
9
+ "top_k": -1,
10
+ "transformers_version": "4.48.2"
11
+ }
output_metadata.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "version": "1.0",
3
+ "models": [
4
+ {
5
+ "name": "model",
6
+ "path": "/",
7
+ "description": "export model",
8
+ "purpose": [
9
+ "train"
10
+ ],
11
+ "metrics": [
12
+ {
13
+ "name": "loss",
14
+ "value": 0.0
15
+ }
16
+ ]
17
+ }
18
+ ]
19
+ }
quant_model_description.json ADDED
The diff for this file is too large to render. See raw diff
 
quant_model_weight_w8a8_dynamic-00011-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7de1b82c3a81fc81a2269ec0481e05ecc110660aa3323a8af16906916f1d8975
3
+ size 4282567656
quant_model_weight_w8a8_dynamic-00013-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b7cdf149434f4d7772086c0c4167a2fcada01efbfb07b72b471fdd16bd829d4
3
+ size 4279809864
quant_model_weight_w8a8_dynamic-00016-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:59a5828d23b56503ee2a02eee1b81de41a26134f53be37ab9e5d0c8758992bc9
3
+ size 4279809776
quant_model_weight_w8a8_dynamic-00019-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:05ab2de8a99e4a1d19a8568c740627bb1199336ac110b4f9ead07c8c544b8767
3
+ size 4279809688
quant_model_weight_w8a8_dynamic-00023-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:91cf73b43c13100779e4ca2e798ea1a22ccddd6593cd532758acd5d772ebabc8
3
+ size 4282546304
quant_model_weight_w8a8_dynamic-00026-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ca4fea490d1b06baac781036700aca3e5e3ef6b630ddfc3684e5c2f5967f1c76
3
+ size 4282568928
quant_model_weight_w8a8_dynamic-00033-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4aa60fee51b7fc35c226732df1234a47fc193557746aa2c0da8af2b55ace224
3
+ size 4279810840
quant_model_weight_w8a8_dynamic-00037-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e7655048d67cf7b41c50055afbe8f56f27ed1c0d5916c31ec6735d8b2d321f0
3
+ size 4282568480
quant_model_weight_w8a8_dynamic-00043-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71f50f1d39e3aad5c7db39f1c3d1f310f0a1794a92bf64c1f3cf8cf4606fc03e
3
+ size 4282546136
quant_model_weight_w8a8_dynamic-00049-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:777f2de6674f055c941751b31886e99e1a17a4264aac692c8171ad6a875e69d3
3
+ size 4282568856
quant_model_weight_w8a8_dynamic-00055-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d5a0da7a796a468f26fcd22c852810b8017051ac91fc9f8bec1b562611398da2
3
+ size 4282546488
quant_model_weight_w8a8_dynamic-00056-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d65510ef8a8a6d51d093a30445d393171ebb2f4cf4e8e44c7213dcc0583b9540
3
+ size 4279810960
quant_model_weight_w8a8_dynamic-00057-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d155d2329a84c8b1058e560a94e8c534071c22edbffb477eea32efbea02fabe0
3
+ size 4282568296
quant_model_weight_w8a8_dynamic-00068-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3afe4643ee621001b52b5b669863a5d0a6d2d2c1744e183c4407857a9e97524a
3
+ size 4279810560
quant_model_weight_w8a8_dynamic-00069-of-00170.safetensors ADDED
File without changes
quant_model_weight_w8a8_dynamic-00071-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f5cc9da4b6f80304d2f7e183a6bf86396073642615686e0c32654c2a7df39a5c
3
+ size 4279810472
quant_model_weight_w8a8_dynamic-00084-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7edb02663511cf70bf943f74eb61410cb74a6f2e7f6e1003cc8c2b2203e4e24
3
+ size 4282569112
quant_model_weight_w8a8_dynamic-00087-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdcd01aecd5d953cab72b64a2942ec708c58d58d59a4f0343a7eed667850c544
3
+ size 4282569144
quant_model_weight_w8a8_dynamic-00093-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd8156f17484348164782c441f69a85eb3a3f92110a26ea6b19bb497b4cddbc9
3
+ size 4282569128
quant_model_weight_w8a8_dynamic-00094-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:826cf93c6a1c84deedb9bfe56abcb546af8776b6a73818e71f10da79ec26c9f8
3
+ size 4279810560
quant_model_weight_w8a8_dynamic-00098-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:875c15304fc713b17ffd3df4a3855da14ccb9e57105a9e0c9aee32864b9ef67c
3
+ size 4282568760
quant_model_weight_w8a8_dynamic-00112-of-00170.safetensors ADDED
File without changes
quant_model_weight_w8a8_dynamic-00114-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c15cd7ffe4a966f860e3f23f6b310fa3fb807e18774a6cb715e836bcf8e0a47
3
+ size 4279810752
quant_model_weight_w8a8_dynamic-00117-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:31087e5aeb7471dea2e5f60b54241aa991841b2d1a4779c5bd35c9cbf8114633
3
+ size 4279810664
quant_model_weight_w8a8_dynamic-00118-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e00288f3b561688091138ab0bbd7fff668cf853b9721c5ce7333d93c9baacbe
3
+ size 4282568576
quant_model_weight_w8a8_dynamic-00121-of-00170.safetensors ADDED
File without changes
quant_model_weight_w8a8_dynamic-00128-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3f319206ecdf24bc2d6d18791b4c6db7783adf051a691332f7eec60e56fe497a
3
+ size 4282569128
quant_model_weight_w8a8_dynamic-00131-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cacf83f72bc35de35d34880a2a00d3c4dc431f6bcc65332df6cdbb85c3104124
3
+ size 4169710640
quant_model_weight_w8a8_dynamic-00139-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:601328dcc78d7a968d72ecd8a52157f83f1e3e7656e9ef67f1fd9ed74ed9e8b8
3
+ size 4282569144
quant_model_weight_w8a8_dynamic-00147-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:585dbc4fa42444d9def4b0d1e9b33ee1e87a5e0f3f908e871ccb0ca5caa7ebd3
3
+ size 4282546136
quant_model_weight_w8a8_dynamic-00148-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c5adc82f8f6e5e23a758542f78c09ad7da103897d53439cce47944675f618bcf
3
+ size 4282569144
quant_model_weight_w8a8_dynamic-00160-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93023efb525e7b32cacafc47f183f6be6d89ede757e84583264b67fb8aaf27fc
3
+ size 4279810960
quant_model_weight_w8a8_dynamic-00161-of-00170.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fecd8ecc7f9fc2b0a22ba31c8d34fc6d00730aafbae446ea36de7744ff7b3a2b
3
+ size 4282568296
quant_model_weight_w8a8_dynamic.safetensors.index.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:79848d3c3a2f20e590738abe9c415e032b8f8d629cfa58be993dc86f25d78047
3
+ size 17209018
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<s>",
4
+ "lstrip": false,
5
+ "normalized": false,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "[unused10]",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<unk>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6b16f1558c0cd4ae6ef1a2c605713be0a514f50e1ce2d2c878979ce988c148ec
3
+ size 2477809
tokenizer_config.json ADDED
@@ -0,0 +1,337 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": false,
3
+ "add_eos_token": false,
4
+ "add_prefix_space": true,
5
+ "added_tokens_decoder": {
6
+ "0": {
7
+ "content": "<unk>",
8
+ "lstrip": false,
9
+ "normalized": false,
10
+ "rstrip": false,
11
+ "single_word": false,
12
+ "special": true
13
+ },
14
+ "1": {
15
+ "content": "<s>",
16
+ "lstrip": false,
17
+ "normalized": false,
18
+ "rstrip": false,
19
+ "single_word": false,
20
+ "special": true
21
+ },
22
+ "2": {
23
+ "content": "</s>",
24
+ "lstrip": false,
25
+ "normalized": false,
26
+ "rstrip": false,
27
+ "single_word": false,
28
+ "special": true
29
+ },
30
+ "45806": {
31
+ "content": "<|User|>:",
32
+ "lstrip": false,
33
+ "normalized": false,
34
+ "rstrip": false,
35
+ "single_word": false,
36
+ "special": true
37
+ },
38
+ "45813": {
39
+ "content": "<|Bot|>:",
40
+ "lstrip": false,
41
+ "normalized": false,
42
+ "rstrip": false,
43
+ "single_word": false,
44
+ "special": true
45
+ },
46
+ "45830": {
47
+ "content": "[unused0]",
48
+ "lstrip": false,
49
+ "normalized": false,
50
+ "rstrip": false,
51
+ "single_word": false,
52
+ "special": true
53
+ },
54
+ "45840": {
55
+ "content": "[unused1]",
56
+ "lstrip": false,
57
+ "normalized": false,
58
+ "rstrip": false,
59
+ "single_word": false,
60
+ "special": true
61
+ },
62
+ "45846": {
63
+ "content": "[unused2]",
64
+ "lstrip": false,
65
+ "normalized": false,
66
+ "rstrip": false,
67
+ "single_word": false,
68
+ "special": true
69
+ },
70
+ "45849": {
71
+ "content": "[unused3]",
72
+ "lstrip": false,
73
+ "normalized": false,
74
+ "rstrip": false,
75
+ "single_word": false,
76
+ "special": true
77
+ },
78
+ "45861": {
79
+ "content": "[unused4]",
80
+ "lstrip": false,
81
+ "normalized": false,
82
+ "rstrip": false,
83
+ "single_word": false,
84
+ "special": true
85
+ },
86
+ "45866": {
87
+ "content": "[unused5]",
88
+ "lstrip": false,
89
+ "normalized": false,
90
+ "rstrip": false,
91
+ "single_word": false,
92
+ "special": true
93
+ },
94
+ "45874": {
95
+ "content": "[unused6]",
96
+ "lstrip": false,
97
+ "normalized": false,
98
+ "rstrip": false,
99
+ "single_word": false,
100
+ "special": true
101
+ },
102
+ "45883": {
103
+ "content": "[unused7]",
104
+ "lstrip": false,
105
+ "normalized": false,
106
+ "rstrip": false,
107
+ "single_word": false,
108
+ "special": true
109
+ },
110
+ "45884": {
111
+ "content": "[unused8]",
112
+ "lstrip": false,
113
+ "normalized": false,
114
+ "rstrip": false,
115
+ "single_word": false,
116
+ "special": true
117
+ },
118
+ "45887": {
119
+ "content": "[unused9]",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false,
124
+ "special": true
125
+ },
126
+ "45892": {
127
+ "content": "[unused10]",
128
+ "lstrip": false,
129
+ "normalized": false,
130
+ "rstrip": false,
131
+ "single_word": false,
132
+ "special": true
133
+ },
134
+ "45920": {
135
+ "content": "[unused11]",
136
+ "lstrip": false,
137
+ "normalized": false,
138
+ "rstrip": false,
139
+ "single_word": false,
140
+ "special": true
141
+ },
142
+ "45932": {
143
+ "content": "[unused12]",
144
+ "lstrip": false,
145
+ "normalized": false,
146
+ "rstrip": false,
147
+ "single_word": false,
148
+ "special": true
149
+ },
150
+ "45938": {
151
+ "content": "[unused13]",
152
+ "lstrip": false,
153
+ "normalized": false,
154
+ "rstrip": false,
155
+ "single_word": false,
156
+ "special": true
157
+ },
158
+ "45953": {
159
+ "content": "[unused14]",
160
+ "lstrip": false,
161
+ "normalized": false,
162
+ "rstrip": false,
163
+ "single_word": false,
164
+ "special": true
165
+ },
166
+ "45968": {
167
+ "content": "[unused15]",
168
+ "lstrip": false,
169
+ "normalized": false,
170
+ "rstrip": false,
171
+ "single_word": false,
172
+ "special": true
173
+ },
174
+ "45974": {
175
+ "content": "[unused16]",
176
+ "lstrip": false,
177
+ "normalized": false,
178
+ "rstrip": false,
179
+ "single_word": false,
180
+ "special": true
181
+ },
182
+ "45982": {
183
+ "content": "[unused17]",
184
+ "lstrip": false,
185
+ "normalized": false,
186
+ "rstrip": false,
187
+ "single_word": false,
188
+ "special": true
189
+ },
190
+ "45986": {
191
+ "content": "[unused18]",
192
+ "lstrip": false,
193
+ "normalized": false,
194
+ "rstrip": false,
195
+ "single_word": false,
196
+ "special": true
197
+ },
198
+ "46005": {
199
+ "content": "[unused19]",
200
+ "lstrip": false,
201
+ "normalized": false,
202
+ "rstrip": false,
203
+ "single_word": false,
204
+ "special": true
205
+ },
206
+ "46007": {
207
+ "content": "[unused20]",
208
+ "lstrip": false,
209
+ "normalized": false,
210
+ "rstrip": false,
211
+ "single_word": false,
212
+ "special": true
213
+ },
214
+ "46014": {
215
+ "content": "[unused21]",
216
+ "lstrip": false,
217
+ "normalized": false,
218
+ "rstrip": false,
219
+ "single_word": false,
220
+ "special": true
221
+ },
222
+ "46017": {
223
+ "content": "[unused22]",
224
+ "lstrip": false,
225
+ "normalized": false,
226
+ "rstrip": false,
227
+ "single_word": false,
228
+ "special": true
229
+ },
230
+ "46028": {
231
+ "content": "[unused23]",
232
+ "lstrip": false,
233
+ "normalized": false,
234
+ "rstrip": false,
235
+ "single_word": false,
236
+ "special": true
237
+ },
238
+ "46032": {
239
+ "content": "[unused24]",
240
+ "lstrip": false,
241
+ "normalized": false,
242
+ "rstrip": false,
243
+ "single_word": false,
244
+ "special": true
245
+ },
246
+ "46081": {
247
+ "content": "[unused25]",
248
+ "lstrip": false,
249
+ "normalized": false,
250
+ "rstrip": false,
251
+ "single_word": false,
252
+ "special": true
253
+ },
254
+ "46086": {
255
+ "content": "[unused26]",
256
+ "lstrip": false,
257
+ "normalized": false,
258
+ "rstrip": false,
259
+ "single_word": false,
260
+ "special": true
261
+ },
262
+ "46101": {
263
+ "content": "[unused27]",
264
+ "lstrip": false,
265
+ "normalized": false,
266
+ "rstrip": false,
267
+ "single_word": false,
268
+ "special": true
269
+ },
270
+ "46183": {
271
+ "content": "[unused28]",
272
+ "lstrip": false,
273
+ "normalized": false,
274
+ "rstrip": false,
275
+ "single_word": false,
276
+ "special": true
277
+ },
278
+ "46230": {
279
+ "content": "[unused29]",
280
+ "lstrip": false,
281
+ "normalized": false,
282
+ "rstrip": false,
283
+ "single_word": false,
284
+ "special": true
285
+ },
286
+ "46245": {
287
+ "content": "[unused30]",
288
+ "lstrip": false,
289
+ "normalized": false,
290
+ "rstrip": false,
291
+ "single_word": false,
292
+ "special": true
293
+ },
294
+ "46257": {
295
+ "content": "[unused31]",
296
+ "lstrip": false,
297
+ "normalized": false,
298
+ "rstrip": false,
299
+ "single_word": false,
300
+ "special": true
301
+ },
302
+ "144208": {
303
+ "content": "[unused32]",
304
+ "lstrip": false,
305
+ "normalized": false,
306
+ "rstrip": false,
307
+ "single_word": false,
308
+ "special": true
309
+ },
310
+ "144209": {
311
+ "content": "[unused33]",
312
+ "lstrip": false,
313
+ "normalized": false,
314
+ "rstrip": false,
315
+ "single_word": false,
316
+ "special": true
317
+ }
318
+ },
319
+ "auto_map": {
320
+ "AutoTokenizer": [
321
+ "tokenization_openpangu.PanguUltraMoETokenizer",
322
+ null
323
+ ]
324
+ },
325
+ "bos_token": "<s>",
326
+ "clean_up_tokenization_spaces": false,
327
+ "eos_token": "[unused10]",
328
+ "legacy": true,
329
+ "model_max_length": 1000000000000000019884624838656,
330
+ "pad_token": "<unk>",
331
+ "sp_model_kwargs": {},
332
+ "spaces_between_special_tokens": false,
333
+ "tokenizer_class": "PanguUltraMoETokenizer",
334
+ "unk_token": "<unk>",
335
+ "use_default_system_prompt": false,
336
+ "chat_template": "{%- set ns = namespace(is_first_tool=true) %}\n{%- if not mcp_prompt is defined %}\n {%- set mcp_prompt = true %}\n{%- endif %}\n{%- if not background is defined %}\n {%- set background = none %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n{%- if not think is defined %}\n {%- set think = true %}\n{%- endif %}\n\n{{- '[unused9]系统:' -}}\n{#- 提取系统消息 #}\n{%- set system_message = \"\" %}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n{%- endif %}\n{#- 如果传入工具将使用mcp人设,可以使用mcp_prompt字段禁用 #}\n{%- if mcp_prompt and tools %}\n {%- if system_message %}\n {%- set system_message = system_message + \"\\n\" %}\n {%- endif %}\n {%- set system_message = system_message + \"你是一个能够调用外部工具解决问题的专家,你的目标是高效、准确、清晰地完成任务。\\n你需要根据用户的问题,决定是否需要使用工具来完成任务。如果需要,请以明确的格式调用工具;如果不需要,请直接回答。\\n你可以根据上下文决定是否继续调用工具或基于已有结果直接回答用户。如果工具调用已足够,请合理组织语言向用户汇报结论。在没有获得显式的调用结果之前,在调用工具的当轮回复之内严禁虚构或者假设一个工具调用结果来完成任务或者回答问题。也不应在没有返回工具调用信息的情况下,在调用工具的当轮假设或者明确声称工具执行成功。\" %}\n{%- endif %}\n{{- system_message -}}\n\n{#- 工具使用描述和规范调用格式 #}\n{%- if tools %}\n {{- '\\n你将在<tools></tools>标签对内获得每个工具的描述:\\n<tools>\\n' }}\n {{- tools | tojson(ensure_ascii=False, sort_keys=False) }}\n {{- '\\n</tools>\\n' }}\n {{- \"对于每个函数调用,返回一个 JSON 对象,放在 [unused11][unused12] 标签对中,多个调用组成一个列表,其中每个函数包含函数名和对应函数的参数,格式如下:\\n\" }}\n {{- '[unused11]\\n[{\"name\": \"<函数名1>\", \"arguments\": <args1 json对象>}, {\"name\": \"<函数名2>\", \"arguments\": <args2 json对象>}, ...]\\n[unused12]' }}\n {{- '\\n<工具使用原则>\\n1. 只有在所有必填参数(required字段中列出的)都具��有效值时,才能调用该函数\\n2. 如果缺少任何必填参数,必须向用户询问缺失的参数,而不是直接调用函数\\n3. 
可选参数如果没有提供可以忽略或使用默认值\\n</工具使用原则>' }}\n{%- endif %}\n\n{#- 背景信息字段 #}\n{%- if background is not none and background -%}\n {{- '\\n<背景信息>' -}}\n {{- background -}}\n {{- '</背景信息>' -}}\n{%- endif %}\n\n{%- if messages | length == 0 and not think %}\n {{- \" /no_think\" -}}\n{%- endif %}\n{{- '[unused10]' -}}\n\n{%- if messages | length != 0 %}\n {%- for message in messages[:-1] %}\n {%- if message['role'] == 'user' %}\n {{- '[unused9]用户:' + message['content'] -}}\n {%- if message.get('tool_calls') %}\n {{- '[unused11]\\n[' }}\n {%- set function_list = message.tool_calls | selectattr('function') | map(attribute='function') | list %}\n {%- for function_item in function_list %}\n {%- if not ns.is_first_tool %}\n {{- ', ' -}}\n {%- endif %}\n {{- '{\"name\": \"' + function_item.name + '\", \"arguments\": ' + function_item.arguments | tojson(ensure_ascii=False, sort_keys=False) + '}' -}}\n {%- set ns.is_first_tool = false %}\n {%- endfor %}\n {%- set ns.is_first_tool = true %}\n {{- ']\\n[unused12]' }}\n {%- endif %}\n {{- \" /no_think\" -}}\n {{- '[unused10]' -}}\n {%- endif %}\n\n {%- if message['role'] == 'assistant'%}\n {{- '[unused9]助手:[unused16][unused17]' -}}\n {{- message['content'] -}}\n {%- if message.get('tool_calls') %}\n {{- '[unused11]\\n[' }}\n {%- set function_list = message.tool_calls | selectattr('function') | map(attribute='function') | list %}\n {%- for function_item in function_list %}\n {%- if not ns.is_first_tool %}\n {{- ', ' -}}\n {%- endif %}\n {{- '{\"name\": \"' + function_item.name + '\", \"arguments\": ' + function_item.arguments | tojson(ensure_ascii=False, sort_keys=False) + '}' -}}\n {%- set ns.is_first_tool = false %}\n {%- endfor %}\n {%- set ns.is_first_tool = true %}\n {{- ']\\n[unused12]' }}\n {%- endif %}\n {{- '[unused10]' }}\n {%- endif %}\n \n {%- if message['role'] == 'tool' %}\n {{- '[unused9]' -}}\n {{- '工具:' + message['content'] + \" /no_think\" -}}\n {{- '[unused10]' -}}\n {%- endif %}\n {%- endfor %}\n \n {#- 处理最后一个角色,判断快慢思考 
#}\n {%- if messages[-1]['role'] == \"user\" %}\n {{- '[unused9]' -}}\n {{- '用户:' + messages[-1]['content'] -}}\n {%- if messages[-1].get('tool_calls') %}\n {{- '[unused11]\\n[' }}\n {%- set function_list = messages[-1].tool_calls | selectattr('function') | map(attribute='function') | list %}\n {%- for function_item in function_list %}\n {%- if not ns.is_first_tool %}\n {{- ', ' -}}\n {%- endif %}\n {{- '{\"name\": \"' + function_item.name + '\", \"arguments\": ' + function_item.arguments | tojson(ensure_ascii=False, sort_keys=False) + '}' -}}\n {%- set ns.is_first_tool = false %}\n {%- endfor %}\n {%- set ns.is_first_tool = true %}\n {{- ']\\n[unused12]' }}\n {%- endif %}\n {%- if not think %}\n {{- \" /no_think\" -}}\n {%- endif %}\n {{- '[unused10]' -}}\n {%- endif %}\n {%- if messages[-1]['role'] == \"tool\" %}\n {{- '[unused9]' -}}\n {{- '工具:' + messages[-1]['content'] -}}\n {%- if not think %}\n {{- \" /no_think\" -}}\n {%- endif %}\n {{- '[unused10]' -}}\n {%- endif %}\n {%- if messages[-1]['role'] == \"assistant\" %}\n {{- '[unused9]' -}}\n {{- '助手:[unused16][unused17]' + messages[-1]['content'] -}}\n {%- if messages[-1].get('tool_calls') %}\n {{- '[unused11]\\n[' }}\n {%- set function_list = messages[-1].tool_calls | selectattr('function') | map(attribute='function') | list %}\n {%- for function_item in function_list %}\n {%- if not ns.is_first_tool %}\n {{- ', ' -}}\n {%- endif %}\n {{- '{\"name\": \"' + function_item.name + '\", \"arguments\": ' + function_item.arguments | tojson(ensure_ascii=False, sort_keys=False) + '}' -}}\n {%- set ns.is_first_tool = false %}\n {%- endfor %}\n {%- set ns.is_first_tool = true %}\n {{- ']\\n[unused12]' }}\n {%- endif %}\n {%- if not think %}\n {{- \" /no_think\" -}}\n {%- endif %}\n {{- '[unused10]' -}}\n {%- endif %}\n{%- endif %}\n\n{{-'[unused9]助手:'}}\n"
337
+ }