This view is limited to 50 files because it contains too many changes. See the raw diff here.
Files changed (50) hide show
  1. .gitattributes +1 -0
  2. chat_template.jinja +46 -0
  3. config.json +126 -0
  4. configuration_deepseek.py +212 -0
  5. generation_config.json +4 -0
  6. model-00001-of-000062.safetensors +3 -0
  7. model-00002-of-000062.safetensors +3 -0
  8. model-00003-of-000062.safetensors +3 -0
  9. model-00004-of-000062.safetensors +3 -0
  10. model-00005-of-000062.safetensors +3 -0
  11. model-00006-of-000062.safetensors +3 -0
  12. model-00007-of-000062.safetensors +3 -0
  13. model-00008-of-000062.safetensors +3 -0
  14. model-00009-of-000062.safetensors +3 -0
  15. model-00010-of-000062.safetensors +3 -0
  16. model-00011-of-000062.safetensors +3 -0
  17. model-00012-of-000062.safetensors +3 -0
  18. model-00013-of-000062.safetensors +3 -0
  19. model-00014-of-000062.safetensors +3 -0
  20. model-00015-of-000062.safetensors +3 -0
  21. model-00016-of-000062.safetensors +3 -0
  22. model-00017-of-000062.safetensors +3 -0
  23. model-00018-of-000062.safetensors +3 -0
  24. model-00019-of-000062.safetensors +3 -0
  25. model-00020-of-000062.safetensors +3 -0
  26. model-00021-of-000062.safetensors +3 -0
  27. model-00022-of-000062.safetensors +3 -0
  28. model-00023-of-000062.safetensors +3 -0
  29. model-00024-of-000062.safetensors +3 -0
  30. model-00025-of-000062.safetensors +3 -0
  31. model-00026-of-000062.safetensors +3 -0
  32. model-00027-of-000062.safetensors +3 -0
  33. model-00028-of-000062.safetensors +3 -0
  34. model-00029-of-000062.safetensors +3 -0
  35. model-00030-of-000062.safetensors +3 -0
  36. model-00031-of-000062.safetensors +3 -0
  37. model-00032-of-000062.safetensors +3 -0
  38. model-00033-of-000062.safetensors +3 -0
  39. model-00034-of-000062.safetensors +3 -0
  40. model-00035-of-000062.safetensors +3 -0
  41. model-00036-of-000062.safetensors +3 -0
  42. model-00037-of-000062.safetensors +3 -0
  43. model-00038-of-000062.safetensors +3 -0
  44. model-00039-of-000062.safetensors +3 -0
  45. model-00040-of-000062.safetensors +3 -0
  46. model-00041-of-000062.safetensors +3 -0
  47. model-00042-of-000062.safetensors +3 -0
  48. model-00043-of-000062.safetensors +3 -0
  49. model-00044-of-000062.safetensors +3 -0
  50. model-00045-of-000062.safetensors +3 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ model.safetensors.index.json filter=lfs diff=lfs merge=lfs -text
chat_template.jinja ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
{#- Chat template for Kimi (Moonshot AI): renders an optional tool-declaration
    block, a default system prompt, per-role message headers, assistant tool
    calls, tool returns, and multimodal (image) content via <|im_*|> and
    <|tool_*|> special tokens. Whitespace control ("-" markers) is significant:
    comments below sit only where surrounding whitespace is stripped anyway. -#}
{%- if tools -%}
<|im_system|>tool_declare<|im_middle|>
# Tools
{{ tools | tojson }}<|im_end|>
{%- endif -%}
{#- Inject the default system prompt only when the conversation does not already start with a system message. -#}
{%- for message in messages -%}
{%- if loop.first and messages[0]['role'] != 'system' -%}
<|im_system|>system<|im_middle|>You are Kimi, an AI assistant created by Moonshot AI.<|im_end|>
{%- endif -%}
{#- Role header: an explicit 'name' field overrides the role label; any role other than user/assistant is rendered as a system message. -#}
{%- set role_name = message.get('name') or message['role'] -%}
{%- if message['role'] == 'user' -%}
<|im_user|>{{role_name}}<|im_middle|>
{%- elif message['role'] == 'assistant' -%}
<|im_assistant|>{{role_name}}<|im_middle|>
{%- else -%}
<|im_system|>{{role_name}}<|im_middle|>
{% endif %}
{#- Message body: assistant tool calls, a tool return, a plain string, or a list of text/image parts. -#}
{%- if message['role'] == 'assistant' and message.get('tool_calls') -%}
{%- if message['content'] -%}{{ message['content'] }}{%- endif -%}
<|tool_calls_section_begin|>
{%- for tool_call in message['tool_calls'] -%}
{%- set formatted_id = tool_call['id'] -%}
<|tool_call_begin|>{{ formatted_id }}<|tool_call_argument_begin|>{% if tool_call['function']['arguments'] is string %}{{ tool_call['function']['arguments'] }}{% else %}{{ tool_call['function']['arguments'] | tojson }}{% endif %}<|tool_call_end|>
{%- endfor -%}
<|tool_calls_section_end|>
{%- elif message['role'] == 'tool' -%}
## Return of {{ message.tool_call_id }}
{{ message['content'] }}
{%- elif message['content'] is string -%}
{{ message['content'] }}
{%- elif message['content'] is not none -%}
{% for content in message['content'] -%}
{% if content['type'] == 'image' or 'image' in content or 'image_url' in content -%}
<|media_start|>image<|media_content|><|media_pad|><|media_end|>
{% else -%}
{{ content['text'] }}
{%- endif -%}
{%- endfor -%}
{%- endif -%}
<|im_end|>
{%- endfor -%}
{%- if add_generation_prompt -%}
<|im_assistant|>assistant<|im_middle|>
{%- endif -%}
config.json ADDED
@@ -0,0 +1,126 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_attn_implementation_autoset": false,
3
+ "architectures": [
4
+ "DeepseekV3ForCausalLM"
5
+ ],
6
+ "attention_bias": false,
7
+ "attention_dropout": 0.0,
8
+ "auto_map": {
9
+ "AutoConfig": "configuration_deepseek.DeepseekV3Config",
10
+ "AutoModel": "modeling_deepseek.DeepseekV3Model",
11
+ "AutoModelForCausalLM": "modeling_deepseek.DeepseekV3ForCausalLM"
12
+ },
13
+ "aux_loss_alpha": 0.001,
14
+ "bos_token_id": 163584,
15
+ "dtype": "bfloat16",
16
+ "eos_token_id": 163586,
17
+ "ep_size": 1,
18
+ "first_k_dense_replace": 1,
19
+ "hidden_act": "silu",
20
+ "hidden_size": 7168,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 18432,
23
+ "kv_lora_rank": 512,
24
+ "max_position_embeddings": 262144,
25
+ "model_type": "deepseek_v3",
26
+ "moe_intermediate_size": 2048,
27
+ "moe_layer_freq": 1,
28
+ "n_group": 1,
29
+ "n_routed_experts": 384,
30
+ "n_shared_experts": 1,
31
+ "norm_topk_prob": true,
32
+ "num_attention_heads": 64,
33
+ "num_experts_per_tok": 8,
34
+ "num_hidden_layers": 61,
35
+ "num_key_value_heads": 64,
36
+ "num_nextn_predict_layers": 0,
37
+ "pad_token_id": 163839,
38
+ "pretraining_tp": 1,
39
+ "q_lora_rank": 1536,
40
+ "qk_nope_head_dim": 128,
41
+ "qk_rope_head_dim": 64,
42
+ "rms_norm_eps": 1e-05,
43
+ "rope_scaling": {
44
+ "beta_fast": 1.0,
45
+ "beta_slow": 1.0,
46
+ "factor": 64.0,
47
+ "mscale": 1.0,
48
+ "mscale_all_dim": 1.0,
49
+ "original_max_position_embeddings": 4096,
50
+ "type": "yarn"
51
+ },
52
+ "rope_theta": 50000.0,
53
+ "routed_scaling_factor": 2.827,
54
+ "scoring_func": "sigmoid",
55
+ "seq_aux": true,
56
+ "tie_word_embeddings": false,
57
+ "topk_group": 1,
58
+ "topk_method": "noaux_tc",
59
+ "transformers_version": "4.56.1",
60
+ "unsloth_fixed": true,
61
+ "use_cache": true,
62
+ "v_head_dim": 128,
63
+ "vocab_size": 163840,
64
+ "quantization_config": {
65
+ "global_quant_config": {
66
+ "input_tensors": {
67
+ "dtype": "fp4",
68
+ "is_dynamic": true,
69
+ "qscheme": "per_group",
70
+ "ch_axis": -1,
71
+ "group_size": 32,
72
+ "symmetric": null,
73
+ "round_method": "half_even",
74
+ "scale_type": "float",
75
+ "scale_format": "e8m0",
76
+ "scale_calculation_mode": "even",
77
+ "mx_element_dtype": null,
78
+ "observer_cls": "PerBlockMXObserver",
79
+ "is_scale_quant": false
80
+ },
81
+ "output_tensors": null,
82
+ "weight": {
83
+ "dtype": "fp4",
84
+ "is_dynamic": false,
85
+ "qscheme": "per_group",
86
+ "ch_axis": -1,
87
+ "group_size": 32,
88
+ "symmetric": null,
89
+ "round_method": "half_even",
90
+ "scale_type": "float",
91
+ "scale_format": "e8m0",
92
+ "scale_calculation_mode": "even",
93
+ "mx_element_dtype": null,
94
+ "observer_cls": "PerBlockMXObserver",
95
+ "is_scale_quant": false
96
+ },
97
+ "bias": null,
98
+ "target_device": null
99
+ },
100
+ "exclude": [
101
+ "re:model.layers.*self_attn.*",
102
+ "re:model.layers.*mlp.gate",
103
+ "lm_head",
104
+ "re:model.layers.*mlp.gate_proj",
105
+ "re:model.layers.*mlp.up_proj",
106
+ "re:model.layers.*mlp.down_proj",
107
+ "re:model.layers.*shared_experts.*"
108
+ ],
109
+ "algo_config": null,
110
+ "softmax_quant_spec": null,
111
+ "quant_method": "quark",
112
+ "layer_type_quant_config": {},
113
+ "layer_quant_config": {},
114
+ "kv_cache_quant_config": {},
115
+ "kv_cache_post_rope": false,
116
+ "quant_mode": "eager_mode",
117
+ "version": "0.12+15553c6a61",
118
+ "export": {
119
+ "kv_cache_group": [],
120
+ "min_kv_scale": 0.0,
121
+ "pack_method": "reorder",
122
+ "weight_format": "real_quantized",
123
+ "weight_merge_groups": null
124
+ }
125
+ }
126
+ }
configuration_deepseek.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copy from https://huggingface.co/deepseek-ai/DeepSeek-V3/blob/main/configuration_deepseek.py

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging

logger = logging.get_logger(__name__)

DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class DeepseekV3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`DeepseekV3Model`]. It is used to instantiate a
    DeepSeek model according to the specified arguments, defining the model architecture. Instantiating a
    configuration with the defaults will yield a similar configuration to that of DeepSeek-V3.

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 129280):
            Vocabulary size of the model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`DeepseekV3Model`].
        hidden_size (`int`, *optional*, defaults to 7168):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 18432):
            Dimension of the MLP representations.
        moe_intermediate_size (`int`, *optional*, defaults to 2048):
            Dimension of the MoE representations.
        num_hidden_layers (`int`, *optional*, defaults to 61):
            Number of hidden layers in the Transformer decoder.
        num_nextn_predict_layers (`int`, *optional*, defaults to 1):
            Number of next-n predict (MTP) layers in the DeepSeekV3 model.
        num_attention_heads (`int`, *optional*, defaults to 128):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*, defaults to 128):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
            constructed by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        n_shared_experts (`int`, *optional*, defaults to 1):
            Number of shared experts; `None` means a dense model.
        n_routed_experts (`int`, *optional*, defaults to 256):
            Number of routed experts; `None` means a dense model.
        ep_size (`int`, *optional*, defaults to 1):
            Expert-parallelism degree used to shard the routed experts.
        routed_scaling_factor (`float`, *optional*, defaults to 2.5):
            Scaling factor for the routed experts' outputs.
        kv_lora_rank (`int`, *optional*, defaults to 512):
            Rank of the low-rank key/value projection (MLA).
        q_lora_rank (`int`, *optional*, defaults to 1536):
            Rank of the low-rank query projection (MLA).
        qk_rope_head_dim (`int`, *optional*, defaults to 64):
            Per-head dimension of the rotary (positional) part of the query/key.
        v_head_dim (`int`, *optional*, defaults to 128):
            Per-head dimension of the value.
        qk_nope_head_dim (`int`, *optional*, defaults to 128):
            Per-head dimension of the non-rotary part of the query/key.
        topk_method (`str`, *optional*, defaults to `'noaux_tc'`):
            Top-k method used in the routed gate.
        n_group (`int`, *optional*, defaults to 8):
            Number of groups for routed experts.
        topk_group (`int`, *optional*, defaults to 4):
            Number of selected groups for each token (for each token, the selected experts are only within
            `topk_group` groups).
        num_experts_per_tok (`int`, *optional*, defaults to 8):
            Number of selected experts per token; `None` means a dense model.
        moe_layer_freq (`int`, *optional*, defaults to 1):
            The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
        first_k_dense_replace (`int`, *optional*, defaults to 3):
            Number of dense layers at the bottom of the stack
            (embed -> dense -> ... -> dense -> moe -> moe ... -> lm_head).
        norm_topk_prob (`bool`, *optional*, defaults to `True`):
            Whether to normalize the weights of the routed experts.
        scoring_func (`str`, *optional*, defaults to `'sigmoid'`):
            Method of computing expert weights.
        aux_loss_alpha (`float`, *optional*, defaults to 0.001):
            Auxiliary loss weight coefficient.
        seq_aux (`bool`, *optional*, defaults to `True`):
            Whether to compute the auxiliary loss for each individual sample.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-06):
            The epsilon used by the rms normalization layers.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 0):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 1):
            End of stream token id.
        pretraining_tp (`int`, *optional*, defaults to 1):
            Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
            document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value
            is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
            issue](https://github.com/pytorch/pytorch/issues/76232).
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`Dict`, *optional*):
            Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
            strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
            is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether to use a bias in the query, key, value and output projection layers during self-attention.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio for the attention probabilities.

    ```python
    >>> from transformers import DeepseekV3Model, DeepseekV3Config

    >>> # Initializing a Deepseek-V3 style configuration
    >>> configuration = DeepseekV3Config()

    >>> # Initializing a model from the configuration
    >>> model = DeepseekV3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "deepseek_v3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=129280,
        hidden_size=7168,
        intermediate_size=18432,
        moe_intermediate_size=2048,
        num_hidden_layers=61,
        num_nextn_predict_layers=1,
        num_attention_heads=128,
        num_key_value_heads=128,
        n_shared_experts=1,
        n_routed_experts=256,
        ep_size=1,
        routed_scaling_factor=2.5,
        kv_lora_rank=512,
        q_lora_rank=1536,
        qk_rope_head_dim=64,
        v_head_dim=128,
        qk_nope_head_dim=128,
        topk_method='noaux_tc',
        n_group=8,
        topk_group=4,
        num_experts_per_tok=8,
        moe_layer_freq=1,
        first_k_dense_replace=3,
        norm_topk_prob=True,
        scoring_func='sigmoid',
        aux_loss_alpha=0.001,
        seq_aux=True,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=None,
        bos_token_id=0,
        eos_token_id=1,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        attention_bias=False,
        attention_dropout=0.0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.moe_intermediate_size = moe_intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_nextn_predict_layers = num_nextn_predict_layers
        self.num_attention_heads = num_attention_heads
        self.n_shared_experts = n_shared_experts
        self.n_routed_experts = n_routed_experts
        self.ep_size = ep_size
        self.routed_scaling_factor = routed_scaling_factor
        self.kv_lora_rank = kv_lora_rank
        self.q_lora_rank = q_lora_rank
        self.qk_rope_head_dim = qk_rope_head_dim
        self.v_head_dim = v_head_dim
        self.qk_nope_head_dim = qk_nope_head_dim
        self.topk_method = topk_method
        self.n_group = n_group
        self.topk_group = topk_group
        self.num_experts_per_tok = num_experts_per_tok
        self.moe_layer_freq = moe_layer_freq
        self.first_k_dense_replace = first_k_dense_replace
        self.norm_topk_prob = norm_topk_prob
        self.scoring_func = scoring_func
        self.aux_loss_alpha = aux_loss_alpha
        self.seq_aux = seq_aux
        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.attention_bias = attention_bias
        self.attention_dropout = attention_dropout

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
generation_config.json ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ {
2
+ "max_length": 131072,
3
+ "eos_token_id": 163586
4
+ }
model-00001-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:97785c9495b804480f5eb035136a7a3dccdeabe7a34af023976a72cee7afcf67
3
+ size 995002080
model-00002-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:963e36e08e711243e5e3d0288946461635f7d6d514dd241cec05788babf6711f
3
+ size 9280353488
model-00003-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a5300a977f64c22c7ceacf68a8d32942c8b42945b99c4048176d71c2c2f1ac50
3
+ size 9280353488
model-00004-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a52a59480dccd8886f86ccf4ad918d1e7212cf01f919180a4a8a32aa00dcd108
3
+ size 9280353488
model-00005-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a506e995c8a44725f54236b6e75f3421ca9dbfd97fc82caee49349165c64c45f
3
+ size 9280353488
model-00006-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3e4eef3783e699df8a214745feb3f342fc851213a1bbba5e7e556947707774c
3
+ size 9280353488
model-00007-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fb81e2a40be415a4fd8eb586a8ac59952b17e46ddc7aaa5802f8854c08c26829
3
+ size 9280353488
model-00008-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99e785d380862075ec4b2064ca1621614496b4a5c270d97df0856a8ae2e98883
3
+ size 9280353488
model-00009-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f1ab0033fb20262518f7323dee65918398d8805199a31e2b29cf19df58d0cb04
3
+ size 9280353488
model-00010-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ace527c71fcd37b085cba3e44de8e3f6727c9d0f42b41dcb12bb0863eb7cafc2
3
+ size 9280353488
model-00011-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0569facf80d944f7715aeb2b8a105c29a680349b7d1ba3d67f4edbd117e9316d
3
+ size 9280355808
model-00012-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b2f19b7cc95a2e9f24e89d620ccba924832fd68fa72e921648f0c860bda742b8
3
+ size 9280355808
model-00013-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a8d84793a00e9e29d55b59183ead1d7b8d0d52affc0331dcac51a4ec922c8fe
3
+ size 9280355808
model-00014-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fef1fee0f6f31ec4392c445c5ac8569374f4ed42c1b739011174d831e4028cfd
3
+ size 9280355808
model-00015-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd80e24b60865a2b9134a85604f6852c951cf7658e8e2be015de7d42a6585aa6
3
+ size 9280355808
model-00016-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:32e55abb6cf8399be5c31e5bc4718c8a06dfcfa840f7ccb6faa9d27c20aacd98
3
+ size 9280355808
model-00017-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06e5fb01edb2ba9f136db01dae2811b59b69372979fdcb55749ee8037b24e7a8
3
+ size 9280355808
model-00018-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8c47a9d10e9cf4e65688d0ba75cbd2a8ab59e9e3e54637edd4a0515a7fe622d3
3
+ size 9280355808
model-00019-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d6be632fa6059062ab5d3afbc0a9afa8b57f7b633e39543a9ad8df78f41af51e
3
+ size 9280355808
model-00020-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:48a1f3d892281252d82c52f7c5a3e2500aa11332500229bc559f29e578f35a12
3
+ size 9280355808
model-00021-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:698704cef5152a4af86721b9c24103ae857f0506542186416adf0a8bb3ba3349
3
+ size 9280355808
model-00022-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38b5a470e2432e4c6c581c6bd1b72460ee24bd627092233382a779e9bcf50399
3
+ size 9280355808
model-00023-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f0bb40a80d5eba3cd5db1e7e96d2fa506cd3fe36f9437838e759ea2a0765001
3
+ size 9280355808
model-00024-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:617d6b4946f9e1fc4a6ae32efc53fbb9f562dd49dbfbd6a9c855a7a68d477038
3
+ size 9280355808
model-00025-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:86dce5b6fc028d000f5908c2ab5180b23011a5836de280c3322c337cce6dca18
3
+ size 9280355808
model-00026-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c70a71fc3ca1659fbdcedfdc956c0a5b25c409335b1846db2534c2218be1865c
3
+ size 9280355808
model-00027-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e623d550e6f77651868902f6868e9e0f6bc5e51b9e12c12f51ed91dbd36e832c
3
+ size 9280355808
model-00028-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df0f8d11cfa1afe2f40ff11a9e9a9ad90b0a9db4180ea18735905d5cd77c87d7
3
+ size 9280355808
model-00029-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:703d5fe54d24099e60be172136d4eba5f43ffeb6a62dbfc4a55052e1913a38e6
3
+ size 9280355808
model-00030-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4041b382ec48ff3c0631aff5dc7ecca0b0e2aace81d2c6812a918f5bb2c9e91e
3
+ size 9280355808
model-00031-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cbb3a53aa7a160cf1395549c4b96d9ff86a073fe3222d5be4117b64fd954b370
3
+ size 9280355808
model-00032-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a8299a78346022989d95258343d7b7ca505fa89cf0c095ce625dbec8b604a1c8
3
+ size 9280355808
model-00033-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a180ad15eb8ad7010ac877cef865de956e9cc7a979e590042d9a4c89f2dbbfd7
3
+ size 9280355808
model-00034-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f54d0760643f2f09fef77f82f169cb232c502e404578edac2079635139fbcc4e
3
+ size 9280355808
model-00035-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f22723f6510c57561e1b436a0d2b92bde5a68ccb696e1ca98aadd470f776d153
3
+ size 9280355808
model-00036-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df4f4bccdcd7e35a0e1cee41e78961a34c59d5fefa9acd2a0182d862e34d3d93
3
+ size 9280355808
model-00037-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e188c3bcb37dee2b61e5727315bab70f43d879283d26a9a36bb3fdd5837887a
3
+ size 9280355808
model-00038-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c51bafa930721e047ba34bc6506b497c250e9ec5100bd9c863fe0c7ad82fe71
3
+ size 9280355808
model-00039-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd31abf5e5ecbe23e471570f3abebf0d8870a3662b6c595a50ec27da6f20405d
3
+ size 9280355808
model-00040-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7edf9873cd645394a0ecb00a6b4486878f82bcc50efa7d6c0b5a71074417a2dd
3
+ size 9280355808
model-00041-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:99fa1d782af5be52c5546fccc50425b14846e56a68a42ecb3e8aed53e3ee336b
3
+ size 9280355808
model-00042-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7b7f8a0614de1591a39fbe795b387f487de097956fc8e421e86f9e844f42d70c
3
+ size 9280355808
model-00043-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9433105359d6799aa0c43c3be5d95605e829892ef9cc64b1cd7e0b3df9281a54
3
+ size 9280355808
model-00044-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b381669a23a70c3e39afc87cc83586962b6a7cb671e3841a780f0397f0cfe457
3
+ size 9280355808
model-00045-of-000062.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:06f3176abb91022659845dad62351afcaa9c850fe6d68413a51efc3490b6ff1a
3
+ size 9280355808