TakalaWang committed
Commit 9939f8d · verified · 1 Parent(s): 6160563

Fine-tuned Qwen/Qwen3-Embedding-4B for code search

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
1_Pooling/config.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "word_embedding_dimension": 2560,
+   "pooling_mode_cls_token": false,
+   "pooling_mode_mean_tokens": false,
+   "pooling_mode_max_tokens": false,
+   "pooling_mode_mean_sqrt_len_tokens": false,
+   "pooling_mode_weightedmean_tokens": false,
+   "pooling_mode_lasttoken": true,
+   "include_prompt": true
+ }
README.md ADDED
@@ -0,0 +1,439 @@
+ ---
+ tags:
+ - sentence-transformers
+ - sentence-similarity
+ - feature-extraction
+ - dense
+ - generated_from_trainer
+ - dataset_size:800
+ - loss:ContrastiveLoss
+ base_model: Qwen/Qwen3-Embedding-4B
+ widget:
+ - source_sentence: "Collects the tracing data from the given parameters.\n \
+     \ :param request: The Flask request.\n :param response: The flask response.\n\
+     \ :param error: The error occurred if any.\n :param latency: The\
+     \ time elapsed to process the request.\n :return: The tracing data."
+   sentences:
+   - "def enum_to_yaml(cls: Type[T_EnumToYAML], representer: Representer, data: T_EnumToYAML)\
+     \ -> ruamel.yaml.nodes.ScalarNode:\n \n return representer.represent_scalar(\n\
+     \ f\"!{cls.__name__}\",\n f\"{str(data)}\"\n )"
+   - "def subclasses(self, inherited=False):\n \n data = clips.data.DataObject(self._env)\n\
+     \n lib.EnvClassSubclasses(self._env, self._cls, data.byref, int(inherited))\n\
+     \n for klass in classes(self._env, data.value):\n yield klass"
+   - "def identify(self, req, resp, resource, uri_kwargs):\n \n header\
+     \ = req.get_header(, False)\n auth = header.split() if header else None\n\
+     \n if auth is None or auth[0].lower() != :\n return None\n\n\
+     \ if len(auth) != 2:\n raise HTTPBadRequest(\n \
+     \ \"Invalid Authorization header\",\n \"The Authorization header\
+     \ for Token auth should be in form:\\n\"\n \"Authorization: Token\
+     \ <token_value>\"\n )\n\n return auth[1]"
+ - source_sentence: "A wrapper for `os.walk` that skips hidden files and directories.\n\
+     \n This function does not have the parameter `topdown` from\n `os.walk`:\
+     \ the directories must always be recursed top-down when\n using this function.\n\
+     \n See also\n --------\n os.walk : For a description of the parameters"
+   sentences:
+   - "def expand_all(self):\n \n\n def aux(item):\n self.item(item,\
+     \ open=True)\n children = self.get_children(item)\n for\
+     \ c in children:\n aux(c)\n\n children = self.get_children(\"\
+     \")\n for c in children:\n aux(c)"
+   - "def create_extended_model(model, db_penalty=None, ex_penalty=None,\n \
+     \ tp_penalty=None, penalties=None):\n \n\n \n model_extended\
+     \ = model.create_metabolic_model()\n extra_compartment = model.extracellular_compartment\n\
+     \n compartment_ids = set(c.id for c in model.compartments)\n\n \n if\
+     \ len(compartment_ids) > 0:\n logger.info(\n .format(\n \
+     \ .join(.format(c) for c in compartment_ids)))\n db_added =\
+     \ add_all_database_reactions(model_extended, compartment_ids)\n else:\n \
+     \ logger.warning(\n \n \n )\n db_added\
+     \ = set()\n\n \n logger.info(\n .format(\n extra_compartment))\n\
+     \ ex_added = add_all_exchange_reactions(\n model_extended, extra_compartment,\
+     \ allow_duplicates=True)\n\n \n boundaries = model.compartment_boundaries\n\
+     \ if len(boundaries) > 0:\n logger.info(\n \n \
+     \ .format(\n .join(.format(c1, c2) for c1, c2 in boundaries)))\n\
+     \ tp_added = add_all_transport_reactions(\n model_extended,\
+     \ boundaries, allow_duplicates=True)\n else:\n logger.warning(\n \
+     \ \n )\n tp_added = set()\n\n \n weights = {}\n\
+     \ if db_penalty is not None:\n weights.update((rxnid, db_penalty) for\
+     \ rxnid in db_added)\n if tp_penalty is not None:\n weights.update((rxnid,\
+     \ tp_penalty) for rxnid in tp_added)\n if ex_penalty is not None:\n \
+     \ weights.update((rxnid, ex_penalty) for rxnid in ex_added)\n\n if penalties\
+     \ is not None:\n for rxnid, penalty in iteritems(penalties):\n \
+     \ weights[rxnid] = penalty\n return model_extended, weights"
+   - "def walk_skip_hidden(top, onerror=None, followlinks=False):\n \n\n for\
+     \ root, dirs, files in os.walk(\n top, topdown=True, onerror=onerror,\n\
+     \ followlinks=followlinks):\n \n \n dirs[:] =\
+     \ [d for d in dirs if not is_path_hidden(d)]\n files[:] = [f for f in files\
+     \ if not is_path_hidden(f)]\n yield root, dirs, files"
+ - source_sentence: Show stack frames for a task
+   sentences:
+   - "def do_where(self, taskid: int) -> None:\n \n task = task_by_id(taskid,\
+     \ self._loop)\n if task:\n self._sout.write(_format_stack(task))\n\
+     \ self._sout.write()\n else:\n self._sout.write(\
+     \ % taskid)"
+   - "def apt_add_repository_from_apt_string(apt_string, apt_file):\n \n\n apt_file_path\
+     \ = % apt_file\n\n if not file_contains(apt_file_path, apt_string.lower(),\
+     \ use_sudo=True):\n file_append(apt_file_path, apt_string.lower(), use_sudo=True)\n\
+     \n with hide(, ):\n sudo(\"DEBIAN_FRONTEND=noninteractive apt-get\
+     \ update\")"
+   - "def _kl_laplace_laplace(a, b, name=None):\n \n with tf.name_scope(name or \"\
+     kl_laplace_laplace\"):\n \n \n distance = tf.abs(a.loc - b.loc)\n \
+     \ ratio = a.scale / b.scale\n\n return (-tf.math.log(ratio) - 1 + distance\
+     \ / b.scale +\n ratio * tf.exp(-distance / a.scale))"
+ - source_sentence: Read the ical file
+   sentences:
+   - "def read_ical(self, ical_file_location): \n \n with open(ical_file_location,\
+     \ ) as ical_file:\n data = ical_file.read()\n self.cal = Calendar.from_ical(data)\n\
+     \ return self.cal"
+   - "def _size_from_header(cls, header):\n \n\n \n result = []\n\
+     \n for data in header:\n \n\n \n result.append(header[data])\n\
+     \n \n return result"
+   - "def dockermachine_ip() -> Optional[str]:\n \n if not check_dockermachine():\n\
+     \ return None\n\n \n try:\n out = subprocess.check_output([,\
+     \ ])\n return out.decode(\"utf-8\").strip()\n except Exception:\n \
+     \ logger.debug(f\"docker machine not present\")\n return None"
+ - source_sentence: 'list[VolumeExtent]: sections.'
+   sentences:
+   - "def cast_to_a1_notation(method):\n \n @wraps(method)\n def wrapper(self,\
+     \ *args, **kwargs):\n try:\n if len(args):\n \
+     \ int(args[0])\n\n \n range_start = rowcol_to_a1(*args[:2])\n\
+     \ range_end = rowcol_to_a1(*args[-2:])\n range_name = .join((range_start,\
+     \ range_end))\n\n args = (range_name,) + args[4:]\n except ValueError:\n\
+     \ pass\n\n return method(self, *args, **kwargs)\n\n return\
+     \ wrapper"
+   - "def readfmt(self, fmt):\n \n size = struct.calcsize(fmt)\n \
+     \ blob = self.read(size)\n obj, = struct.unpack(fmt, blob)\n \
+     \ return obj"
+   - "def equals(self, other):\n \n self._run(unittest_case.assertEqual,\
+     \ (self._subject, other))\n return ChainInspector(self._subject)"
+ pipeline_tag: sentence-similarity
+ library_name: sentence-transformers
+ ---
+ 
+ # SentenceTransformer based on Qwen/Qwen3-Embedding-4B
+ 
+ This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Qwen/Qwen3-Embedding-4B](https://huggingface.co/Qwen/Qwen3-Embedding-4B). It maps sentences & paragraphs to a 2560-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. This checkpoint was fine-tuned for code search, i.e. retrieving code snippets from natural-language descriptions.
+ 
+ ## Model Details
+ 
+ ### Model Description
+ - **Model Type:** Sentence Transformer
+ - **Base model:** [Qwen/Qwen3-Embedding-4B](https://huggingface.co/Qwen/Qwen3-Embedding-4B) <!-- at revision 5cf2132abc99cad020ac570b19d031efec650f2b -->
+ - **Maximum Sequence Length:** 40960 tokens
+ - **Output Dimensionality:** 2560 dimensions
+ - **Similarity Function:** Cosine Similarity
+ <!-- - **Training Dataset:** Unknown -->
+ <!-- - **Language:** Unknown -->
+ <!-- - **License:** Unknown -->
+ 
+ ### Model Sources
+ 
+ - **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
+ - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
+ - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)
+ 
+ ### Full Model Architecture
+ 
+ ```
+ SentenceTransformer(
+   (0): Transformer({'max_seq_length': 40960, 'do_lower_case': False, 'architecture': 'Qwen3Model'})
+   (1): Pooling({'word_embedding_dimension': 2560, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': True, 'include_prompt': True})
+   (2): Normalize()
+ )
+ ```
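+ 
+ In plain terms, the `Pooling` module performs last-token pooling: each text's embedding is the transformer's hidden state at the final non-padding token, which `Normalize` then L2-normalizes so dot products equal cosine similarities. A minimal sketch of what that step computes, assuming right-padded inputs (illustrative only; the actual logic lives in `sentence_transformers.models.Pooling`):
+ 
+ ```python
+ import torch
+ 
+ def last_token_pool(hidden_states: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
+     # hidden_states: (batch, seq_len, 2560); attention_mask: (batch, seq_len)
+     last_idx = attention_mask.sum(dim=1) - 1                 # last real token per sequence
+     batch_idx = torch.arange(hidden_states.size(0), device=hidden_states.device)
+     emb = hidden_states[batch_idx, last_idx]                 # (batch, 2560)
+     return torch.nn.functional.normalize(emb, p=2, dim=1)   # what Normalize() adds
+ ```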
+ 
+ ## Usage
+ 
+ ### Direct Usage (Sentence Transformers)
+ 
+ First install the Sentence Transformers library:
+ 
+ ```bash
+ pip install -U sentence-transformers
+ ```
+ 
+ Then you can load this model and run inference.
+ ```python
+ from sentence_transformers import SentenceTransformer
+ 
+ # Download from the 🤗 Hub
+ model = SentenceTransformer("TakalaWang/qwen3-embedding-4B-code-search")
+ # Run inference
+ queries = [
+     "list[VolumeExtent]: sections.",
+ ]
+ documents = [
+     'def equals(self, other):\n \n self._run(unittest_case.assertEqual, (self._subject, other))\n return ChainInspector(self._subject)',
+     'def cast_to_a1_notation(method):\n \n @wraps(method)\n def wrapper(self, *args, **kwargs):\n try:\n if len(args):\n int(args[0])\n\n \n range_start = rowcol_to_a1(*args[:2])\n range_end = rowcol_to_a1(*args[-2:])\n range_name = .join((range_start, range_end))\n\n args = (range_name,) + args[4:]\n except ValueError:\n pass\n\n return method(self, *args, **kwargs)\n\n return wrapper',
+     'def readfmt(self, fmt):\n \n size = struct.calcsize(fmt)\n blob = self.read(size)\n obj, = struct.unpack(fmt, blob)\n return obj',
+ ]
+ query_embeddings = model.encode_query(queries)
+ document_embeddings = model.encode_document(documents)
+ print(query_embeddings.shape, document_embeddings.shape)
+ # [1, 2560] [3, 2560]
+ 
+ # Get the similarity scores for the embeddings
+ similarities = model.similarity(query_embeddings, document_embeddings)
+ print(similarities)
+ # tensor([[1., 1., 1.]], dtype=torch.float16)
+ ```
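+ 
+ To turn the similarity scores into a code-search ranking, sort each query's row; a short illustrative continuation of the block above (reuses its `similarities` variable):
+ 
+ ```python
+ # Rank the documents for the first query, best match first
+ ranking = similarities[0].argsort(descending=True)
+ for rank, doc_idx in enumerate(ranking.tolist(), start=1):
+     print(f"{rank}. document {doc_idx} (score={similarities[0, doc_idx]:.4f})")
+ ```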
+ 
+ <!--
+ ### Direct Usage (Transformers)
+ 
+ <details><summary>Click to see the direct usage in Transformers</summary>
+ 
+ </details>
+ -->
+ 
+ <!--
+ ### Downstream Usage (Sentence Transformers)
+ 
+ You can finetune this model on your own dataset.
+ 
+ <details><summary>Click to expand</summary>
+ 
+ </details>
+ -->
+ 
+ <!--
+ ### Out-of-Scope Use
+ 
+ *List how the model may foreseeably be misused and address what users ought not to do with the model.*
+ -->
+ 
+ <!--
+ ## Bias, Risks and Limitations
+ 
+ *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
+ -->
+ 
+ <!--
+ ### Recommendations
+ 
+ *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
+ -->
+ 
+ ## Training Details
+ 
+ ### Training Dataset
+ 
+ #### Unnamed Dataset
+ 
+ * Size: 800 training samples
+ * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code>
+ * Approximate statistics based on the first 800 samples:
+   | | sentence_0 | sentence_1 | label |
+   |:--------|:------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------|:--------------------------------------------------------------|
+   | type | string | string | float |
+   | details | <ul><li>min: 2 tokens</li><li>mean: 65.92 tokens</li><li>max: 3156 tokens</li></ul> | <ul><li>min: 21 tokens</li><li>mean: 132.37 tokens</li><li>max: 1236 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.5</li><li>max: 1.0</li></ul> |
+ * Samples:
+   | sentence_0 | sentence_1 | label |
+   |:---------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------|
+   | <code>Go through a stream and print out anything not in observed set</code> | <code>def t_VAR(self, t):<br> r<br> t.type = self.reserved.get(t.value.lower(), )<br> return t</code> | <code>0.0</code> |
+   | <code>Move a page to before some other page of the document. Specify 'to = -1' to move after last page.</code> | <code>def movePage(self, pno, to = -1):<br> <br> pl = list(range(len(self)))<br> if pno < 0 or pno > pl[-1]:<br> raise ValueError(" page number out of range")<br> if to < -1 or to > pl[-1]:<br> raise ValueError(" page number out of range")<br> pl.remove(pno)<br> if to == -1:<br> pl.append(pno)<br> else:<br> pl.insert(to-1, pno)<br> return self.select(pl)</code> | <code>1.0</code> |
+   | <code>Create an empty dataset in the current repo.</code> | <code>def libvlc_media_player_set_agl(p_mi, drawable):<br> <br> f = _Cfunctions.get(, None) or \<br> _Cfunction(, ((1,), (1,),), None,<br> None, MediaPlayer, ctypes.c_uint32)<br> return f(p_mi, drawable)</code> | <code>0.0</code> |
+ * Loss: [<code>ContrastiveLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#contrastiveloss) with these parameters:
+   ```json
+   {
+       "distance_metric": "SiameseDistanceMetric.COSINE_DISTANCE",
+       "margin": 0.5,
+       "size_average": true
+   }
+   ```
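+ 
+ With `COSINE_DISTANCE` and `margin: 0.5`, this is the Hadsell et al. contrastive objective: positive pairs (label 1) are pulled together, while negative pairs (label 0) are only penalized as long as their cosine distance stays below the margin. A sketch of the computation (mirroring, not reproducing, the sentence-transformers implementation):
+ 
+ ```python
+ import torch
+ import torch.nn.functional as F
+ 
+ def contrastive_loss(emb_a, emb_b, labels, margin=0.5):
+     d = 1 - F.cosine_similarity(emb_a, emb_b)                     # cosine distance per pair
+     loss = 0.5 * (labels * d.pow(2)                               # positives: squared distance
+                   + (1 - labels) * F.relu(margin - d).pow(2))     # negatives: hinge at the margin
+     return loss.mean()                                            # size_average: true
+ ```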
+ 
+ ### Training Hyperparameters
+ #### Non-Default Hyperparameters
+ 
+ - `per_device_train_batch_size`: 1
+ - `per_device_eval_batch_size`: 1
+ - `num_train_epochs`: 2
+ - `multi_dataset_batch_sampler`: round_robin
+ 
+ #### All Hyperparameters
+ <details><summary>Click to expand</summary>
+ 
+ - `overwrite_output_dir`: False
+ - `do_predict`: False
+ - `eval_strategy`: no
+ - `prediction_loss_only`: True
+ - `per_device_train_batch_size`: 1
+ - `per_device_eval_batch_size`: 1
+ - `per_gpu_train_batch_size`: None
+ - `per_gpu_eval_batch_size`: None
+ - `gradient_accumulation_steps`: 1
+ - `eval_accumulation_steps`: None
+ - `torch_empty_cache_steps`: None
+ - `learning_rate`: 5e-05
+ - `weight_decay`: 0.0
+ - `adam_beta1`: 0.9
+ - `adam_beta2`: 0.999
+ - `adam_epsilon`: 1e-08
+ - `max_grad_norm`: 1.0
+ - `num_train_epochs`: 2
+ - `max_steps`: -1
+ - `lr_scheduler_type`: linear
+ - `lr_scheduler_kwargs`: {}
+ - `warmup_ratio`: 0.0
+ - `warmup_steps`: 0
+ - `log_level`: passive
+ - `log_level_replica`: warning
+ - `log_on_each_node`: True
+ - `logging_nan_inf_filter`: True
+ - `save_safetensors`: True
+ - `save_on_each_node`: False
+ - `save_only_model`: False
+ - `restore_callback_states_from_checkpoint`: False
+ - `no_cuda`: False
+ - `use_cpu`: False
+ - `use_mps_device`: False
+ - `seed`: 42
+ - `data_seed`: None
+ - `jit_mode_eval`: False
+ - `use_ipex`: False
+ - `bf16`: False
+ - `fp16`: False
+ - `fp16_opt_level`: O1
+ - `half_precision_backend`: auto
+ - `bf16_full_eval`: False
+ - `fp16_full_eval`: False
+ - `tf32`: None
+ - `local_rank`: 0
+ - `ddp_backend`: None
+ - `tpu_num_cores`: None
+ - `tpu_metrics_debug`: False
+ - `debug`: []
+ - `dataloader_drop_last`: False
+ - `dataloader_num_workers`: 0
+ - `dataloader_prefetch_factor`: None
+ - `past_index`: -1
+ - `disable_tqdm`: False
+ - `remove_unused_columns`: True
+ - `label_names`: None
+ - `load_best_model_at_end`: False
+ - `ignore_data_skip`: False
+ - `fsdp`: []
+ - `fsdp_min_num_params`: 0
+ - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
+ - `fsdp_transformer_layer_cls_to_wrap`: None
+ - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
+ - `parallelism_config`: None
+ - `deepspeed`: None
+ - `label_smoothing_factor`: 0.0
+ - `optim`: adamw_torch_fused
+ - `optim_args`: None
+ - `adafactor`: False
+ - `group_by_length`: False
+ - `length_column_name`: length
+ - `ddp_find_unused_parameters`: None
+ - `ddp_bucket_cap_mb`: None
+ - `ddp_broadcast_buffers`: False
+ - `dataloader_pin_memory`: True
+ - `dataloader_persistent_workers`: False
+ - `skip_memory_metrics`: True
+ - `use_legacy_prediction_loop`: False
+ - `push_to_hub`: False
+ - `resume_from_checkpoint`: None
+ - `hub_model_id`: None
+ - `hub_strategy`: every_save
+ - `hub_private_repo`: None
+ - `hub_always_push`: False
+ - `hub_revision`: None
+ - `gradient_checkpointing`: False
+ - `gradient_checkpointing_kwargs`: None
+ - `include_inputs_for_metrics`: False
+ - `include_for_metrics`: []
+ - `eval_do_concat_batches`: True
+ - `fp16_backend`: auto
+ - `push_to_hub_model_id`: None
+ - `push_to_hub_organization`: None
+ - `mp_parameters`: 
+ - `auto_find_batch_size`: False
+ - `full_determinism`: False
+ - `torchdynamo`: None
+ - `ray_scope`: last
+ - `ddp_timeout`: 1800
+ - `torch_compile`: False
+ - `torch_compile_backend`: None
+ - `torch_compile_mode`: None
+ - `include_tokens_per_second`: False
+ - `include_num_input_tokens_seen`: False
+ - `neftune_noise_alpha`: None
+ - `optim_target_modules`: None
+ - `batch_eval_metrics`: False
+ - `eval_on_start`: False
+ - `use_liger_kernel`: False
+ - `liger_kernel_config`: None
+ - `eval_use_gather_object`: False
+ - `average_tokens_across_devices`: False
+ - `prompts`: None
+ - `batch_sampler`: batch_sampler
+ - `multi_dataset_batch_sampler`: round_robin
+ - `router_mapping`: {}
+ - `learning_rate_mapping`: {}
+ 
+ </details>
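+ 
+ A minimal sketch of how a comparable run could be launched with these settings (the column names, loss parameters, and hyperparameters are taken from this card; treat the snippet as illustrative rather than the exact training script):
+ 
+ ```python
+ from datasets import Dataset
+ from sentence_transformers import (SentenceTransformer, SentenceTransformerTrainer,
+                                    SentenceTransformerTrainingArguments)
+ from sentence_transformers.losses import ContrastiveLoss
+ 
+ model = SentenceTransformer("Qwen/Qwen3-Embedding-4B")
+ train_dataset = Dataset.from_dict({       # the real run used 800 (docstring, code, label) pairs
+     "sentence_0": ["Read the ical file"],
+     "sentence_1": ["def read_ical(self, path): ..."],
+     "label": [1.0],
+ })
+ args = SentenceTransformerTrainingArguments(
+     output_dir="qwen3-embedding-4B-code-search",
+     per_device_train_batch_size=1,
+     num_train_epochs=2,
+ )
+ trainer = SentenceTransformerTrainer(
+     model=model,
+     args=args,
+     train_dataset=train_dataset,
+     loss=ContrastiveLoss(model, margin=0.5),
+ )
+ trainer.train()
+ ```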
+ 
+ ### Training Logs
+ | Epoch | Step | Training Loss |
+ |:-----:|:----:|:-------------:|
+ | 0.625 | 500 | 0.0583 |
+ | 1.25 | 1000 | 0.0635 |
+ | 1.875 | 1500 | 0.0638 |
+ 
+ 
+ ### Framework Versions
+ - Python: 3.13.7
+ - Sentence Transformers: 5.1.1
+ - Transformers: 4.56.2
+ - PyTorch: 2.8.0+cu128
+ - Accelerate: 1.10.1
+ - Datasets: 4.1.1
+ - Tokenizers: 0.22.1
+ 
+ ## Citation
+ 
+ ### BibTeX
+ 
+ #### Sentence Transformers
+ ```bibtex
+ @inproceedings{reimers-2019-sentence-bert,
+     title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
+     author = "Reimers, Nils and Gurevych, Iryna",
+     booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
+     month = "11",
+     year = "2019",
+     publisher = "Association for Computational Linguistics",
+     url = "https://arxiv.org/abs/1908.10084",
+ }
+ ```
+ 
+ #### ContrastiveLoss
+ ```bibtex
+ @inproceedings{hadsell2006dimensionality,
+     author={Hadsell, R. and Chopra, S. and LeCun, Y.},
+     booktitle={2006 IEEE Computer Society Conference on Computer Vision and Pattern Recognition (CVPR'06)},
+     title={Dimensionality Reduction by Learning an Invariant Mapping},
+     year={2006},
+     volume={2},
+     number={},
+     pages={1735-1742},
+     doi={10.1109/CVPR.2006.100}
+ }
+ ```
+ 
+ <!--
+ ## Glossary
+ 
+ *Clearly define terms in order to be accessible across audiences.*
+ -->
+ 
+ <!--
+ ## Model Card Authors
+ 
+ *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
+ -->
+ 
+ <!--
+ ## Model Card Contact
+ 
+ *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
+ -->
added_tokens.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "</tool_call>": 151658,
+   "<tool_call>": 151657,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- messages[0]['content'] }}
+     {%- else %}
+         {{- 'You are a helpful assistant.' }}
+     {%- endif %}
+     {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0]['role'] == 'system' %}
+         {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+     {%- else %}
+         {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+     {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+         {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+     {%- elif message.role == "assistant" %}
+         {{- '<|im_start|>' + message.role }}
+         {%- if message.content %}
+             {{- '\n' + message.content }}
+         {%- endif %}
+         {%- for tool_call in message.tool_calls %}
+             {%- if tool_call.function is defined %}
+                 {%- set tool_call = tool_call.function %}
+             {%- endif %}
+             {{- '\n<tool_call>\n{"name": "' }}
+             {{- tool_call.name }}
+             {{- '", "arguments": ' }}
+             {{- tool_call.arguments | tojson }}
+             {{- '}\n</tool_call>' }}
+         {%- endfor %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {{- message.content }}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
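
This matches the Qwen chat format shipped with the base tokenizer; embedding inference does not go through it, but it is applied if you call `tokenizer.apply_chat_template` yourself. For example (illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("TakalaWang/qwen3-embedding-4B-code-search")
text = tok.apply_chat_template(
    [{"role": "user", "content": "hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(text)  # <|im_start|>system ... <|im_start|>user ... <|im_start|>assistant
```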
config.json ADDED
@@ -0,0 +1,68 @@
+ {
+   "architectures": [
+     "Qwen3Model"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 151643,
+   "dtype": "float16",
+   "eos_token_id": 151645,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 2560,
+   "initializer_range": 0.02,
+   "intermediate_size": 9728,
+   "layer_types": [
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention",
+     "full_attention"
+   ],
+   "max_position_embeddings": 40960,
+   "max_window_layers": 36,
+   "model_type": "qwen3",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 36,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "transformers_version": "4.56.2",
+   "use_cache": true,
+   "use_sliding_window": false,
+   "vocab_size": 151665
+ }
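
For orientation: `hidden_size` 2560 across 36 full-attention layers, with a decoupled `head_dim` of 128 (32 query heads × 128 = 4096-wide attention, 8 key/value heads, i.e. 4:1 grouped-query attention), and a 151665-entry vocabulary covering the added special tokens listed above.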
config_sentence_transformers.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "prompts": {
+     "query": "Instruct: Given a web search query, retrieve relevant passages that answer the query\nQuery:",
+     "document": ""
+   },
+   "default_prompt_name": null,
+   "similarity_fn_name": "cosine",
+   "model_type": "SentenceTransformer",
+   "__version__": {
+     "sentence_transformers": "5.1.1",
+     "transformers": "4.56.2",
+     "pytorch": "2.8.0+cu128"
+   }
+ }
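
These prompts are what `encode_query` and `encode_document` in the README's usage example apply automatically: queries are prefixed with the instruction above, documents are embedded as-is. The explicit equivalent (illustrative):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("TakalaWang/qwen3-embedding-4B-code-search")
q = model.encode(["Read the ical file"], prompt_name="query")  # instruction prepended
d = model.encode(["def read_ical(self, path): ..."])           # no prompt
```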
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c488152fa1ba90ec7e8b20cf97e0a851021f7b89fdf26db9d09a7a861c0d7b
+ size 4965826240
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6ddc7bafb784fb713db949ead68680a2505871116c26f1829e40f7f3589aeeec
+ size 3077765456
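
As a sanity check, the two shards hold the model's 4,021,774,336 parameters in float16: 4,021,774,336 × 2 bytes = 8,043,548,672, exactly the `total_size` recorded in the index below (the shard files themselves are marginally larger due to safetensors headers).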
model.safetensors.index.json ADDED
@@ -0,0 +1,406 @@
+ {
+   "metadata": {
+     "total_parameters": 4021774336,
+     "total_size": 8043548672
+   },
+   "weight_map": {
+     "embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
404
+ "norm.weight": "model-00002-of-00002.safetensors"
405
+ }
406
+ }
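The weight map above is what tells loaders which safetensors shard holds each tensor. As a minimal sketch of resolving one tensor by hand (assuming a local checkout of this repo with the index and both shard files present; the path and tensor name are taken from the map above):

```python
import json
from safetensors import safe_open

repo_dir = "."  # hypothetical path to a local checkout of this repo

# The index maps every tensor name to the shard file that stores it.
with open(f"{repo_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

tensor_name = "layers.4.mlp.down_proj.weight"
shard_file = index["weight_map"][tensor_name]  # "model-00001-of-00002.safetensors"

# Open only the shard we need and read that single tensor lazily.
with safe_open(f"{repo_dir}/{shard_file}", framework="pt") as shard:
    weight = shard.get_tensor(tensor_name)
print(weight.shape)
```

In normal use `from_pretrained` consults this index automatically; the manual lookup is only useful for inspecting individual shards.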
modules.json ADDED
@@ -0,0 +1,20 @@
+ [
+ {
+ "idx": 0,
+ "name": "0",
+ "path": "",
+ "type": "sentence_transformers.models.Transformer"
+ },
+ {
+ "idx": 1,
+ "name": "1",
+ "path": "1_Pooling",
+ "type": "sentence_transformers.models.Pooling"
+ },
+ {
+ "idx": 2,
+ "name": "2",
+ "path": "2_Normalize",
+ "type": "sentence_transformers.models.Normalize"
+ }
+ ]
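modules.json wires the checkpoint into a three-module sentence-transformers pipeline: the Transformer backbone, a Pooling module configured under `1_Pooling`, and a final Normalize step, so embeddings come out unit-length. A usage sketch (the repo id below is illustrative, not the published one):

```python
from sentence_transformers import SentenceTransformer

# SentenceTransformer reads modules.json and assembles
# Transformer -> Pooling -> Normalize in that order.
model = SentenceTransformer("TakalaWang/qwen3-embedding-4b-code-search")  # hypothetical id

emb = model.encode(["def walk(path): ..."])
print(emb.shape)  # (1, embedding_dim); the Normalize module L2-normalizes each vector
```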
sentence_bert_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+ "max_seq_length": 40960,
+ "do_lower_case": false
+ }
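`max_seq_length` caps tokenized inputs at 40960 tokens; longer inputs are truncated. If you need a shorter cap for throughput, sentence-transformers lets you override it at load time, e.g. (repo id again illustrative):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("TakalaWang/qwen3-embedding-4b-code-search")  # hypothetical id
model.max_seq_length = 1024  # override the 40960 default from sentence_bert_config.json
```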
special_tokens_map.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "eos_token": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
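Note that the eos token (`<|im_end|>`) and pad token (`<|endoftext|>`) are distinct here. A quick check that the loaded tokenizer resolves them as configured (repo id illustrative):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("TakalaWang/qwen3-embedding-4b-code-search")  # hypothetical id
print(tok.eos_token, tok.eos_token_id)  # <|im_end|>
print(tok.pad_token, tok.pad_token_id)  # <|endoftext|>
```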
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7e24abbf8db66065c500459b3f6b876165878c4d45486d8a54466da3b8e0f81
+ size 11423215
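This is a Git LFS pointer, not the tokenizer itself: the real ~11 MB tokenizer.json is stored via LFS and fetched on checkout or download. A sketch of verifying a downloaded file against the pointer's digest (assumes tokenizer.json has been materialized locally):

```python
import hashlib

# The pointer's oid is the sha256 of the actual file contents.
expected = "b7e24abbf8db66065c500459b3f6b876165878c4d45486d8a54466da3b8e0f81"
with open("tokenizer.json", "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == expected, "tokenizer.json does not match the LFS pointer"
```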
tokenizer_config.json ADDED
@@ -0,0 +1,207 @@
+ {
+ "add_bos_token": false,
+ "add_prefix_space": false,
+ "added_tokens_decoder": {
+ "151643": {
+ "content": "<|endoftext|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151644": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151645": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151646": {
+ "content": "<|object_ref_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151647": {
+ "content": "<|object_ref_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151648": {
+ "content": "<|box_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151649": {
+ "content": "<|box_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151650": {
+ "content": "<|quad_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151651": {
+ "content": "<|quad_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151652": {
+ "content": "<|vision_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151653": {
+ "content": "<|vision_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151654": {
+ "content": "<|vision_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151655": {
+ "content": "<|image_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151656": {
+ "content": "<|video_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "151657": {
+ "content": "<tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151658": {
+ "content": "</tool_call>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151659": {
+ "content": "<|fim_prefix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151660": {
+ "content": "<|fim_middle|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151661": {
+ "content": "<|fim_suffix|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151662": {
+ "content": "<|fim_pad|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151663": {
+ "content": "<|repo_name|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ },
+ "151664": {
+ "content": "<|file_sep|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": false
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|im_end|>",
+ "errors": "replace",
+ "extra_special_tokens": {},
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }
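tokenizer_config.json registers the control tokens above under `added_tokens_decoder`, selects `Qwen2Tokenizer`, and leaves `bos_token` null, so sequences are terminated with `<|im_end|>` rather than prefixed. A minimal sketch checking that the control tokens map to single ids (repo id illustrative; the expected ids come straight from the config above):

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("TakalaWang/qwen3-embedding-4b-code-search")  # hypothetical id

# Entries from added_tokens_decoder tokenize atomically to their fixed ids.
print(tok.convert_tokens_to_ids("<|im_end|>"))     # 151645
print(tok.convert_tokens_to_ids("<|endoftext|>"))  # 151643
```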
vocab.json ADDED
The diff for this file is too large to render. See raw diff