tsor13 commited on
Commit
1c567fa
·
verified ·
1 Parent(s): 7dbb55f

Initial upload of fine-tuned Gemma + custom tokenizer

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "<image_soft_token>": 262144
3
+ }
config.json ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "architectures": [
3
+ "Gemma3ForConditionalGeneration"
4
+ ],
5
+ "boi_token_index": 255999,
6
+ "eoi_token_index": 256000,
7
+ "eos_token_id": [
8
+ 1,
9
+ 106
10
+ ],
11
+ "image_token_index": 262144,
12
+ "initializer_range": 0.02,
13
+ "mm_tokens_per_image": 256,
14
+ "model_type": "gemma3",
15
+ "text_config": {
16
+ "attention_bias": false,
17
+ "attention_dropout": 0.0,
18
+ "attn_logit_softcapping": null,
19
+ "cache_implementation": "hybrid",
20
+ "final_logit_softcapping": null,
21
+ "head_dim": 256,
22
+ "hidden_activation": "gelu_pytorch_tanh",
23
+ "hidden_size": 3840,
24
+ "initializer_range": 0.02,
25
+ "intermediate_size": 15360,
26
+ "max_position_embeddings": 131072,
27
+ "model_type": "gemma3_text",
28
+ "num_attention_heads": 16,
29
+ "num_hidden_layers": 48,
30
+ "num_key_value_heads": 8,
31
+ "query_pre_attn_scalar": 256,
32
+ "rms_norm_eps": 1e-06,
33
+ "rope_local_base_freq": 10000.0,
34
+ "rope_scaling": {
35
+ "factor": 8.0,
36
+ "rope_type": "linear"
37
+ },
38
+ "rope_theta": 1000000.0,
39
+ "sliding_window": 1024,
40
+ "sliding_window_pattern": 6,
41
+ "torch_dtype": "float32",
42
+ "use_cache": true,
43
+ "vocab_size": 262208
44
+ },
45
+ "torch_dtype": "bfloat16",
46
+ "transformers_version": "4.51.3",
47
+ "vision_config": {
48
+ "attention_dropout": 0.0,
49
+ "hidden_act": "gelu_pytorch_tanh",
50
+ "hidden_size": 1152,
51
+ "image_size": 896,
52
+ "intermediate_size": 4304,
53
+ "layer_norm_eps": 1e-06,
54
+ "model_type": "siglip_vision_model",
55
+ "num_attention_heads": 16,
56
+ "num_channels": 3,
57
+ "num_hidden_layers": 27,
58
+ "patch_size": 14,
59
+ "torch_dtype": "float32",
60
+ "vision_use_head": false
61
+ }
62
+ }
gemma_explicit_tokenizer.py ADDED
@@ -0,0 +1,429 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Custom Gemma Tokenizer for explicit Format
3
+
4
+ This tokenizer implements the explicit format for message processing:
5
+ Format: Uses the standard chat template with proper role labels (user/assistant)
6
+
7
+ The explicit format uses the model's built-in chat template and includes proper
8
+ loss computation flags for training.
9
+
10
+ To save:
11
+ uv run tokenizers/gemma_explicit_tokenizer.py
12
+ which will save the tokenizer to the repos/explicit-gemma-tokenizer directory.
13
+ mkdir repos/explicit12b
14
+ # copy model over
15
+ cp models_v8/base_modified-google-gemma-3-12b-pt-/models/_explicit/checkpoint-8/* repos/explicit12b/
16
+ # copy tokenizer over
17
+ cp repos/explicit-gemma-tokenizer/* repos/explicit12b/
18
+ # upload to hf
19
+
20
+ uv run upload_to_hf.py \
21
+ --folder repos/explicit12b \
22
+ --repo-id tsor13/explicit12b
23
+ """
24
+
25
+ from typing import List, Dict, Any, Optional, Union
26
+ from transformers import AutoTokenizer
27
+ from transformers.models.gemma.tokenization_gemma_fast import GemmaTokenizerFast
28
+ from transformers.models.gemma.tokenization_gemma import GemmaTokenizer
29
+ import warnings
30
+ import difflib
31
+ import json
32
+ import os
33
+ import sys
34
+
35
+ # # Add parent directory to path to import chat_utils
36
+ # sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
37
+ # from chat_utils import chat_messages_to_text_loss, chat_messages_to_raw_text
38
+
39
+
40
class GemmaExplicitTokenizer(GemmaTokenizerFast):
    """Gemma tokenizer that speaks the "explicit" message format.

    Conversations use the roles ``description`` / ``input`` / ``output`` and
    are rendered with the model's turn delimiters.  Per-chunk loss flags are
    attached so that training optimizes only the output sections.

    Attributes:
        start_string (str): Delimiter opening a turn ("<start_of_turn>").
        end_string (str): Delimiter closing a turn ("<end_of_turn>").
    """

    def __init__(self, *args, **kwargs):
        """Initialize exactly like GemmaTokenizerFast, then attach the
        explicit-format turn delimiters and record them in ``init_kwargs``
        so they survive save/load round-trips.
        """
        super().__init__(*args, **kwargs)

        # Turn delimiters used by the explicit chat format.
        self.start_string = "<start_of_turn>"
        self.end_string = "<end_of_turn>"

        # Persist the delimiters into the tokenizer configuration.
        if not hasattr(self, "init_kwargs"):
            self.init_kwargs = {}
        for key in ("start_string", "end_string"):
            self.init_kwargs[key] = getattr(self, key)
72
+
73
+ @classmethod
74
+ def from_gemma_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
75
+ """
76
+ Load a tokenizer from a pretrained model or path.
77
+
78
+ This method ensures our custom class is used instead of the base GemmaTokenizerFast.
79
+ """
80
+ # Load the base tokenizer first to get all configuration
81
+ base_tokenizer = GemmaTokenizerFast.from_pretrained(
82
+ pretrained_model_name_or_path, *args, **kwargs
83
+ )
84
+
85
+ # Create new instance of our custom class by copying the base tokenizer
86
+ custom_tokenizer = cls.__new__(cls)
87
+
88
+ # Copy all attributes from base tokenizer
89
+ for attr, value in base_tokenizer.__dict__.items():
90
+ setattr(custom_tokenizer, attr, value)
91
+
92
+ # Initialize our custom attributes for explicit format
93
+ custom_tokenizer.start_string = "<start_of_turn>"
94
+ custom_tokenizer.end_string = "<end_of_turn>"
95
+
96
+ # Update init_kwargs to include our custom attributes
97
+ if not hasattr(custom_tokenizer, 'init_kwargs'):
98
+ custom_tokenizer.init_kwargs = {}
99
+ custom_tokenizer.init_kwargs['start_string'] = custom_tokenizer.start_string
100
+ custom_tokenizer.init_kwargs['end_string'] = custom_tokenizer.end_string
101
+
102
+ return custom_tokenizer
103
+
104
+ def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
105
+ """
106
+ Save the tokenizer to a directory, including custom configuration.
107
+ """
108
+ # Call parent save method
109
+ super().save_pretrained(save_directory, **kwargs)
110
+
111
+ # Save custom configuration
112
+ config_file = os.path.join(save_directory, "tokenizer_config.json")
113
+ if os.path.exists(config_file):
114
+ with open(config_file, 'r') as f:
115
+ config = json.load(f)
116
+ else:
117
+ config = {}
118
+
119
+ # Add our custom class info
120
+ config["tokenizer_class"] = "GemmaExplicitTokenizer"
121
+ config["start_string"] = self.start_string
122
+ config["end_string"] = self.end_string
123
+ # Point to our custom class in the uploaded file
124
+ config["auto_map"] = {
125
+ "AutoTokenizer": ["gemma_explicit_tokenizer.GemmaExplicitTokenizer", "gemma_explicit_tokenizer.GemmaExplicitTokenizer"]
126
+ }
127
+
128
+ with open(config_file, 'w') as f:
129
+ json.dump(config, f, indent=2)
130
+
131
def messages_to_loss_texts(
    self,
    messages: List[Dict[str, Any]],
    loss_on_start_token: bool = False,
) -> List[Dict[str, Any]]:
    """Render messages (description / input / output) into text chunks,
    each tagged with a ``compute_loss`` flag for training.

    Only output turns receive loss; the very first output is exempt when no
    description precedes it (nothing conditions that prediction yet).
    Matches the explicit format from chat_utils.py.
    """
    # Loss on the start token is currently force-disabled.
    loss_on_start_token = False

    chunks: List[Dict[str, Any]] = []
    seen_description = False
    awaiting_first_output = True

    for msg in messages:
        role = msg["role"]
        content = msg["content"]

        if role in ("description", "input"):
            seen_description = seen_description or role == "description"
            chunks.append({
                "text": f"{self.start_string}{role}\n{content}{self.end_string}\n",
                "compute_loss": False,
                **msg,
            })
            continue

        if role != "output":
            raise ValueError(f"Unknown role: {role}. Must be description, input, or output.")

        if loss_on_start_token:
            # Unreachable while the override above stands; kept for parity.
            raise ValueError("Loss on start token is not supported for chat formatters.")

        # The output-turn header never gets loss.
        chunks.append({
            "text": f"{self.start_string}{role}\n",
            "compute_loss": False,
            **msg,
        })
        # Content plus closing delimiter gets loss, except for an
        # unconditioned first output (no description seen yet).
        exempt = awaiting_first_output and not seen_description
        chunks.append({
            "text": f"{content}{self.end_string}",
            "compute_loss": not exempt,
            **msg,
        })
        chunks.append({"text": "\n", "compute_loss": False, **msg})
        awaiting_first_output = False

    return chunks
176
+
177
def messages_to_text(
    self,
    messages: List[Dict[str, Any]],
    start_generation: bool = False,
) -> str:
    """Concatenate the explicit-format rendering of ``messages`` into one
    raw string.

    When ``start_generation`` is True, an open output turn is appended so
    the model continues by generating an output.
    """
    rendered = "".join(
        chunk["text"] for chunk in self.messages_to_loss_texts(messages)
    )
    if start_generation:
        rendered += self.start_string + "output\n"
    return rendered
191
+
192
+
193
+ def tokenize_messages(
194
+ self,
195
+ messages: List[Dict[str, Any]] | List[List[Dict[str, Any]]],
196
+ start_generation: bool = False,
197
+ **kwargs,
198
+ ):
199
+ """
200
+ For tokenizing from messages to texts. Supports batching. Good for generation
201
+ """
202
+ if isinstance(messages, list) and isinstance(messages[0], list):
203
+ # Handle list of lists of messages
204
+ all_texts = []
205
+ for message_list in messages:
206
+ texts = self.messages_to_text(message_list, start_generation)
207
+ all_texts.append(texts)
208
+ else:
209
+ # Handle single list of messages
210
+ texts = self.messages_to_text(messages, start_generation)
211
+ all_texts = [texts]
212
+
213
+ # Tokenize all texts
214
+ processed = self(text=all_texts, **kwargs)
215
+ return processed
216
+
217
+
218
def tokenize_loss_texts(
    self,
    texts: Union[str, List[Dict[str, Any]]],
    loss_on_start_token: bool = False,
    loss_on_eos: bool = False,
    include_eos: bool = True,
):
    """Tokenize (text / compute_loss) chunks into input_ids /
    attention_mask / labels, masking labels with -100 wherever
    ``compute_loss`` is False.

    Args:
        texts: Either a raw string, or a list of dicts with at least
            ``text`` and ``compute_loss`` keys. Optional ``example_ind``
            and ``data_id`` keys are expanded token-wise into the
            ``example_inds`` / ``data_ids`` output lists (-1 = absent).
        loss_on_start_token: Unused here; kept for interface compatibility.
        loss_on_eos: Must be False; True raises.
        include_eos: When False, the trailing EOS (and its entries in the
            parallel lists) is stripped from the final result.

    Raises:
        ValueError: If ``loss_on_eos`` is True, ``texts`` is an empty
            list, or an unexpected EOS appears mid-sequence.
    """
    if loss_on_eos:
        raise ValueError("Loss on EOS is not currently supported.")

    # Raw-string fast path: tokenize and ensure a trailing EOS. Note this
    # path returns no "labels" key.
    if isinstance(texts, str):
        processed = self(text=texts)
        if (self.eos_token_id is not None and
                processed["input_ids"][-1] != self.eos_token_id):
            processed["input_ids"] = processed["input_ids"] + [self.eos_token_id]
            processed["attention_mask"] = processed["attention_mask"] + [1]
        return processed

    if not texts:
        # The original code crashed with IndexError further down on an
        # empty list; fail early with a clear message instead.
        raise ValueError("texts must be a non-empty list of text dicts.")

    all_processed = []
    all_texts = ""
    example_inds = []
    dataset_inds = []

    for i, item in enumerate(texts):
        processed = self(text=item["text"])

        # Only the first chunk keeps its BOS token.
        if i != 0 and self.bos_token_id == processed["input_ids"][0]:
            processed["input_ids"] = processed["input_ids"][1:]
            processed["attention_mask"] = processed["attention_mask"][1:]

        # Drop a trailing EOS; a single EOS is re-appended at the end.
        if processed["input_ids"][-1] == self.eos_token_id:
            processed["input_ids"] = processed["input_ids"][:-1]
            processed["attention_mask"] = processed["attention_mask"][:-1]

        # A mid-sequence EOS is unsupported, except for tokenizers whose
        # EOS decodes to "<|im_end|>" (it legitimately appears inline).
        if self.eos_token_id in processed["input_ids"]:
            if not self.decode([self.eos_token_id]) == "<|im_end|>":
                raise ValueError(f"EOS token is present in input_ids: {processed['input_ids']}. Not currently supported.")

        # Labels: real token ids where loss is computed, -100 elsewhere.
        if item["compute_loss"]:
            processed["labels"] = processed["input_ids"].copy()
        else:
            processed["labels"] = [-100] * len(processed["input_ids"])

        # Second BOS guard: catches a BOS still left at position 0 (e.g. a
        # doubled BOS) and keeps labels aligned with input_ids.
        if all_processed:
            if processed["input_ids"][0] == self.bos_token_id:
                processed["input_ids"] = processed["input_ids"][1:]
                processed["attention_mask"] = processed["attention_mask"][1:]
                processed["labels"] = processed["labels"][1:]

        all_processed.append(processed)
        all_texts += item["text"]

        # Token-wise bookkeeping; -1 marks "no example / dataset id".
        example_ind = item.get("example_ind")
        example_inds.extend(
            [example_ind if example_ind is not None else -1]
            * len(processed["input_ids"])
        )
        data_id = item.get("data_id")
        dataset_inds.extend(
            [data_id if data_id is not None else -1]
            * len(processed["input_ids"])
        )

    # Concatenate the per-chunk encodings into one flat result.
    processed = all_processed[0].copy()
    processed["input_ids"] = [t for p in all_processed for t in p["input_ids"]]
    processed["attention_mask"] = [m for p in all_processed for m in p["attention_mask"]]
    processed["labels"] = [lab for p in all_processed for lab in p["labels"]]
    processed["example_inds"] = example_inds
    processed["data_ids"] = dataset_inds

    # Sanity check: chunk-wise tokenization should match tokenizing the
    # concatenated text in one shot. Debug diffs are dumped only on a
    # mismatch (NOTE(review): scoping inferred from intent — confirm
    # against the original module's indentation).
    processed_all = self(text=all_texts)
    if len(processed_all["input_ids"]) != len(processed["input_ids"]):
        warnings.warn(f"All texts are not the same length as the first text. Please check your dataset. {len(processed_all['input_ids'])} != {len(processed['input_ids'])}")

        all_text = self.decode(processed_all["input_ids"], skip_special_tokens=False)
        processed_text = self.decode(processed["input_ids"], skip_special_tokens=False)
        diff = difflib.unified_diff(all_text.splitlines(), processed_text.splitlines())
        print("Diff between texts:")
        print("\n".join(diff))

        all_tokens_str = "\n".join(str(s) for s in processed_all["input_ids"])
        processed_tokens_str = "\n".join(str(s) for s in processed["input_ids"])
        token_diff = difflib.unified_diff(all_tokens_str.splitlines(), processed_tokens_str.splitlines())
        print("Diff between tokenized texts:")
        print("\n".join(token_diff))

    # Guarantee a single trailing EOS, kept out of the loss; the parallel
    # index lists are padded to stay aligned.
    if (self.eos_token_id is not None and
            processed["input_ids"][-1] != self.eos_token_id):
        processed["input_ids"] = processed["input_ids"] + [self.eos_token_id]
        processed["example_inds"] = processed["example_inds"] + [-1]
        processed["attention_mask"] = processed["attention_mask"] + [1]
        # loss_on_eos is always False here (guarded at entry), so the
        # original's `labels + [eos_token_id]` branch was unreachable and
        # has been removed.
        processed["labels"] = processed["labels"] + [-100]
        if "data_ids" in processed:
            processed["data_ids"] = processed["data_ids"] + [-1]

    if not include_eos:
        # Caller wants the sequence without the trailing EOS.
        if processed["input_ids"][-1] == self.eos_token_id:
            processed["input_ids"] = processed["input_ids"][:-1]
            processed["attention_mask"] = processed["attention_mask"][:-1]
            processed["labels"] = processed["labels"][:-1]
            processed["example_inds"] = processed["example_inds"][:-1]
            processed["data_ids"] = processed["data_ids"][:-1]

    return processed
352
+
353
def tokenize_messages(
    self,
    messages: List[Dict[str, Any]],
    loss_on_start_token: bool = False,
    loss_on_eos: bool = False,
    include_eos: bool = True,
) -> Dict[str, Any]:
    """Tokenize messages into input_ids / attention_mask / labels with the
    loss mask applied. Intended for training.

    Fix: ``loss_on_eos`` is now forwarded by keyword. The original passed
    it positionally as the second argument of ``tokenize_loss_texts``,
    where it landed in the ``loss_on_start_token`` slot — the real
    ``loss_on_eos`` flag was silently dropped.
    """
    # First convert messages to text chunks with loss computation flags.
    texts = self.messages_to_loss_texts(messages, loss_on_start_token)

    # Then tokenize the chunks, forwarding the flags explicitly.
    return self.tokenize_loss_texts(
        texts, loss_on_eos=loss_on_eos, include_eos=include_eos
    )
368
+
369
+
370
+
371
+
372
# Register tokenizer classes for AutoTokenizer
# NOTE(review): AutoTokenizer.register normally expects a config *class* as its
# first argument, not a string name — verify this registration actually takes
# effect (loading from the Hub may rely on the auto_map entry written by
# save_pretrained instead).
AutoTokenizer.register("GemmaExplicitTokenizer", slow_tokenizer_class=None, fast_tokenizer_class=GemmaExplicitTokenizer)
374
+
375
+
376
if __name__ == "__main__":
    # Build the custom tokenizer from the base Gemma checkpoint
    # (first-load path; downloads from the Hub).
    custom_tokenizer = GemmaExplicitTokenizer.from_gemma_pretrained("google/gemma-3-1b-it")

    # Conversations covering: description + input/output, description with
    # outputs only, bare outputs, and alternating input/output turns.
    test_messages = [
        [
            {"role": "description", "content": "This is a test task"},
            {"role": "input", "content": "What is 2+2?"},
            {"role": "output", "content": "4"},
            {"role": "input", "content": "What is 3+3?"},
        ],
        [
            {"role": "description", "content": "This is a test task"},
            {"role": "output", "content": "4"},
            {"role": "output", "content": "10"},
            {"role": "output", "content": "13"},
        ],
        [
            {"role": "output", "content": "4"},
            {"role": "output", "content": "10"},
            {"role": "output", "content": "13"},
        ],
        [
            {"role": "input", "content": "What is 2+2?"},
            {"role": "output", "content": "4"},
            {"role": "input", "content": "What is 3+3?"},
            {"role": "output", "content": "10"},
            {"role": "input", "content": "What is 4+4?"},
        ],
    ]

    for messages in test_messages:
        # Render each conversation into (text, compute_loss) chunks.
        loss_chunks = custom_tokenizer.messages_to_loss_texts(messages)

        print("Texts with loss flags:")
        for i, text in enumerate(loss_chunks):
            print(f" {i}: {text}")

        # Render the full prompt with an open output turn appended.
        rendered = custom_tokenizer.messages_to_text(messages, start_generation=True)
        print(f"\nFull text with generation prompt:")
        print(rendered)

    print("\nTesting save/load cycle:")
    tokenizer_path = "repos/explicit-gemma-tokenizer"
    custom_tokenizer.save_pretrained(tokenizer_path)
    print("Tokenizer saved successfully!")

    # Ship this module alongside the tokenizer so the auto_map entry in
    # tokenizer_config.json can import the custom class.
    import shutil
    shutil.copy(__file__, os.path.join(tokenizer_path, "gemma_explicit_tokenizer.py"))
    print("GemmaExplicitTokenizer.py saved successfully!")
generation_config.json ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 2,
3
+ "cache_implementation": "hybrid",
4
+ "do_sample": true,
5
+ "eos_token_id": [
6
+ 1,
7
+ 106
8
+ ],
9
+ "pad_token_id": 0,
10
+ "top_k": 64,
11
+ "top_p": 0.95,
12
+ "transformers_version": "4.51.3"
13
+ }
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0671cab2a8f94872810b970ac47b9060d358ca9ff131af31c216621f6d89fe2b
3
+ size 4979902192
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52dfe505e57fcd5835e806d0d19fd9c7ef19a11f0b6dae6877c084b16ac4db70
3
+ size 4931296592
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0ed43a3bace6e02d584abcfb101adcdc08022c65e5ca473fb8fe2c4bbb1ee7be
3
+ size 4931296656
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c04b4408969e9473d8fd8174b881a4bd1220b93725e3b532671b2e26a2707be
3
+ size 4931296656
model-00005-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b7f85f21ad6209156a56a41a9de9b06354f1cffe51166757cabfeeca0f52d1e
3
+ size 4601000928
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "boi_token": "<start_of_image>",
3
+ "bos_token": {
4
+ "content": "<bos>",
5
+ "lstrip": false,
6
+ "normalized": false,
7
+ "rstrip": false,
8
+ "single_word": false
9
+ },
10
+ "eoi_token": "<end_of_image>",
11
+ "eos_token": {
12
+ "content": "<eos>",
13
+ "lstrip": false,
14
+ "normalized": false,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "image_token": "<image_soft_token>",
19
+ "pad_token": {
20
+ "content": "<pad>",
21
+ "lstrip": false,
22
+ "normalized": false,
23
+ "rstrip": false,
24
+ "single_word": false
25
+ },
26
+ "unk_token": {
27
+ "content": "<unk>",
28
+ "lstrip": false,
29
+ "normalized": false,
30
+ "rstrip": false,
31
+ "single_word": false
32
+ }
33
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
3
+ size 33384568
tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff
 
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:30e28ac87c293ee885688a284a96dbd0efd51290d74fbef6c1f426e728026204
3
+ size 7313