IkIzma committed
Commit 9ec2088 · Parent(s): 3053a3f

Upload run_clm.py

Files changed (1)
  1. run_clm.py +635 -0
run_clm.py ADDED
@@ -0,0 +1,635 @@
#!/usr/bin/env python
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for causal language modeling (GPT, GPT-2, CTRL, ...) on a text file or a dataset.

Here is the full list of checkpoints on the hub that can be fine-tuned by this script:
https://huggingface.co/models?filter=text-generation
"""
# You can also adapt this script to your own causal language modeling task. Pointers for this are left as comments.
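
# A typical invocation (this example mirrors the one in the examples/pytorch/language-modeling
# README; the dataset choice and output directory are illustrative, not prescribed by the script):
#
#   python run_clm.py \
#       --model_name_or_path gpt2 \
#       --dataset_name wikitext \
#       --dataset_config_name wikitext-2-raw-v1 \
#       --do_train \
#       --do_eval \
#       --output_dir /tmp/test-clm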

import logging
import math
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional

import datasets
import evaluate
import torch
from datasets import load_dataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_FOR_CAUSAL_LM_MAPPING,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
    is_torch_tpu_available,
    set_seed,
)
from transformers.testing_utils import CaptureLogger
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version


# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.29.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")

logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_FOR_CAUSAL_LM_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizers (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    torch_dtype: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override the default `torch.dtype` and load the model under this dtype. If `auto` is passed, the "
                "dtype will be automatically derived from the model's weights."
            ),
            "choices": ["auto", "bfloat16", "float16", "float32"],
        },
    )
    low_cpu_mem_usage: bool = field(
        default=False,
        metadata={
            "help": (
                "It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded. "
                "Setting it to True will benefit LLM loading time and RAM consumption."
            )
        },
    )

    def __post_init__(self):
        if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
            raise ValueError(
                "--config_overrides can't be used in combination with --config_name or --model_name_or_path"
            )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    streaming: bool = field(default=False, metadata={"help": "Enable streaming mode"})
    block_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "Optional input sequence length after tokenization. "
                "The training dataset will be truncated in blocks of this size for training. "
                "Defaults to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    validation_split_percentage: Optional[int] = field(
        default=5,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    keep_linebreaks: bool = field(
        default=True, metadata={"help": "Whether to keep line breaks when using TXT files or not."}
    )

    def __post_init__(self):
        if self.streaming:
            require_version("datasets>=2.0.0", "The streaming feature requires `datasets>=2.0.0`")

        if self.dataset_name is None and self.train_file is None and self.validation_file is None:
            raise ValueError("Need either a dataset name or a training/validation file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
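        # For example (hypothetical file name and contents):
        #   python run_clm.py args.json
        # with args.json containing {"model_name_or_path": "gpt2", "output_dir": "/tmp/test-clm", "do_train": true}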
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_clm", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f", distributed training: {bool(training_args.local_rank != -1)}, 16-bit training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
            streaming=data_args.streaming,
        )
        if "validation" not in raw_datasets.keys():
            raw_datasets["validation"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
                streaming=data_args.streaming,
            )
            raw_datasets["train"] = load_dataset(
                data_args.dataset_name,
                data_args.dataset_config_name,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
                streaming=data_args.streaming,
            )
    else:
        data_files = {}
        dataset_args = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        extension = (
            data_args.train_file.split(".")[-1]
            if data_args.train_file is not None
            else data_args.validation_file.split(".")[-1]
        )
        if extension == "txt":
            extension = "text"
            dataset_args["keep_linebreaks"] = data_args.keep_linebreaks
        raw_datasets = load_dataset(
            extension,
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
            **dataset_args,
        )
        # If no validation data is there, validation_split_percentage will be used to divide the dataset.
        if "validation" not in raw_datasets.keys():
            raw_datasets["validation"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[:{data_args.validation_split_percentage}%]",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
                **dataset_args,
            )
            raw_datasets["train"] = load_dataset(
                extension,
                data_files=data_files,
                split=f"train[{data_args.validation_split_percentage}%:]",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
                **dataset_args,
            )

    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.

    config_kwargs = {
        "cache_dir": model_args.cache_dir,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")
        if model_args.config_overrides is not None:
            logger.info(f"Overriding config: {model_args.config_overrides}")
            config.update_from_string(model_args.config_overrides)
            logger.info(f"New config: {config}")

    tokenizer_kwargs = {
        "cache_dir": model_args.cache_dir,
        "use_fast": model_args.use_fast_tokenizer,
        "revision": model_args.model_revision,
        "use_auth_token": True if model_args.use_auth_token else None,
    }
    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_name."
        )
    if model_args.model_name_or_path:
        torch_dtype = (
            model_args.torch_dtype
            if model_args.torch_dtype in ["auto", None]
            else getattr(torch, model_args.torch_dtype)
        )
        model = AutoModelForCausalLM.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
            torch_dtype=torch_dtype,
            low_cpu_mem_usage=model_args.low_cpu_mem_usage,
        )
    else:
        model = AutoModelForCausalLM.from_config(config)
        n_params = sum({p.data_ptr(): p.numel() for p in model.parameters()}.values())
        logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params")

    # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
    # on a small vocab and want a smaller embedding size, remove this test.
    embedding_size = model.get_input_embeddings().weight.shape[0]
    if len(tokenizer) > embedding_size:
        model.resize_token_embeddings(len(tokenizer))

    # Preprocessing the datasets.
    # First we tokenize all the texts.
    if training_args.do_train:
        column_names = list(raw_datasets["train"].features)
    else:
        column_names = list(raw_datasets["validation"].features)
    text_column_name = "text" if "text" in column_names else column_names[0]

    # Since this will be pickled to avoid _LazyModule error in Hasher, force logger loading before tokenize_function
    tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base")

    def tokenize_function(examples):
        with CaptureLogger(tok_logger) as cl:
            output = tokenizer(examples[text_column_name])
        # clm input could be much much longer than block_size
        if "Token indices sequence length is longer than the" in cl.out:
            tok_logger.warning(
                "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits"
                " before being passed to the model."
            )
        return output

    with training_args.main_process_first(desc="dataset map tokenization"):
        if not data_args.streaming:
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                remove_columns=column_names,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on dataset",
            )
        else:
            tokenized_datasets = raw_datasets.map(
                tokenize_function,
                batched=True,
                remove_columns=column_names,
            )

    if data_args.block_size is None:
        block_size = tokenizer.model_max_length
        if block_size > 1024:
            logger.warning(
                "The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
                " of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
                " override this default with `--block_size xxx`."
            )
            block_size = 1024
    else:
        if data_args.block_size > tokenizer.model_max_length:
            logger.warning(
                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
                f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
            )
        block_size = min(data_args.block_size, tokenizer.model_max_length)

    # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys())[0]])
        # We drop the small remainder; we could add padding if the model supported it instead of this drop.
        # You can customize this part to your needs.
        if total_length >= block_size:
            total_length = (total_length // block_size) * block_size
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
            for k, t in concatenated_examples.items()
        }
        result["labels"] = result["input_ids"].copy()
        return result
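
    # Illustration of group_texts (hypothetical token ids): with block_size=4, tokenized examples
    # [1, 2, 3] and [4, 5, 6, 7, 8, 9] are concatenated into [1, 2, 3, 4, 5, 6, 7, 8, 9]; the
    # remainder [9] is dropped and the chunks become [1, 2, 3, 4] and [5, 6, 7, 8]. "labels" is a
    # plain copy of "input_ids" because the model shifts the labels internally when computing the loss.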

    # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder
    # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower
    # to preprocess.
    #
    # To speed up this part, we use multiprocessing. See the documentation of the map method for more information:
    # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map

    with training_args.main_process_first(desc="grouping texts together"):
        if not data_args.streaming:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
                num_proc=data_args.preprocessing_num_workers,
                load_from_cache_file=not data_args.overwrite_cache,
                desc=f"Grouping texts in chunks of {block_size}",
            )
        else:
            lm_datasets = tokenized_datasets.map(
                group_texts,
                batched=True,
            )

    if training_args.do_train:
        if "train" not in tokenized_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = lm_datasets["train"]
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))

    if training_args.do_eval:
        if "validation" not in tokenized_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = lm_datasets["validation"]
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))

        def preprocess_logits_for_metrics(logits, labels):
            if isinstance(logits, tuple):
                # Depending on the model and config, logits may contain extra tensors,
                # like past_key_values, but logits always come first
                logits = logits[0]
            return logits.argmax(dim=-1)

        metric = evaluate.load("accuracy")

        def compute_metrics(eval_preds):
            preds, labels = eval_preds
            # preds have the same shape as the labels, after the argmax(-1) has been calculated
            # by preprocess_logits_for_metrics, but we need to shift the labels
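            # (the prediction at position i corresponds to the label at position i + 1, so we
            # drop the last prediction and the first label to align the two flattened arrays)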
            labels = labels[:, 1:].reshape(-1)
            preds = preds[:, :-1].reshape(-1)
            return metric.compute(predictions=preds, references=labels)

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=tokenizer,
        # Data collator will default to DataCollatorWithPadding, so we change it.
        data_collator=default_data_collator,
        compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None,
        preprocess_logits_for_metrics=preprocess_logits_for_metrics
        if training_args.do_eval and not is_torch_tpu_available()
        else None,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()  # Saves the tokenizer too for easy upload

        metrics = train_result.metrics

        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate()

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        try:
            perplexity = math.exp(metrics["eval_loss"])
        except OverflowError:
            perplexity = float("inf")
        metrics["perplexity"] = perplexity

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"}
    if data_args.dataset_name is not None:
        kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            kwargs["dataset_args"] = data_args.dataset_config_name
            kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()