Columns: id (int64, range 0-190k), prompt (string, lengths 21-13.4M), docstring (string, lengths 1-12k)
11,689
import argparse import csv import glob import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from transformers import ( WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, get_linear_schedule_with_warmup, ) from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) if args.local_rank == -1 else DistributedSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss, eval_accuracy = 0, 0 nb_eval_steps, nb_eval_examples = 0, 0 for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2] # XLM don't use segment_ids "token_type_ids": batch[2], "labels": batch[3], } # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[4], # 'p_mask': batch[5]}) outputs = model(**inputs) tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() logits = logits.detach().cpu().numpy() label_ids = inputs["labels"].to("cpu").numpy() tmp_eval_accuracy = accuracy(logits, label_ids) eval_accuracy += tmp_eval_accuracy nb_eval_steps += 1 nb_eval_examples += inputs["input_ids"].size(0) eval_loss = eval_loss / nb_eval_steps eval_accuracy = eval_accuracy / nb_eval_examples result = {"eval_loss": eval_loss, "eval_accuracy": eval_accuracy} output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key in sorted(result.keys()): logger.info("%s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result The provided code snippet includes necessary dependencies for implementing the `train` function. 
Write a Python function `def train(args, train_dataset, model, tokenizer)` to solve the following problem: Train the model Here is the function: def train(args, train_dataset, model, tokenizer): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], # 'token_type_ids': None if args.model_type == 'xlm' else batch[2], "token_type_ids": batch[2], "labels": batch[3], } # if args.model_type in ['xlnet', 'xlm']: # inputs.update({'cls_index': batch[5], # 'p_mask': batch[6]}) outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: loss.backward() torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) tokenizer.save_vocabulary(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step
Train the model
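A small worked example of the step-count arithmetic used in `train` above; all sizes are made up for illustration.

# Hypothetical sizes: 1,000 batches per epoch, gradient accumulation of 4, 3 epochs.
len_dataloader = 1000
gradient_accumulation_steps = 4
num_train_epochs = 3
# Same formula as in `train` when max_steps <= 0:
t_total = len_dataloader // gradient_accumulation_steps * num_train_epochs
print(t_total)  # 750 optimizer updates; the linear schedule decays the LR to 0 over these steps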
11,690
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) def accuracy(out, labels): outputs = np.argmax(out, axis=1) return np.sum(outputs == labels)
null
11,691
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) The provided code snippet includes necessary dependencies for implementing the `load_rocstories_dataset` function. Write a Python function `def load_rocstories_dataset(dataset_path)` to solve the following problem: Output a list of tuples(story, 1st continuation, 2nd continuation, label) Here is the function: def load_rocstories_dataset(dataset_path): """Output a list of tuples(story, 1st continuation, 2nd continuation, label)""" with open(dataset_path, encoding="utf_8") as f: f = csv.reader(f) output = [] next(f) # skip the first line for line in tqdm(f): output.append((" ".join(line[1:5]), line[5], line[6], int(line[-1]) - 1)) return output
Output a list of tuples(story, 1st continuation, 2nd continuation, label)
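A hedged usage sketch for `load_rocstories_dataset`; the CSV path is hypothetical and the file is assumed to follow the ROCStories cloze-test layout (an id column, four story sentences, two candidate endings, and a 1-indexed answer).

# Hypothetical path to a ROCStories cloze-test CSV.
dataset = load_rocstories_dataset("rocstories_val.csv")
story, cont1, cont2, label = dataset[0]
# `story` joins the four context sentences; `label` is 0 or 1 (the answer column shifted down by one).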
11,692
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) The provided code snippet includes necessary dependencies for implementing the `pre_process_datasets` function. Write a Python function `def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token)` to solve the following problem: Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] Here is the function: def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token): """Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] """ tensor_datasets = [] for dataset in encoded_datasets: n_batch = len(dataset) input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64) mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64) lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64) mc_labels = np.zeros((n_batch,), dtype=np.int64) for ( i, (story, cont1, cont2, mc_label), ) in enumerate(dataset): with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token] with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token] input_ids[i, 0, : len(with_cont1)] = with_cont1 input_ids[i, 1, : len(with_cont2)] = with_cont2 mc_token_ids[i, 0] = len(with_cont1) - 1 mc_token_ids[i, 1] = len(with_cont2) - 1 lm_labels[i, 0, : len(with_cont1)] = with_cont1 lm_labels[i, 1, : len(with_cont2)] = with_cont2 mc_labels[i] = mc_label all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs)) return tensor_datasets
Pre-process datasets containing lists of tuples(story, 1st continuation, 2nd continuation, label) To Transformer inputs of shape (n_batch, n_alternative, length) comprising for each batch, continuation: input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
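To make the tensor layout concrete, here is a minimal sketch with made-up token ids (the start/delimiter/clf ids are placeholders, not real vocabulary entries), reusing the imports from the snippet above.

# One encoded example: (story, cont1, cont2, mc_label), already mapped to ids.
encoded_datasets = [[([10, 11, 12], [20, 21], [30, 31], 0)]]
(input_ids, mc_token_ids, lm_labels, mc_labels), = pre_process_datasets(
    encoded_datasets, input_len=8, cap_length=5,
    start_token=1, delimiter_token=2, clf_token=3,
)
# input_ids[0, 0] -> [1, 10, 11, 12, 2, 20, 21, 3]  (story + first continuation)
# input_ids[0, 1] -> [1, 10, 11, 12, 2, 30, 31, 3]  (story + second continuation)
# mc_token_ids[0] -> [7, 7]  (position of the clf token in each alternative)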
11,693
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from seq2seq_trainer import Seq2SeqTrainer from seq2seq_training_args import Seq2SeqTrainingArguments from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( Seq2SeqDataCollator, Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) logger = logging.getLogger(__name__) def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs) The provided code snippet includes necessary dependencies for implementing the `handle_metrics` function. Write a Python function `def handle_metrics(split, metrics, output_dir)` to solve the following problem: Log and save metrics Args: - split: one of train, val, test - metrics: metrics dict - output_dir: where to save the metrics Here is the function: def handle_metrics(split, metrics, output_dir): """ Log and save metrics Args: - split: one of train, val, test - metrics: metrics dict - output_dir: where to save the metrics """ logger.info(f"***** {split} metrics *****") for key in sorted(metrics.keys()): logger.info(f" {key} = {metrics[key]}") save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))
Log and save metrics Args: - split: one of train, val, test - metrics: metrics dict - output_dir: where to save the metrics
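A minimal usage sketch for `handle_metrics`; the metrics dict and output directory are invented.

# Hypothetical validation metrics.
metrics = {"val_loss": 1.2345, "val_rouge1": 42.1}
handle_metrics("val", metrics, output_dir="outputs")
# Logs each key and writes outputs/val_results.json via save_json.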
11,694
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from seq2seq_trainer import Seq2SeqTrainer from seq2seq_training_args import Seq2SeqTrainingArguments from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( Seq2SeqDataCollator, Seq2SeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() check_output_dir(training_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED), training_args.fp16, ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(training_args, p, None): assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute" setattr(config, p, getattr(training_args, p)) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=".ckpt" in model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, ) # use task specific params use_task_specific_params(model, data_args.task) # set num_beams for evaluation if data_args.eval_beams is None: data_args.eval_beams = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang) if model_args.freeze_embeds: freeze_embeds(model) if model_args.freeze_encoder: freeze_params(model.get_encoder()) assert_all_frozen(model.get_encoder()) dataset_class = Seq2SeqDataset # Get datasets train_dataset = ( dataset_class( tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_train else None ) eval_dataset = ( dataset_class( tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) test_dataset = ( dataset_class( tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or "", ) if training_args.do_predict else None ) # Initialize our Trainer compute_metrics_fn = ( build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None ) trainer = Seq2SeqTrainer( model=model, args=training_args, data_args=data_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=Seq2SeqDataCollator( tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=compute_metrics_fn, tokenizer=tokenizer, ) all_metrics = {} # Training if training_args.do_train: logger.info("*** Train ***") train_result = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) metrics = train_result.metrics metrics["train_n_objs"] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("train", metrics, training_args.output_dir) all_metrics.update(metrics) # Need to save the 
state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(metric_key_prefix="val") metrics["val_n_objs"] = data_args.n_val metrics["val_loss"] = round(metrics["val_loss"], 4) if trainer.is_world_process_zero(): handle_metrics("val", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.do_predict: logger.info("*** Predict ***") test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test") metrics = test_output.metrics metrics["test_n_objs"] = data_args.n_test if trainer.is_world_process_zero(): metrics["test_loss"] = round(metrics["test_loss"], 4) handle_metrics("test", metrics, training_args.output_dir) all_metrics.update(metrics) if training_args.predict_with_generate: test_preds = tokenizer.batch_decode( test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) test_preds = lmap(str.strip, test_preds) write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt")) if trainer.is_world_process_zero(): save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json")) return all_metrics def _mp_fn(index): # For xla_spawn (TPUs) main()
null
11,695
import argparse import shutil from pathlib import Path from tqdm import tqdm from transformers import AutoTokenizer def pack_data_dir(tok, data_dir: Path, max_tokens, save_path): save_path = Path(save_path) save_path.mkdir(exist_ok=True) for split in ["train"]: src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target" src_docs = [x.rstrip() for x in Path(src_path).open().readlines()] tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()] packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens) print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.") Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src)) Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt)) for split in ["val", "test"]: src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target" shutil.copyfile(src_path, save_path / f"{split}.source") shutil.copyfile(tgt_path, save_path / f"{split}.target") def packer_cli(): parser = argparse.ArgumentParser() parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.") parser.add_argument("--max_seq_len", type=int, default=128) parser.add_argument("--data_dir", type=str) parser.add_argument("--save_path", type=str) args = parser.parse_args() tokenizer = AutoTokenizer.from_pretrained(args.tok_name) return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
null
11,696
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property The provided code snippet includes necessary dependencies for implementing the `label_smoothed_nll_loss` function. Write a Python function `def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100)` to solve the following problem: From fairseq Here is the function: def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100): """From fairseq""" if target.dim() == lprobs.dim() - 1: target = target.unsqueeze(-1) nll_loss = -lprobs.gather(dim=-1, index=target) smooth_loss = -lprobs.sum(dim=-1, keepdim=True) if ignore_index is not None: pad_mask = target.eq(ignore_index) nll_loss.masked_fill_(pad_mask, 0.0) smooth_loss.masked_fill_(pad_mask, 0.0) else: nll_loss = nll_loss.squeeze(-1) smooth_loss = smooth_loss.squeeze(-1) nll_loss = nll_loss.sum() # mean()? Scared to break other math. smooth_loss = smooth_loss.sum() eps_i = epsilon / lprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss, nll_loss
From fairseq
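A hedged usage sketch for `label_smoothed_nll_loss`, assuming the inputs are log-probabilities over the vocabulary (batch x time x vocab) and integer targets (batch x time).

import torch
import torch.nn.functional as F

logits = torch.randn(2, 5, 10)         # hypothetical model output
target = torch.randint(0, 10, (2, 5))  # hypothetical gold token ids
lprobs = F.log_softmax(logits, dim=-1)
loss, nll_loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=-100)
# `loss` mixes the NLL term with a uniform smoothing term; note both terms are summed, not averaged.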
11,697
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict: """Uses sacrebleu's corpus_bleu implementation.""" return {"bleu": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)} def calculate_rouge( pred_lns: List[str], tgt_lns: List[str], use_stemmer=True, rouge_keys=ROUGE_KEYS, return_precision_and_recall=False, bootstrap_aggregation=True, newline_sep=True, ) -> Dict: """Calculate rouge using rouge_scorer package. Args: pred_lns: list of summaries generated by model tgt_lns: list of groundtruth summaries (e.g. contents of val.target) use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes to improve matching. rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum return_precision_and_recall: (False) whether to also return precision and recall. bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. Defaults to True, if False this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]`` newline_sep:(default=True) whether to add newline between sentences. This is essential for calculation rougeL on multi sentence summaries (CNN/DM dataset). 
Returns: Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys """ scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer) aggregator = scoring.BootstrapAggregator() for pred, tgt in zip(tgt_lns, pred_lns): # rougeLsum expects "\n" separated sentences within a summary if newline_sep: pred = add_newline_to_end_of_each_sentence(pred) tgt = add_newline_to_end_of_each_sentence(tgt) scores = scorer.score(pred, tgt) aggregator.add_scores(scores) if bootstrap_aggregation: result = aggregator.aggregate() if return_precision_and_recall: return extract_rouge_mid_statistics(result) # here we return dict else: return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()} else: return aggregator._scores # here we return defaultdict(list) def build_compute_metrics_fn(task_name: str, tokenizer: PreTrainedTokenizer) -> Callable[[EvalPrediction], Dict]: def non_pad_len(tokens: np.ndarray) -> int: return np.count_nonzero(tokens != tokenizer.pad_token_id) def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]: pred_ids = pred.predictions label_ids = pred.label_ids pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True) label_ids[label_ids == -100] = tokenizer.pad_token_id label_str = tokenizer.batch_decode(label_ids, skip_special_tokens=True) pred_str = lmap(str.strip, pred_str) label_str = lmap(str.strip, label_str) return pred_str, label_str def summarization_metrics(pred: EvalPrediction) -> Dict: pred_str, label_str = decode_pred(pred) rouge: Dict = calculate_rouge(pred_str, label_str) summ_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1) rouge.update({"gen_len": summ_len}) return rouge def translation_metrics(pred: EvalPrediction) -> Dict: pred_str, label_str = decode_pred(pred) bleu: Dict = calculate_bleu(pred_str, label_str) gen_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1) bleu.update({"gen_len": gen_len}) return bleu compute_metrics_fn = summarization_metrics if "summarization" in task_name else translation_metrics return compute_metrics_fn
null
11,698
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property The provided code snippet includes necessary dependencies for implementing the `trim_batch` function. Write a Python function `def trim_batch( input_ids, pad_token_id, attention_mask=None, )` to solve the following problem: Remove columns that are populated exclusively by pad_token_id Here is the function: def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
Remove columns that are populated exclusively by pad_token_id
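A small usage sketch for `trim_batch` with hand-built tensors (the pad id of 0 is arbitrary).

import torch

pad_token_id = 0
input_ids = torch.tensor([[5, 6, 0, 0],
                          [7, 0, 0, 0]])
attention_mask = input_ids.ne(pad_token_id).long()
trimmed_ids, trimmed_mask = trim_batch(input_ids, pad_token_id, attention_mask)
# trimmed_ids.shape == (2, 2): the two trailing all-pad columns are dropped from both tensors.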
11,699
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property The provided code snippet includes necessary dependencies for implementing the `sortish_sampler_indices` function. Write a Python function `def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array` to solve the following problem: Go through the text data by order of src length with a bit of randomness. From fastai repo. Here is the function: def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array: "Go through the text data by order of src length with a bit of randomness. From fastai repo." if not shuffle: return np.argsort(np.array(data) * -1) def key_fn(i): return data[i] idxs = np.random.permutation(len(data)) sz = bs * 50 ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)] sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx]) sz = bs ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)] max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key, ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first. sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int) sort_idx = np.concatenate((ck_idx[0], sort_idx)) return sort_idx
Go through the text data by order of src length with a bit of randomness. From fastai repo.
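A short usage sketch for `sortish_sampler_indices` with made-up source lengths.

lengths = [5, 12, 3, 9, 7, 2, 11, 4]  # hypothetical per-example token counts
idxs = sortish_sampler_indices(lengths, bs=2, shuffle=True)
# `idxs` is a permutation of range(len(lengths)); indices are grouped so that each
# batch of `bs` examples has similar lengths, with the longest chunk placed first.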
11,700
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property The provided code snippet includes necessary dependencies for implementing the `pickle_load` function. Write a Python function `def pickle_load(path)` to solve the following problem: pickle.load(path) Here is the function: def pickle_load(path): """pickle.load(path)""" with open(path, "rb") as f: return pickle.load(f)
pickle.load(path)
11,701
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def flatten_list(summary_ids: List[List]): return [x for x in itertools.chain.from_iterable(summary_ids)]
null
11,702
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs) def get_git_info(): try: repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), "hostname": str(socket.gethostname()), } return repo_infos except TypeError: return { "repo_id": None, "repo_sha": None, "repo_branch": None, "hostname": None, } The provided code snippet includes necessary dependencies for implementing the `save_git_info` function. Write a Python function `def save_git_info(folder_path: str) -> None` to solve the following problem: Save git information to output_dir/git_log.json Here is the function: def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
Save git information to output_dir/git_log.json
11,703
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def freeze_params(model: nn.Module): """Set requires_grad=False for each of model.parameters()""" for par in model.parameters(): par.requires_grad = False The provided code snippet includes necessary dependencies for implementing the `freeze_embeds` function. Write a Python function `def freeze_embeds(model)` to solve the following problem: Freeze token embeddings and positional embeddings for bart, just token embeddings for t5. Here is the function: def freeze_embeds(model): """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.""" model_type = model.config.model_type if model_type in ["t5", "mt5"]: freeze_params(model.shared) for d in [model.encoder, model.decoder]: freeze_params(d.embed_tokens) elif model_type == "fsmt": for d in [model.model.encoder, model.model.decoder]: freeze_params(d.embed_positions) freeze_params(d.embed_tokens) else: freeze_params(model.model.shared) for d in [model.model.encoder, model.model.decoder]: freeze_params(d.embed_positions) freeze_params(d.embed_tokens)
Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.
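A hedged usage sketch for `freeze_embeds` with a BART checkpoint (the model name is only an example).

from transformers import AutoModelForSeq2SeqLM

model = AutoModelForSeq2SeqLM.from_pretrained("facebook/bart-large-cnn")
freeze_embeds(model)
# For BART-like models this freezes the shared token embeddings and the positional
# embeddings of both encoder and decoder; only the remaining weights get gradients.
assert not any(p.requires_grad for p in model.get_input_embeddings().parameters())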
11,704
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def grad_status(model: nn.Module) -> Iterable: return (par.requires_grad for par in model.parameters()) def any_requires_grad(model: nn.Module) -> bool: return any(grad_status(model))
null
11,705
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def grad_status(model: nn.Module) -> Iterable: return (par.requires_grad for par in model.parameters()) def assert_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) n_require_grad = sum(lmap(int, model_grads)) npars = len(model_grads) assert not any(model_grads), f"{n_require_grad/npars:.1%} of {npars} weights require grad"
null
11,706
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from torch import nn from torch.utils.data import Dataset, Sampler from sentence_splitter import add_newline_to_end_of_each_sentence from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.models.bart.modeling_bart import shift_tokens_right from transformers.utils import cached_property def grad_status(model: nn.Module) -> Iterable: return (par.requires_grad for par in model.parameters()) def assert_not_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) npars = len(model_grads) assert any(model_grads), f"none of {npars} weights require grad"
null
11,707
import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import Seq2SeqDataset, pickle_save class Seq2SeqDataset(AbstractSeq2SeqDataset): """A dataset that calls prepare_seq2seq_batch.""" def __getitem__(self, index) -> Dict[str, str]: index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" return {"tgt_texts": tgt_line, "src_texts": source_line, "id": index - 1} def collate_fn(self, batch) -> Dict[str, torch.Tensor]: """Call prepare_seq2seq_batch.""" batch_encoding: Dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch( [x["src_texts"] for x in batch], tgt_texts=[x["tgt_texts"] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors="pt", **self.dataset_kwargs, ).data batch_encoding["ids"] = torch.tensor([x["id"] for x in batch]) return batch_encoding def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f) The provided code snippet includes necessary dependencies for implementing the `save_len_file` function. Write a Python function `def save_len_file( tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs )` to solve the following problem: Save max(src_len, tgt_len) for each example to allow dynamic batching. Here is the function: def save_len_file( tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs ): """Save max(src_len, tgt_len) for each example to allow dynamic batching.""" tok = AutoTokenizer.from_pretrained(tokenizer_name) train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs) pad = tok.pad_token_id def get_lens(ds): dl = tqdm( DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn), desc=str(ds.len_file), ) max_lens = [] for batch in dl: src_lens = batch["input_ids"].ne(pad).sum(1).tolist() tgt_lens = batch["labels"].ne(pad).sum(1).tolist() if consider_target: for src, tgt in zip(src_lens, tgt_lens): max_lens.append(max(src, tgt)) else: max_lens.extend(src_lens) return max_lens train_lens = get_lens(train_ds) val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs) val_lens = get_lens(val_ds) pickle_save(train_lens, train_ds.len_file) pickle_save(val_lens, val_ds.len_file)
Save max(src_len, tgt_len) for each example to allow dynamic batching.
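A hedged usage sketch for `save_len_file`; the tokenizer name and data directory are placeholders, and the directory is assumed to hold train.source/train.target and val.source/val.target files.

# Hypothetical paths; writes the per-example length pickles that a dynamic
# batching sampler can load later.
save_len_file("facebook/bart-large-cnn", "cnn_dm", max_source_length=1024, max_target_length=56)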
11,708
import fire from utils import calculate_rouge, save_json def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs) def calculate_rouge( pred_lns: List[str], tgt_lns: List[str], use_stemmer=True, rouge_keys=ROUGE_KEYS, return_precision_and_recall=False, bootstrap_aggregation=True, newline_sep=True, ) -> Dict: """Calculate rouge using rouge_scorer package. Args: pred_lns: list of summaries generated by model tgt_lns: list of groundtruth summaries (e.g. contents of val.target) use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes to improve matching. rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum return_precision_and_recall: (False) whether to also return precision and recall. bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. Defaults to True, if False this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]`` newline_sep:(default=True) whether to add newline between sentences. This is essential for calculation rougeL on multi sentence summaries (CNN/DM dataset). Returns: Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys """ scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer) aggregator = scoring.BootstrapAggregator() for pred, tgt in zip(tgt_lns, pred_lns): # rougeLsum expects "\n" separated sentences within a summary if newline_sep: pred = add_newline_to_end_of_each_sentence(pred) tgt = add_newline_to_end_of_each_sentence(tgt) scores = scorer.score(pred, tgt) aggregator.add_scores(scores) if bootstrap_aggregation: result = aggregator.aggregate() if return_precision_and_recall: return extract_rouge_mid_statistics(result) # here we return dict else: return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()} else: return aggregator._scores # here we return defaultdict(list) The provided code snippet includes necessary dependencies for implementing the `calculate_rouge_path` function. Write a Python function `def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs)` to solve the following problem: Kwargs will be passed to calculate_rouge Here is the function: def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs): """Kwargs will be passed to calculate_rouge""" pred_lns = [x.strip() for x in open(pred_path).readlines()] tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)] metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs) if save_path is not None: save_json(metrics, save_path, indent=None) return metrics # these print nicely
Kwargs will be passed to calculate_rouge
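A minimal usage sketch for `calculate_rouge_path`; the file names are hypothetical, with one prediction and one reference per line.

metrics = calculate_rouge_path("test_generations.txt", "test.target", save_path="rouge.json")
print(metrics)  # e.g. {'rouge1': ..., 'rouge2': ..., 'rougeL': ..., 'rougeLsum': ...}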
11,709
from pathlib import Path import fire from tqdm import tqdm The provided code snippet includes necessary dependencies for implementing the `download_wmt_dataset` function. Write a Python function `def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None` to solve the following problem: Download a dataset using the datasets package and save it to the format expected by finetune.py Format of save_dir: train.source, train.target, val.source, val.target, test.source, test.target. Args: src_lang: <str> source language tgt_lang: <str> target language dataset: <str> wmt16, wmt17, etc. wmt16 is a good start as it's small. To get the full list run `import datasets; print([d.id for d in datasets.list_datasets() if "wmt" in d.id])` save_dir: <str>, where to save the datasets, defaults to f'{dataset}-{src_lang}-{tgt_lang}' Usage: >>> download_wmt_dataset('ro', 'en', dataset='wmt16') # saves to wmt16-ro-en Here is the function: def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None: """Download a dataset using the datasets package and save it to the format expected by finetune.py Format of save_dir: train.source, train.target, val.source, val.target, test.source, test.target. Args: src_lang: <str> source language tgt_lang: <str> target language dataset: <str> wmt16, wmt17, etc. wmt16 is a good start as it's small. To get the full list run `import datasets; print([d.id for d in datasets.list_datasets() if "wmt" in d.id])` save_dir: <str>, where to save the datasets, defaults to f'{dataset}-{src_lang}-{tgt_lang}' Usage: >>> download_wmt_dataset('ro', 'en', dataset='wmt16') # saves to wmt16-ro-en """ try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError("run pip install datasets") pair = f"{src_lang}-{tgt_lang}" print(f"Converting {dataset}-{pair}") ds = datasets.load_dataset(dataset, pair) if save_dir is None: save_dir = f"{dataset}-{pair}" save_dir = Path(save_dir) save_dir.mkdir(exist_ok=True) for split in ds.keys(): print(f"Splitting {split} with {ds[split].num_rows} records") # to save to val.source, val.target like summary datasets fn = "val" if split == "validation" else split src_path = save_dir.joinpath(f"{fn}.source") tgt_path = save_dir.joinpath(f"{fn}.target") src_fp = src_path.open("w+") tgt_fp = tgt_path.open("w+") # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split]): ex = x["translation"] src_fp.write(ex[src_lang] + "\n") tgt_fp.write(ex[tgt_lang] + "\n") print(f"Saved {dataset} dataset to {save_dir}")
Download a dataset using the datasets package and save it to the format expected by finetune.py Format of save_dir: train.source, train.target, val.source, val.target, test.source, test.target. Args: src_lang: <str> source language tgt_lang: <str> target language dataset: <str> wmt16, wmt17, etc. wmt16 is a good start as it's small. To get the full list run `import datasets; print([d.id for d in datasets.list_datasets() if "wmt" in d.id])` save_dir: <str>, where to save the datasets, defaults to f'{dataset}-{src_lang}-{tgt_lang}' Usage: >>> download_wmt_dataset('ro', 'en', dataset='wmt16') # saves to wmt16-ro-en
11,710
from pathlib import Path import fire The provided code snippet includes necessary dependencies for implementing the `minify` function. Write a Python function `def minify(src_dir: str, dest_dir: str, n: int)` to solve the following problem: Write first n lines of each file f in src_dir to dest_dir/f Here is the function: def minify(src_dir: str, dest_dir: str, n: int): """Write first n lines of each file f in src_dir to dest_dir/f""" src_dir = Path(src_dir) dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) for path in src_dir.iterdir(): new = [x.rstrip() for x in list(path.open().readlines())][:n] dest_path = dest_dir.joinpath(path.name) print(dest_path) dest_path.open("w").write("\n".join(new))
Write first n lines of each file f in src_dir to dest_dir/f
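A one-line usage sketch for `minify`; the directory names are made up.

# Keep only the first 100 lines of every file in cnn_dm/, written to cnn_dm_tiny/.
minify("cnn_dm", "cnn_dm_tiny", n=100)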
11,711
from typing import Union import fire import torch from tqdm import tqdm The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None` to solve the following problem: Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space. Here is the function: def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None: """Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space.""" state_dict = torch.load(src_path, map_location=map_location) for k, v in tqdm(state_dict.items()): if not isinstance(v, torch.Tensor): raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin") state_dict[k] = v.half() if save_path is None: # overwrite src_path save_path = src_path torch.save(state_dict, save_path)
Convert a pytorch_model.bin or model.pt file to torch.float16 for faster downloads, less disk space.
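A short usage sketch for `convert`; the checkpoint paths are hypothetical.

# Loads the checkpoint on CPU, casts every tensor to float16, and saves the smaller file.
convert("pytorch_model.bin", map_location="cpu", save_path="pytorch_model_fp16.bin")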
11,712
import argparse import itertools import operator import sys from collections import OrderedDict from run_eval import datetime_now, run_generate from utils import ROUGE_KEYS task_score_names = { "translation": ["bleu"], "summarization": ROUGE_KEYS, } def parse_search_arg(search): groups = search.split() entries = {k: vs for k, vs in (g.split("=") for g in groups)} entry_names = list(entries.keys()) sets = [list(f"--{k} {v}" for v in vs.split(":")) for k, vs in entries.items()] matrix = [list(x) for x in itertools.product(*sets)] return matrix, entry_names import sys sys.path.insert(1, os.path.dirname(os.path.realpath(__file__))) def datetime_now(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") def run_generate(verbose=True): """ Takes input text, generates output, and then using reference calculates the BLEU scores. The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed. Args: verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout Returns: a tuple: ``(scores, params}`` - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}`` - ``params``: a dict of custom params, e.g. ``{'num_beams': 5, 'length_penalty': 0.8}`` """ parser = argparse.ArgumentParser() parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.") parser.add_argument("input_path", type=str, help="like cnn_dm/test.source") parser.add_argument("save_path", type=str, help="where to save summaries") parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target") parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics") parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.") parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all." ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results") parser.add_argument( "--info", nargs="?", type=str, const=datetime_now(), help=( "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g." " lang=en-ru. If no value is passed, the current datetime string will be used." 
), ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate args, rest = parser.parse_known_args() parsed_args = parse_numeric_n_bool_cl_kwargs(rest) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}") examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()] if args.n_obs > 0: examples = examples[: args.n_obs] Path(args.save_path).parent.mkdir(exist_ok=True) if args.reference_path is None and Path(args.score_path).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.") if args.device == "cpu" and args.fp16: # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half' raise ValueError("Can't mix --fp16 and --device cpu") runtime_metrics = generate_summaries_or_translations( examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, ) if args.reference_path is None: return {} # Compute scores score_fn = calculate_bleu if "translation" in args.task else calculate_rouge output_lns = [x.rstrip() for x in open(args.save_path).readlines()] reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)] scores: dict = score_fn(output_lns, reference_lns) scores.update(runtime_metrics) if args.dump_args: scores.update(parsed_args) if args.info: scores["info"] = args.info if verbose: print(scores) if args.score_path is not None: json.dump(scores, open(args.score_path, "w")) return scores The provided code snippet includes necessary dependencies for implementing the `run_search` function. Write a Python function `def run_search()` to solve the following problem: Run parametric search over the desired hparam space with help of ``run_eval.py``. All the arguments except ``--search`` are passed to ``run_eval.py`` as is. The values inside of "--search" are parsed, reformatted and fed to ``run_eval.py`` as additional args. The format for the ``--search`` value is a simple string with hparams and colon separated values to try, e.g.: ``` --search "num_beams=5:10 length_penalty=0.8:1.0:1.2 early_stopping=true:false" ``` which will generate ``12`` ``(2*3*2)`` searches for a product of each hparam. For example the example that was just used will invoke ``run_eval.py`` repeatedly with: ``` --num_beams 5 --length_penalty 0.8 --early_stopping true --num_beams 5 --length_penalty 0.8 --early_stopping false [...] --num_beams 10 --length_penalty 1.2 --early_stopping false ``` On completion, this function prints a markdown table of the results sorted by the best BLEU score and the winning arguments. Here is the function: def run_search(): """ Run parametric search over the desired hparam space with help of ``run_eval.py``. All the arguments except ``--search`` are passed to ``run_eval.py`` as is. The values inside of "--search" are parsed, reformatted and fed to ``run_eval.py`` as additional args. The format for the ``--search`` value is a simple string with hparams and colon separated values to try, e.g.: ``` --search "num_beams=5:10 length_penalty=0.8:1.0:1.2 early_stopping=true:false" ``` which will generate ``12`` ``(2*3*2)`` searches for a product of each hparam. For example the example that was just used will invoke ``run_eval.py`` repeatedly with: ``` --num_beams 5 --length_penalty 0.8 --early_stopping true --num_beams 5 --length_penalty 0.8 --early_stopping false [...] 
--num_beams 10 --length_penalty 1.2 --early_stopping false ``` On completion, this function prints a markdown table of the results sorted by the best BLEU score and the winning arguments. """ prog = sys.argv[0] parser = argparse.ArgumentParser( usage=( "\n\nImportant: this script accepts all arguments `run_eval.py` accepts and then a few extra, therefore" " refer to `run_eval.py -h` for the complete list." ) ) parser.add_argument( "--search", type=str, required=False, help='param space to search, e.g. "num_beams=5:10 length_penalty=0.8:1.0:1.2"', ) parser.add_argument( "--bs", type=int, default=8, required=False, help="initial batch size (may get reduced if it's too big)" ) parser.add_argument("--task", type=str, help="used for task_specific_params + metrics") parser.add_argument( "--info", nargs="?", type=str, const=datetime_now(), help=( "add custom notes to be printed before the results table. If no value is passed, the current datetime" " string will be used." ), ) args, args_main = parser.parse_known_args() # we share some of the args args_main.extend(["--task", args.task]) args_normal = [prog] + args_main # to support variations like translation_en_to_de" task = "translation" if "translation" in args.task else "summarization" matrix, col_names = parse_search_arg(args.search) col_names[0:0] = task_score_names[task] # score cols first col_widths = {col: len(str(col)) for col in col_names} results = [] for r in matrix: hparams = {k: v for k, v in (x.replace("--", "").split() for x in r)} args_exp = " ".join(r).split() args_exp.extend(["--bs", str(args.bs)]) # in case we need to reduce its size due to CUDA OOM sys.argv = args_normal + args_exp # XXX: need to trap CUDA OOM and lower args.bs if that happens and retry scores = run_generate(verbose=False) # make sure scores are first in the table result = OrderedDict() for score in task_score_names[task]: result[score] = scores[score] result.update(hparams) results.append(result) # find widest entries for k, v in result.items(): l = len(str(v)) if l > col_widths[k]: col_widths[k] = l results_sorted = sorted(results, key=operator.itemgetter(*task_score_names[task]), reverse=True) print(" | ".join([f"{col:{col_widths[col]}}" for col in col_names])) print(" | ".join([f"{'-'*col_widths[col]}" for col in col_names])) for row in results_sorted: print(" | ".join([f"{row[col]:{col_widths[col]}}" for col in col_names])) best = results_sorted[0] for score in task_score_names[task]: del best[score] best_args = [f"--{k} {v}" for k, v in best.items()] dyn_args = ["--bs", str(args.bs)] if args.info: print(f"\nInfo: {args.info}") print("\nBest score args:") print(" ".join(args_main + best_args + dyn_args)) return results_sorted
Run parametric search over the desired hparam space with help of ``run_eval.py``. All the arguments except ``--search`` are passed to ``run_eval.py`` as is. The values inside of "--search" are parsed, reformatted and fed to ``run_eval.py`` as additional args. The format for the ``--search`` value is a simple string with hparams and colon separated values to try, e.g.: ``` --search "num_beams=5:10 length_penalty=0.8:1.0:1.2 early_stopping=true:false" ``` which will generate ``12`` ``(2*3*2)`` searches for a product of each hparam. For example the example that was just used will invoke ``run_eval.py`` repeatedly with: ``` --num_beams 5 --length_penalty 0.8 --early_stopping true --num_beams 5 --length_penalty 0.8 --early_stopping false [...] --num_beams 10 --length_penalty 1.2 --early_stopping false ``` On completion, this function prints a markdown table of the results sorted by the best BLEU score and the winning arguments.
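To make the ``--search`` expansion concrete, here is a minimal standalone sketch: it repeats the ``parse_search_arg`` helper from the snippet above so it runs on its own, and feeds it illustrative values.

```
# Minimal standalone sketch of the ``--search`` expansion described above.
import itertools

def parse_search_arg(search):
    groups = search.split()
    entries = {k: vs for k, vs in (g.split("=") for g in groups)}
    entry_names = list(entries.keys())
    sets = [list(f"--{k} {v}" for v in vs.split(":")) for k, vs in entries.items()]
    matrix = [list(x) for x in itertools.product(*sets)]
    return matrix, entry_names

matrix, names = parse_search_arg("num_beams=5:10 length_penalty=0.8:1.0:1.2")
print(names)        # ['num_beams', 'length_penalty']
print(len(matrix))  # 6 combinations (2 * 3)
print(matrix[0])    # ['--num_beams 5', '--length_penalty 0.8']
```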
11,713
import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from utils import ( Seq2SeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) def eval_data_dir( data_dir, save_dir: str, model_name: str, bs: int = 8, max_source_length: int = 1024, type_path="val", n_obs=None, fp16=False, task="summarization", local_rank=None, num_return_sequences=1, dataset_kwargs: Dict = None, prefix="", **generate_kwargs, ) -> Dict: """Run evaluation on part of the data for one gpu and save to {save_dir}/rank_{rank}_output.json""" model_name = str(model_name) assert local_rank is not None torch.distributed.init_process_group(backend="nccl", rank=local_rank) save_dir = Path(save_dir) save_path = save_dir.joinpath(f"rank_{local_rank}_output.json") torch.cuda.set_device(local_rank) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).cuda() if fp16: model = model.half() # determine if we need to increase num_beams use_task_specific_params(model, task) # update config with task specific params num_beams = generate_kwargs.pop("num_beams", model.config.num_beams) # AttributeError risk? if num_return_sequences > num_beams: num_beams = num_return_sequences tokenizer = AutoTokenizer.from_pretrained(model_name) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. if max_source_length is None: max_source_length = tokenizer.model_max_length if prefix is None: prefix = prefix or getattr(model.config, "prefix", "") or "" ds = Seq2SeqDataset( tokenizer, data_dir, max_source_length, max_target_length=1024, type_path=type_path, n_obs=n_obs, prefix=prefix, **dataset_kwargs, ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
sampler = ds.make_sortish_sampler(bs, distributed=True, add_extra_examples=False, shuffle=True) data_loader = DataLoader(ds, sampler=sampler, batch_size=bs, collate_fn=ds.collate_fn) results = [] for batch in tqdm(data_loader): summaries = model.generate( input_ids=batch["input_ids"].to(model.device), attention_mask=batch["attention_mask"].to(model.device), num_return_sequences=num_return_sequences, num_beams=num_beams, **generate_kwargs, ) preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) ids = batch["ids"] if num_return_sequences > 1: preds = chunks(preds, num_return_sequences) # batch size chunks, each of size num_return_seq for i, pred in enumerate(preds): results.append(dict(pred=pred, id=ids[i].item())) save_json(results, save_path) return results, sampler.num_replicas def combine_partial_results(partial_results) -> List: """Concatenate partial results into one file, then sort it by id.""" records = [] for partial_result in partial_results: records.extend(partial_result) records = list(sorted(records, key=lambda x: x["id"])) preds = [x["pred"] for x in records] return preds def gather_results_from_each_node(num_replicas, save_dir, timeout) -> List[Dict[str, List]]: # WAIT FOR lots of .json files start_wait = time.time() logger.info("waiting for all nodes to finish") json_data = None while (time.time() - start_wait) < timeout: json_files = list(save_dir.glob("rank_*.json")) if len(json_files) < num_replicas: continue try: # make sure all json files are fully saved json_data = lmap(load_json, json_files) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes") # Unreachable def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict: """Uses sacrebleu's corpus_bleu implementation.""" return {"bleu": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)} def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, sort_keys=True, **json_dump_kwargs) def calculate_rouge( pred_lns: List[str], tgt_lns: List[str], use_stemmer=True, rouge_keys=ROUGE_KEYS, return_precision_and_recall=False, bootstrap_aggregation=True, newline_sep=True, ) -> Dict: """Calculate rouge using rouge_scorer package. Args: pred_lns: list of summaries generated by model tgt_lns: list of groundtruth summaries (e.g. contents of val.target) use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes to improve matching. rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum return_precision_and_recall: (False) whether to also return precision and recall. bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. Defaults to True, if False this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]`` newline_sep:(default=True) whether to add newline between sentences. This is essential for calculation rougeL on multi sentence summaries (CNN/DM dataset). 
Returns: Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys """ scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer) aggregator = scoring.BootstrapAggregator() for pred, tgt in zip(tgt_lns, pred_lns): # rougeLsum expects "\n" separated sentences within a summary if newline_sep: pred = add_newline_to_end_of_each_sentence(pred) tgt = add_newline_to_end_of_each_sentence(tgt) scores = scorer.score(pred, tgt) aggregator.add_scores(scores) if bootstrap_aggregation: result = aggregator.aggregate() if return_precision_and_recall: return extract_rouge_mid_statistics(result) # here we return dict else: return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()} else: return aggregator._scores # here we return defaultdict(list) def parse_numeric_n_bool_cl_kwargs(unparsed_args: List[str]) -> Dict[str, Union[int, float, bool]]: """ Parse an argv list of unspecified command line args to a dict. Assumes all values are either numeric or boolean in the form of true/false. """ result = {} assert len(unparsed_args) % 2 == 0, f"got odd number of unparsed args: {unparsed_args}" num_pairs = len(unparsed_args) // 2 for pair_num in range(num_pairs): i = 2 * pair_num assert unparsed_args[i].startswith("--") if unparsed_args[i + 1].lower() == "true": value = True elif unparsed_args[i + 1].lower() == "false": value = False else: try: value = int(unparsed_args[i + 1]) except ValueError: value = float(unparsed_args[i + 1]) # this can raise another informative ValueError result[unparsed_args[i][2:]] = value return result def write_txt_file(ordered_tgt, path): f = Path(path).open("w") for ln in ordered_tgt: f.write(ln + "\n") f.flush() def run_generate(): parser = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir", type=str, help="like cnn_dm/test.source") parser.add_argument( "--model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.", default="sshleifer/distilbart-xsum-12-3", ) parser.add_argument("--save_dir", type=str, help="where to save", default="tmp_gen") parser.add_argument("--max_source_length", type=int, default=None) parser.add_argument( "--type_path", type=str, default="test", help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--local_rank", type=int, default=-1, required=False, help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all." 
) parser.add_argument( "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return" ) parser.add_argument( "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", ) parser.add_argument("--src_lang", type=str, default=None, required=False) parser.add_argument("--tgt_lang", type=str, default=None, required=False) parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples" ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--debug", action="store_true") start_time = time.time() args, rest = parser.parse_known_args() generate_kwargs = parse_numeric_n_bool_cl_kwargs(rest) if generate_kwargs and args.local_rank <= 0: print(f"parsed the following generate kwargs: {generate_kwargs}") json_save_dir = Path(args.save_dir + "_tmp") Path(json_save_dir).mkdir(exist_ok=True) # this handles locking. intermediate_files = list(json_save_dir.glob("rank_*.json")) if intermediate_files: raise ValueError(f"Found files at {json_save_dir} please move or remove them.") # In theory, a node could finish and save before another node hits this. If this happens, we can address later. dataset_kwargs = {} if args.src_lang is not None: dataset_kwargs["src_lang"] = args.src_lang if args.tgt_lang is not None: dataset_kwargs["tgt_lang"] = args.tgt_lang Path(args.save_dir).mkdir(exist_ok=True) results, num_replicas = eval_data_dir( args.data_dir, json_save_dir, args.model_name, type_path=args.type_path, bs=args.bs, fp16=args.fp16, task=args.task, local_rank=args.local_rank, n_obs=args.n_obs, max_source_length=args.max_source_length, num_return_sequences=args.num_return_sequences, prefix=args.prefix, dataset_kwargs=dataset_kwargs, **generate_kwargs, ) if args.local_rank <= 0: save_dir = Path(args.save_dir) save_dir.mkdir(exist_ok=True) partial_results = gather_results_from_each_node(num_replicas, json_save_dir, args.sync_timeout) preds = combine_partial_results(partial_results) if args.num_return_sequences > 1: save_path = save_dir.joinpath("pseudolabel_results.json") print(f"Saving aggregated results at {save_path}, intermediate in {json_save_dir}/") save_json(preds, save_path) return tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target") with open(tgt_file) as f: labels = [x.rstrip() for x in f.readlines()][: len(preds)] # Calculate metrics, save metrics, and save _generations.txt calc_bleu = "translation" in args.task score_fn = calculate_bleu if calc_bleu else calculate_rouge metric_name = "bleu" if calc_bleu else "rouge" metrics: Dict = score_fn(preds, labels) metrics["n_obs"] = len(preds) runtime = time.time() - start_time metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4) metrics["n_gpus"] = num_replicas # TODO(@stas00): add whatever metadata to metrics metrics_save_path = save_dir.joinpath(f"{args.type_path}_{metric_name}.json") save_json(metrics, metrics_save_path, indent=None) print(metrics) write_txt_file(preds, save_dir.joinpath(f"{args.type_path}_generations.txt")) if args.debug: write_txt_file(labels, save_dir.joinpath(f"{args.type_path}.target")) else: shutil.rmtree(json_save_dir)
null
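A small toy demonstration of ``combine_partial_results`` from the snippet above may help: the per-rank records below are invented, and the function body is repeated so the example is self-contained.

```
# Toy illustration of combine_partial_results: merge per-rank prediction lists
# and restore the original dataset order by id. The records are made up.
def combine_partial_results(partial_results):
    records = []
    for partial_result in partial_results:
        records.extend(partial_result)
    records = list(sorted(records, key=lambda x: x["id"]))
    preds = [x["pred"] for x in records]
    return preds

rank0 = [{"id": 0, "pred": "summary a"}, {"id": 2, "pred": "summary c"}]
rank1 = [{"id": 1, "pred": "summary b"}, {"id": 3, "pred": "summary d"}]
print(combine_partial_results([rank0, rank1]))
# ['summary a', 'summary b', 'summary c', 'summary d']
```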
11,714
import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp The provided code snippet includes necessary dependencies for implementing the `parse_args` function. Write a Python function `def parse_args()` to solve the following problem: Helper function parsing the command line options @retval ArgumentParser Here is the function: def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).") # positional parser.add_argument( "training_script", type=str, help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ), ) # rest from the training program parser.add_argument("training_script_args", nargs=REMAINDER) return parser.parse_args()
Helper function parsing the command line options @retval ArgumentParser
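The key behaviour of this parser is ``argparse.REMAINDER``: everything after the positional script path is forwarded untouched to ``training_script_args``. The self-contained check below rebuilds the same parser with illustrative argv values.

```
# Standalone check of the REMAINDER behaviour parse_args relies on.
from argparse import REMAINDER, ArgumentParser

parser = ArgumentParser()
parser.add_argument("--num_cores", type=int, default=1)
parser.add_argument("training_script", type=str)
parser.add_argument("training_script_args", nargs=REMAINDER)

args = parser.parse_args(
    ["--num_cores", "8", "run_glue.py", "--model_name_or_path", "bert-base-cased", "--do_train"]
)
print(args.num_cores)             # 8
print(args.training_script)       # run_glue.py
print(args.training_script_args)  # ['--model_name_or_path', 'bert-base-cased', '--do_train']
```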
11,715
import fire from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer The provided code snippet includes necessary dependencies for implementing the `save_randomly_initialized_version` function. Write a Python function `def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs)` to solve the following problem: Save a randomly initialized version of a model using a pretrained config. Args: config_name: which config to use save_dir: where to save the resulting model and tokenizer config_kwargs: Passed to AutoConfig Usage:: save_randomly_initialized_version("facebook/bart-large-cnn", "distilbart_random_cnn_6_3", encoder_layers=6, decoder_layers=3, num_beams=3) Here is the function: def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs): """Save a randomly initialized version of a model using a pretrained config. Args: config_name: which config to use save_dir: where to save the resulting model and tokenizer config_kwargs: Passed to AutoConfig Usage:: save_randomly_initialized_version("facebook/bart-large-cnn", "distilbart_random_cnn_6_3", encoder_layers=6, decoder_layers=3, num_beams=3) """ cfg = AutoConfig.from_pretrained(config_name, **config_kwargs) model = AutoModelForSeq2SeqLM.from_config(cfg) model.save_pretrained(save_dir) AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir) return model
Save a randomly initialized version of a model using a pretrained config. Args: config_name: which config to use save_dir: where to save the resulting model and tokenizer config_kwargs: Passed to AutoConfig Usage:: save_randomly_initialized_version("facebook/bart-large-cnn", "distilbart_random_cnn_6_3", encoder_layers=6, decoder_layers=3, num_beams=3)
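As a hedged follow-up, a typical way to sanity-check the result is to reload the saved directory and confirm the overridden config values stuck. This assumes ``save_randomly_initialized_version`` from the snippet above is available, requires network access for the config/tokenizer files, and the paths and kwargs are the illustrative ones from the docstring.

```
# Hedged usage sketch: build the random student, reload it from disk, and check
# that the overridden config values were kept. Names are illustrative.
from transformers import AutoModelForSeq2SeqLM

save_dir = "distilbart_random_cnn_6_3"
save_randomly_initialized_version(
    "facebook/bart-large-cnn", save_dir, encoder_layers=6, decoder_layers=3
)

reloaded = AutoModelForSeq2SeqLM.from_pretrained(save_dir)
print(reloaded.config.encoder_layers, reloaded.config.decoder_layers)  # 6 3
```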
11,716
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import datasets import numpy as np import tensorflow as tf from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, HfArgumentParser, PreTrainedTokenizer, TFAutoModelForSequenceClassification, TFTrainer, TFTrainingArguments, ) from transformers.utils import logging as hf_logging def get_tfds( train_file: str, eval_file: str, test_file: str, tokenizer: PreTrainedTokenizer, label_column_id: int, max_seq_length: Optional[int] = None, ): files = {} if train_file is not None: files[datasets.Split.TRAIN] = [train_file] if eval_file is not None: files[datasets.Split.VALIDATION] = [eval_file] if test_file is not None: files[datasets.Split.TEST] = [test_file] ds = datasets.load_dataset("csv", data_files=files) features_name = list(ds[list(files.keys())[0]].features.keys()) label_name = features_name.pop(label_column_id) label_list = list(set(ds[list(files.keys())[0]][label_name])) label2id = {label: i for i, label in enumerate(label_list)} input_names = tokenizer.model_input_names transformed_ds = {} if len(features_name) == 1: for k in files.keys(): transformed_ds[k] = ds[k].map( lambda example: tokenizer.batch_encode_plus( example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length" ), batched=True, ) elif len(features_name) == 2: for k in files.keys(): transformed_ds[k] = ds[k].map( lambda example: tokenizer.batch_encode_plus( (example[features_name[0]], example[features_name[1]]), truncation=True, max_length=max_seq_length, padding="max_length", ), batched=True, ) def gen_train(): for ex in transformed_ds[datasets.Split.TRAIN]: d = {k: v for k, v in ex.items() if k in input_names} label = label2id[ex[label_name]] yield (d, label) def gen_val(): for ex in transformed_ds[datasets.Split.VALIDATION]: d = {k: v for k, v in ex.items() if k in input_names} label = label2id[ex[label_name]] yield (d, label) def gen_test(): for ex in transformed_ds[datasets.Split.TEST]: d = {k: v for k, v in ex.items() if k in input_names} label = label2id[ex[label_name]] yield (d, label) train_ds = ( tf.data.Dataset.from_generator( gen_train, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), ) if datasets.Split.TRAIN in transformed_ds else None ) if train_ds is not None: train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN]))) val_ds = ( tf.data.Dataset.from_generator( gen_val, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), ) if datasets.Split.VALIDATION in transformed_ds else None ) if val_ds is not None: val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION]))) test_ds = ( tf.data.Dataset.from_generator( gen_test, ({k: tf.int32 for k in input_names}, tf.int64), ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), ) if datasets.Split.TEST in transformed_ds else None ) if test_ds is not None: test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST]))) return train_ds, val_ds, test_ds, label2id
null
11,717
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process from utils_multiple_choice import MultipleChoiceDataset, Split, processors def simple_accuracy(preds, labels): return (preds == labels).mean()
null
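A quick numeric check of ``simple_accuracy`` on toy arrays, repeated here so it runs standalone:

```
# simple_accuracy is an element-wise exact-match rate over numpy arrays.
import numpy as np

def simple_accuracy(preds, labels):
    return (preds == labels).mean()

preds = np.array([0, 1, 2, 1])
labels = np.array([0, 1, 1, 1])
print(simple_accuracy(preds, labels))  # 0.75
```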
11,718
import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process from utils_multiple_choice import MultipleChoiceDataset, Split, processors def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) try: processor = processors[data_args.task_name]() label_list = processor.get_labels() num_labels = len(label_list) except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) eval_dataset = ( MultipleChoiceDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def compute_metrics(p: EvalPrediction) -> Dict: preds = np.argmax(p.predictions, axis=1) return {"acc": simple_accuracy(preds, p.label_ids)} # Data collator data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") result = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt") if trainer.is_world_master(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in result.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) results.update(result) return results def _mp_fn(index): # For xla_spawn (TPUs) main()
null
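The ``compute_metrics`` hook wired into the Trainer above can be exercised without any model by feeding it a fabricated ``EvalPrediction``; the logits below are invented and have shape ``(num_examples, num_choices)``.

```
# Sketch of the compute_metrics hook on fabricated multiple-choice logits.
import numpy as np
from transformers import EvalPrediction

def simple_accuracy(preds, labels):
    return (preds == labels).mean()

def compute_metrics(p: EvalPrediction):
    preds = np.argmax(p.predictions, axis=1)
    return {"acc": simple_accuracy(preds, p.label_ids)}

fake = EvalPrediction(
    predictions=np.array([[0.1, 0.9, 0.0, 0.0], [0.7, 0.1, 0.1, 0.1]]),
    label_ids=np.array([1, 2]),
)
print(compute_metrics(fake))  # {'acc': 0.5}
```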
11,719
import csv import glob import json import logging import os from dataclasses import dataclass from enum import Enum from typing import List, Optional import tqdm from filelock import FileLock from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available logger = logging.getLogger(__name__) class InputExample: """ A single training/test example for multiple choice Args: example_id: Unique id for the example. question: string. The untokenized text of the second sequence (question). contexts: list of str. The untokenized text of the first sequence (context of corresponding question). endings: list of str. multiple choice's options. Its length must be equal to contexts' length. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. """ example_id: str question: str contexts: List[str] endings: List[str] label: Optional[str] class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. """ example_id: str input_ids: List[List[int]] attention_mask: Optional[List[List[int]]] token_type_ids: Optional[List[List[int]]] label: Optional[int] The provided code snippet includes necessary dependencies for implementing the `convert_examples_to_features` function. Write a Python function `def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ) -> List[InputFeatures]` to solve the following problem: Loads a data file into a list of `InputFeatures` Here is the function: def convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ) -> List[InputFeatures]: """ Loads a data file into a list of `InputFeatures` """ label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) choices_inputs = [] for ending_idx, (context, ending) in enumerate(zip(example.contexts, example.endings)): text_a = context if example.question.find("_") != -1: # this is for cloze question text_b = example.question.replace("_", ending) else: text_b = example.question + " " + ending inputs = tokenizer( text_a, text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, ) if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0: logger.info( "Attention! you are cropping tokens (swag task is ok). " "If you are training ARC and RACE and you are poping question + options," "you need to try to use a bigger max seq length!" ) choices_inputs.append(inputs) label = label_map[example.label] input_ids = [x["input_ids"] for x in choices_inputs] attention_mask = ( [x["attention_mask"] for x in choices_inputs] if "attention_mask" in choices_inputs[0] else None ) token_type_ids = ( [x["token_type_ids"] for x in choices_inputs] if "token_type_ids" in choices_inputs[0] else None ) features.append( InputFeatures( example_id=example.example_id, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, label=label, ) ) for f in features[:2]: logger.info("*** Example ***") logger.info("feature: %s" % f) return features
Loads a data file into a list of `InputFeatures`
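The text-pairing logic inside ``convert_examples_to_features`` (cloze questions replace the ``_`` placeholder with each ending, otherwise the ending is appended) can be walked through on a toy record; the dataclass below is a local stand-in for ``utils_multiple_choice.InputExample`` and the data is invented.

```
# Toy walk-through of the text_a / text_b construction for one example.
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class InputExample:  # local stand-in mirroring utils_multiple_choice.InputExample
    example_id: str
    question: str
    contexts: List[str]
    endings: List[str]
    label: Optional[str]

example = InputExample(
    example_id="demo-1",
    question="The capital of France is _.",
    contexts=["France is a country in Europe."] * 3,
    endings=["Paris", "Lyon", "Nice"],
    label="0",
)

for context, ending in zip(example.contexts, example.endings):
    text_a = context
    if example.question.find("_") != -1:  # cloze question
        text_b = example.question.replace("_", ending)
    else:
        text_b = example.question + " " + ending
    print(text_a, "||", text_b)
```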
11,720
import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score from torch import nn import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask def main(): def _mp_fn(index): # For xla_spawn (TPUs) main()
null
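A small toy demo of the ``seqeval`` metrics imported by this script, on two short BIO-tagged sentences (the scores are entity-level, not token-level); the tag sequences are invented.

```
# Toy demo of the seqeval metrics used for NER evaluation.
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score

y_true = [["B-PER", "I-PER", "O", "B-LOC"], ["O", "B-ORG", "O"]]
y_pred = [["B-PER", "I-PER", "O", "O"],     ["O", "B-ORG", "O"]]

print("accuracy :", accuracy_score(y_true, y_pred))
print("precision:", precision_score(y_true, y_pred))
print("recall   :", recall_score(y_true, y_pred))
print("f1       :", f1_score(y_true, y_pred))
```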
11,721
import torch from transformers import CamembertForMaskedLM, CamembertTokenizer def fill_mask(masked_input, model, tokenizer, topk=5): # Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py assert masked_input.count("<mask>") == 1 input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0) # Batch size 1 logits = model(input_ids)[0] # The last hidden-state is the first element of the output tuple masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item() logits = logits[0, masked_index, :] prob = logits.softmax(dim=0) values, indices = prob.topk(k=topk, dim=0) topk_predicted_token_bpe = " ".join( [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))] ) masked_token = tokenizer.mask_token topk_filled_outputs = [] for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")): predicted_token = predicted_token_bpe.replace("\u2581", " ") if " {0}".format(masked_token) in masked_input: topk_filled_outputs.append( ( masked_input.replace(" {0}".format(masked_token), predicted_token), values[index].item(), predicted_token, ) ) else: topk_filled_outputs.append( ( masked_input.replace(masked_token, predicted_token), values[index].item(), predicted_token, ) ) return topk_filled_outputs
null
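A hedged usage sketch for ``fill_mask`` above: it assumes the function from the snippet is available, downloads the ``camembert-base`` checkpoint, and the masked sentence is purely illustrative.

```
# Hedged usage sketch; downloads camembert-base, so run it interactively.
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
with torch.no_grad():
    for filled, score, token in fill_mask(masked_input, model, tokenizer, topk=3):
        print(f"{score:.3f}  {filled!r}  (token: {token!r})")
```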
11,722
import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] feature_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs.to_tuple()] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. 
if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. results = squad_evaluate(examples, predictions) return results The provided code snippet includes necessary dependencies for implementing the `train` function. 
Write a Python function `def train(args, train_dataset, model, tokenizer)` to solve the following problem: Train the model Here is the function: def train(args, train_dataset, model, tokenizer): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to gobal_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproductibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): 
tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step
Train the model
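The ``no_decay`` parameter grouping used at the top of ``train()`` can be checked standalone on a tiny module; the module below is made up, with an attribute literally named ``LayerNorm`` so the substring match behaves as it does on BERT-style models.

```
# Standalone sketch of the no_decay grouping: biases and LayerNorm weights end up
# in the zero-weight-decay group.
import torch

class TinyBlock(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = torch.nn.Linear(4, 4)
        self.LayerNorm = torch.nn.LayerNorm(4)  # attribute named like BERT-style modules

model = TinyBlock()
no_decay = ["bias", "LayerNorm.weight"]
decay_params = [n for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)]
no_decay_params = [n for n, p in model.named_parameters() if any(nd in n for nd in no_decay)]
print(decay_params)     # ['dense.weight']
print(no_decay_params)  # ['dense.bias', 'LayerNorm.weight', 'LayerNorm.bias']
```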
11,723
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, HfArgumentParser, SquadDataset, ) from transformers import SquadDataTrainingArguments as DataTrainingArguments from transformers import Trainer, TrainingArguments from transformers.trainer_utils import is_main_process def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Prepare Question-Answering task # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets is_language_sensitive = hasattr(model.config, "lang2id") train_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir ) if training_args.do_train else None ) eval_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, mode="dev", is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir, ) if training_args.do_eval else None ) # Data collator data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
11,724
import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _is_chinese_char(cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def get_chinese_word(tokens: List[str]): word_set = set() for token in tokens: chinese_word = len(token) > 1 and is_chinese(token) if chinese_word: word_set.add(token) word_list = list(word_set) return word_list def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()): if not chinese_word_set: return bert_tokens max_word_len = max([len(w) for w in chinese_word_set]) bert_word = bert_tokens start, end = 0, len(bert_word) while start < end: single_word = True if is_chinese(bert_word[start]): l = min(end - start, max_word_len) for i in range(l, 1, -1): whole_word = "".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1, start + i): bert_word[j] = "##" + bert_word[j] start = start + i single_word = False break if single_word: start += 1 return bert_word def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer): ltp_res = [] for i in range(0, len(lines), 100): res = ltp_tokenizer.seg(lines[i : i + 100])[0] res = [get_chinese_word(r) for r in res] ltp_res.extend(res) assert len(ltp_res) == len(lines) bert_res = [] for i in range(0, len(lines), 100): res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512) bert_res.extend(res["input_ids"]) assert len(bert_res) == len(lines) ref_ids = [] for input_ids, chinese_word in zip(bert_res, ltp_res): input_tokens = [] for id in input_ids: token = bert_tokenizer._convert_id_to_token(id) input_tokens.append(token) input_tokens = add_sub_symbol(input_tokens, chinese_word) ref_id = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. for i, token in enumerate(input_tokens): if token[:2] == "##": clean_token = token[2:] # save chinese tokens' pos if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)): ref_id.append(i) ref_ids.append(ref_id) assert len(ref_ids) == len(bert_res) return ref_ids
null
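A quick check of the CJK codepoint test used above; this assumes ``_is_chinese_char`` from the snippet is defined in the session, and the sample characters are illustrative.

```
# "中" and "文" fall in the main CJK block, while Latin "a" and Hiragana "あ" do not.
for ch in ["中", "文", "a", "あ"]:
    print(ch, _is_chinese_char(ord(ch)))
# 中 True
# 文 True
# a False
# あ False
```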
11,725
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ train_data_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a text file)."} ) train_data_files: Optional[str] = field( default=None, metadata={ "help": ( "The input training data files (multiple files in glob format). " "Very often splitting large files to smaller files can prevent tokenizer going out of memory" ) }, ) eval_data_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."}, ) eval_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."}, ) line_by_line: bool = field( default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, ) mlm: bool = field( default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."} ) whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."}) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) plm_probability: float = field( default=1 / 6, metadata={ "help": ( "Ratio of length of a span of masked tokens to surrounding context length for permutation language" " modeling." ) }, ) max_span_length: int = field( default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} ) block_size: int = field( default=-1, metadata={ "help": ( "Optional input sequence length after tokenization." "The training dataset will be truncated in block of this size for training." "Default to the model max input length for single sentence inputs (take into account special tokens)." 
) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def get_dataset( args: DataTrainingArguments, tokenizer: PreTrainedTokenizer, evaluate: bool = False, cache_dir: Optional[str] = None, ): def _dataset(file_path, ref_path=None): if args.line_by_line: if ref_path is not None: if not args.whole_word_mask or not args.mlm: raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask") return LineByLineWithRefDataset( tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path, ) return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size) else: return TextDataset( tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir, ) if evaluate: return _dataset(args.eval_data_file, args.eval_ref_file) elif args.train_data_files: return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)]) else: return _dataset(args.train_data_file, args.train_ref_file)
null
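A hedged sketch of wiring ``get_dataset`` to a tiny line-by-line file: it assumes ``DataTrainingArguments`` (a dataclass in the original script) and ``get_dataset`` from the snippet are available, the checkpoint name and file contents are illustrative, and the tokenizer is downloaded from the Hub.

```
# Hedged sketch: build a two-line training file and construct the dataset from it.
from pathlib import Path
from transformers import AutoTokenizer

Path("tiny_train.txt").write_text("Hello world.\nAnother short line.\n")

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = DataTrainingArguments(
    train_data_file="tiny_train.txt",
    line_by_line=True,
    mlm=True,
    block_size=32,
)
train_dataset = get_dataset(data_args, tokenizer=tokenizer)
print(len(train_dataset))   # 2 examples, one per line
print(train_dataset[0])     # token ids of the first line
```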
11,726
import logging import math import os from dataclasses import dataclass, field from glob import glob from typing import Optional from torch.utils.data import ConcatDataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoConfig, AutoModelWithLMHead, AutoTokenizer, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForWholeWordMask, HfArgumentParser, LineByLineTextDataset, LineByLineWithRefDataset, PreTrainedTokenizer, TextDataset, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process def main(): def _mp_fn(index): # For xla_spawn (TPUs) main()
null
11,727
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
null
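A brief, hedged example of how `add_generic_args` is typically combined with task-specific arguments before training; the argument values below are placeholders and the commented-out task module is hypothetical.

import argparse
import os

parser = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
# A task module would normally extend the parser here, e.g.:
# parser = MyTaskTransformer.add_model_specific_args(parser, os.getcwd())  # hypothetical class
args = parser.parse_args(["--output_dir", "out", "--data_dir", "data", "--do_train"])
print(args.output_dir, args.gradient_clip_val, args.accumulate_grad_batches)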
11,728
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon ) self.opt = optimizer 
scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, mode): if mode == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). 
Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} # TODO: remove with PyTorch 1.6 since pl uses native amp if args.fp16: train_params["precision"] = 16 train_params["amp_level"] = args.fp16_opt_level if args.gpus > 1: train_params["distributed_backend"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["accelerator"] = extra_train_kwargs.get("accelerator", None) train_params["profiler"] = extra_train_kwargs.get("profiler", None) trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks, logger=logger, checkpoint_callback=checkpoint_callback, **train_params, ) if args.do_train: trainer.fit(model) return trainer
null
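A hedged sketch of the intended extension point: task modules subclass `BaseTransformer`, override `get_dataloader`, and add their own `training_step`/`validation_step`. The class below is illustrative, and `MODEL_MODES` is assumed to map "sequence-classification" to the matching Auto class as in the full module.

class ToySequenceClassifier(BaseTransformer):  # hypothetical subclass
    mode = "sequence-classification"

    def __init__(self, hparams):
        super().__init__(hparams, num_labels=2, mode=self.mode)

    def training_step(self, batch, batch_idx):
        outputs = self.model(**batch)
        loss = outputs[0]
        return {"loss": loss}

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        # Build and cache features for "train"/"dev"/"test" and wrap them in a
        # torch DataLoader; intentionally left abstract in this sketch.
        raise NotImplementedError

# Typical wiring (hedged): args come from add_generic_args plus add_model_specific_args,
# then generic_train(ToySequenceClassifier(args), args) runs the Lightning Trainer.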
11,729
argparse import json import logging import math import os from pathlib import Path import datasets import torch from datasets import load_dataset from torch.utils.data import DataLoader from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForImageClassification, SchedulerType, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version def parse_args(): parser = argparse.ArgumentParser(description="Fine-tune a Transformers model on an image classification dataset") parser.add_argument( "--dataset_name", type=str, default="cifar10", help=( "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private," " dataset)." ), ) parser.add_argument("--train_dir", type=str, default=None, help="A folder containing the training data.") parser.add_argument("--validation_dir", type=str, default=None, help="A folder containing the validation data.") parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--max_eval_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ), ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Percent to split off of train for validation", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", default="google/vit-base-patch16-224-in21k", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Whether or not to enable to load a pretrained model whose head dimensions are different.", ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_dir is None and args.validation_dir is None: raise ValueError("Need either a dataset name or a training/validation folder.") if args.push_to_hub or args.with_tracking: if args.output_dir is None: raise ValueError( "Need an `output_dir` to create a repo when `--push_to_hub` or `with_tracking` is specified." ) if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args
null
11,730
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import evaluate import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoFeatureExtractor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def pil_loader(path: str): with open(path, "rb") as f: im = Image.open(f) return im.convert("RGB")
null
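A short, hedged usage example of `pil_loader`; the file path is a placeholder.

img = pil_loader("example.jpg")  # placeholder path
print(img.size, img.mode)  # mode is always "RGB" after conversion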
11,731
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from PIL import Image from torchvision.transforms import ( CenterCrop, Compose, Normalize, RandomHorizontalFlip, RandomResizedCrop, Resize, ToTensor, ) import evaluate import transformers from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, AutoConfig, AutoFeatureExtractor, AutoModelForImageClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["labels"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels}
null
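A hedged illustration of the collator above with two synthetic examples; this is the shape contract torch's `DataLoader` expects from a `collate_fn`.

import torch

fake_examples = [
    {"pixel_values": torch.rand(3, 224, 224), "labels": 0},
    {"pixel_values": torch.rand(3, 224, 224), "labels": 1},
]
batch = collate_fn(fake_examples)
print(batch["pixel_values"].shape)  # torch.Size([2, 3, 224, 224])
print(batch["labels"])  # tensor([0, 1])
# In the script this is passed as DataLoader(dataset, collate_fn=collate_fn, batch_size=...).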
11,732
import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_glue", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( "glue", data_args.task_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) elif data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset( "csv", data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Loading a dataset from local json files raw_datasets = load_dataset( "json", data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. # Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Preprocessing the raw_datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." "\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif data_args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = evaluate.load("glue", data_args.task_name) else: metric = evaluate.load("accuracy") # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding when the tokenizer is passed to Trainer, so we change it if # we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") valid_mm_dataset = raw_datasets["validation_mismatched"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(valid_mm_dataset), data_args.max_eval_samples) valid_mm_dataset = valid_mm_dataset.select(range(max_eval_samples)) eval_datasets.append(valid_mm_dataset) combined = {} for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = ( data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) ) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) if task == "mnli-mm": metrics = {k + "_mm": v for k, v in metrics.items()} if task is not None and "mnli" in task: combined.update(metrics) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", combined if task is not None and "mnli" in task else metrics) if training_args.do_predict: logger.info("*** Predict ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] predict_datasets = [predict_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") predict_datasets.append(raw_datasets["test_mismatched"]) for predict_dataset, task in zip(predict_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. 
predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, f"predict_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info(f"***** Predict results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if data_args.task_name is not None: kwargs["language"] = "en" kwargs["dataset_tags"] = "glue" kwargs["dataset_args"] = data_args.task_name kwargs["dataset"] = f"GLUE {data_args.task_name.upper()}" if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
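The `sys.argv` check at the top of `main` lets this script be driven by a single JSON file instead of CLI flags. A hedged sketch of that route, with made-up hyperparameter values:

# Write placeholder arguments to a JSON file, then run: python run_glue.py args.json
import json

args = {
    "model_name_or_path": "bert-base-uncased",
    "task_name": "mrpc",
    "do_train": True,
    "do_eval": True,
    "max_seq_length": 128,
    "per_device_train_batch_size": 32,
    "learning_rate": 2e-5,
    "num_train_epochs": 3,
    "output_dir": "./mrpc_out",
}
with open("args.json", "w") as f:
    json.dump(args, f, indent=2)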
11,733
argparse import json import logging import math import os import random from pathlib import Path import datasets import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, PretrainedConfig, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--task_name", type=str, default=None, help="The name of the glue task to train on.", choices=list(task_to_keys.keys()), ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Whether or not to enable to load a pretrained model whose head dimensions are different.", ) args = parser.parse_args() # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
11,734
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from PIL import Image from torchvision.io import ImageReadMode, read_image from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( AutoFeatureExtractor, AutoModel, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) input_ids = torch.tensor([example["input_ids"] for example in examples], dtype=torch.long) attention_mask = torch.tensor([example["attention_mask"] for example in examples], dtype=torch.long) return { "pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask, "return_loss": True, }
null
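A hedged toy batch for the contrastive (CLIP-style) collator above; the token ids and image tensors are fabricated for illustration, and all `input_ids` lists share the same length so `torch.tensor` can stack them.

import torch

examples = [
    {"pixel_values": torch.rand(3, 224, 224), "input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1]},
    {"pixel_values": torch.rand(3, 224, 224), "input_ids": [101, 7592, 102], "attention_mask": [1, 1, 1]},
]
batch = collate_fn(examples)
print(batch["pixel_values"].shape, batch["input_ids"].shape)  # [2, 3, 224, 224] and [2, 3]
print(batch["return_loss"])  # True, so the model computes the contrastive loss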
11,735
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import numpy as np from datasets import DatasetDict, load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version The provided code snippet includes necessary dependencies for implementing the `random_subsample` function. Write a Python function `def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000)` to solve the following problem: Randomly sample chunks of `max_length` seconds from the input audio Here is the function: def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000): """Randomly sample chunks of `max_length` seconds from the input audio""" sample_length = int(round(sample_rate * max_length)) if len(wav) <= sample_length: return wav random_offset = randint(0, len(wav) - sample_length - 1) return wav[random_offset : random_offset + sample_length]
Randomly sample chunks of `max_length` seconds from the input audio
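A hedged usage example with synthetic audio: five seconds of noise at 16 kHz, subsampled to at most two seconds.

import numpy as np

wav = np.random.randn(5 * 16000).astype(np.float32)
chunk = random_subsample(wav, max_length=2.0, sample_rate=16000)
print(len(chunk))  # 32000 samples, i.e. 2 seconds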
11,736
import argparse import json import logging import math import os import random from dataclasses import dataclass from itertools import chain from pathlib import Path from typing import Optional, Union import datasets import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, PreTrainedTokenizerBase, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import PaddingStrategy, check_min_version, get_full_repo_name, send_example_telemetry MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a multiple choice task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--debug", action="store_true", help="Activate debug mode and run training only with a subset of data.", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
11,737
import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_swag", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. 
You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: # Downloading and loading the swag dataset from the hub. raw_datasets = load_dataset( "swag", "regular", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # When using your own dataset or a different dataset from swag, you will probably need to change this. ending_names = [f"ending{i}" for i in range(4)] context_name = "sent1" question_header_name = "sent2" if data_args.max_seq_length is None: max_seq_length = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --max_seq_length xxx." ) max_seq_length = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) # Preprocessing the datasets. 
def preprocess_function(examples): first_sentences = [[context] * 4 for context in examples[context_name]] question_headers = examples[question_header_name] second_sentences = [ [f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers) ] # Flatten out first_sentences = list(chain(*first_sentences)) second_sentences = list(chain(*second_sentences)) # Tokenize tokenized_examples = tokenizer( first_sentences, second_sentences, truncation=True, max_length=max_seq_length, padding="max_length" if data_args.pad_to_max_length else False, ) # Un-flatten return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) ) # Metric def compute_metrics(eval_predictions): predictions, label_ids = eval_predictions preds = np.argmax(predictions, axis=1) return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()} # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) 
trainer.save_metrics("eval", metrics) kwargs = dict( finetuned_from=model_args.model_name_or_path, tasks="multiple-choice", dataset_tags="swag", dataset_args="regular", dataset="SWAG", language="en", ) if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
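Note on the record above: its preprocess_function returns, per example, lists of 4 tokenized choices, and the Trainer is wired up with a DataCollatorForMultipleChoice that the snippet never defines. Below is a minimal sketch of such a collator (it closely mirrors the helper the upstream run_swag.py example ships with, but treat it as an assumption rather than the record's own code): it flattens the choices, pads them, and reshapes the batch back to (batch_size, num_choices, seq_len).

# Hedged sketch of a multiple-choice data collator compatible with the preprocess_function output above.
from dataclasses import dataclass
from typing import Optional, Union

import torch
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import PaddingStrategy


@dataclass
class DataCollatorForMultipleChoice:
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None

    def __call__(self, features):
        label_name = "label" if "label" in features[0] else "labels"
        labels = [feature.pop(label_name) for feature in features]
        batch_size = len(features)
        num_choices = len(features[0]["input_ids"])
        # Flatten (batch, num_choices, seq) -> (batch * num_choices, seq) so tokenizer.pad can handle it.
        flattened = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices)] for feature in features
        ]
        flattened = sum(flattened, [])
        batch = self.tokenizer.pad(
            flattened,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Un-flatten back to (batch, num_choices, seq) and re-attach the labels.
        batch = {k: v.view(batch_size, num_choices, -1) for k, v in batch.items()}
        batch["labels"] = torch.tensor(labels, dtype=torch.int64)
        return batch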
11,738
import json import logging import os import re import sys import warnings from dataclasses import dataclass, field from typing import Dict, List, Optional, Union import datasets import numpy as np import torch from datasets import DatasetDict, load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForCTC, AutoProcessor, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Processor, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata)
null
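Note on the record above: list_field exists because dataclasses reject mutable default values, so list-valued defaults must go through default_factory. A minimal usage sketch with a hypothetical argument class and made-up defaults (not part of the record):

# Hedged usage sketch: each dataclass instance gets its own fresh copy of the default list.
from dataclasses import dataclass, field
from typing import List, Optional


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class DummyDataArguments:  # hypothetical, for illustration only
    chars_to_ignore: Optional[List[str]] = list_field(
        default=[",", "?", "!"],
        metadata={"help": "Characters to strip from the transcripts."},
    )


args = DummyDataArguments()
print(args.chars_to_ignore)  # [',', '?', '!'] - a per-instance list, not a shared mutable default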
11,739
import functools import json import logging import os import re import sys import warnings from dataclasses import dataclass, field from typing import Dict, List, Optional, Union import datasets import numpy as np import torch from datasets import DatasetDict, load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForCTC, AutoProcessor, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Processor, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def create_vocabulary_from_data( datasets: DatasetDict, word_delimiter_token: Optional[str] = None, unk_token: Optional[str] = None, pad_token: Optional[str] = None, ): # Given training and test labels create vocabulary def extract_all_chars(batch): all_text = " ".join(batch["target_text"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]} vocabs = datasets.map( extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=datasets["train"].column_names, ) # take union of all unique characters in each dataset vocab_set = functools.reduce( lambda vocab_1, vocab_2: set(vocab_1["vocab"][0]) | set(vocab_2["vocab"][0]), vocabs.values() ) vocab_dict = {v: k for k, v in enumerate(sorted(list(vocab_set)))} # replace white space with delimiter token if word_delimiter_token is not None: vocab_dict[word_delimiter_token] = vocab_dict[" "] del vocab_dict[" "] # add unk and pad token if unk_token is not None: vocab_dict[unk_token] = len(vocab_dict) if pad_token is not None: vocab_dict[pad_token] = len(vocab_dict) return vocab_dict
null
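Note on the record above: the vocab_dict returned by create_vocabulary_from_data is typically serialized to a vocab.json and then used to construct a CTC tokenizer. A hedged sketch of that downstream step follows; the directory layout, special-token choices, and the build_ctc_tokenizer helper name are assumptions, not part of the record.

# Hedged sketch: write the character vocabulary to disk and build a Wav2Vec2 CTC tokenizer from it.
import json
import os

from transformers import Wav2Vec2CTCTokenizer


def build_ctc_tokenizer(vocab_dict, output_dir, unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|"):
    os.makedirs(output_dir, exist_ok=True)
    vocab_path = os.path.join(output_dir, "vocab.json")
    with open(vocab_path, "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
    # The tokenizer is built directly from the vocab.json file; the special tokens must match
    # the unk/pad tokens that were appended to vocab_dict above.
    return Wav2Vec2CTCTokenizer(
        vocab_path,
        unk_token=unk_token,
        pad_token=pad_token,
        word_delimiter_token=word_delimiter_token,
    )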
11,740
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import ClassLabel, load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, HfArgumentParser, PretrainedConfig, PreTrainedTokenizerFast, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_ner", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. 
You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. if training_args.do_train: column_names = raw_datasets["train"].column_names features = raw_datasets["train"].features else: column_names = raw_datasets["validation"].column_names features = raw_datasets["validation"].features if data_args.text_column_name is not None: text_column_name = data_args.text_column_name elif "tokens" in column_names: text_column_name = "tokens" else: text_column_name = column_names[0] if data_args.label_column_name is not None: label_column_name = data_args.label_column_name elif f"{data_args.task_name}_tags" in column_names: label_column_name = f"{data_args.task_name}_tags" else: label_column_name = column_names[1] # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. # Otherwise, we have to get the list of labels manually. labels_are_int = isinstance(features[label_column_name].feature, ClassLabel) if labels_are_int: label_list = features[label_column_name].feature.names label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(raw_datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer_name_or_path = model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path if config.model_type in {"bloom", "gpt2", "roberta"}: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, ) else: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" " this requirement" ) # Model has labels -> use them. if model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id: if list(sorted(model.config.label2id.keys())) == list(sorted(label_list)): # Reorganize `label_list` to match the ordering of the model. if labels_are_int: label_to_id = {i: int(model.config.label2id[l]) for i, l in enumerate(label_list)} label_list = [model.config.id2label[i] for i in range(num_labels)] else: label_list = [model.config.id2label[i] for i in range(num_labels)] label_to_id = {l: i for i, l in enumerate(label_list)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {list(sorted(model.config.label2id.keys()))}, dataset labels:" f" {list(sorted(label_list))}.\nIgnoring the model labels as a result.", ) # Set the correspondences label/ID inside the model config model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {i: l for i, l in enumerate(label_list)} # Map that sends B-Xxx label to its I-Xxx counterpart b_to_i_label = [] for idx, label in enumerate(label_list): if label.startswith("B-") and label.replace("B-", "I-") in label_list: b_to_i_label.append(label_list.index(label.replace("B-", "I-"))) else: b_to_i_label.append(idx) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], padding=padding, truncation=True, max_length=data_args.max_seq_length, # We use this argument because the texts in our dataset are lists of words (with a label for each word). 
is_split_into_words=True, ) labels = [] for i, label in enumerate(examples[label_column_name]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: if data_args.label_all_tokens: label_ids.append(b_to_i_label[label_to_id[label[word_idx]]]) else: label_ids.append(-100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_dataset.map( tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) # Data collator data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) # Metrics metric = evaluate.load("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested 
dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict") predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) # Save predictions output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt") if trainer.is_world_process_zero(): with open(output_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
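Note on the record above: the word_ids-based label alignment in tokenize_and_align_labels is easier to see on a toy example. A hedged sketch, assuming any fast tokenizer and made-up tag ids (the sentence and label numbers are illustrative only):

# Hedged toy example of aligning word-level NER labels to sub-word tokens.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # any fast tokenizer works
words = ["Hugging", "Face", "is", "in", "NYC"]
word_labels = [3, 4, 0, 0, 5]  # hypothetical ids, e.g. B-ORG, I-ORG, O, O, B-LOC

encoding = tokenizer(words, is_split_into_words=True)
aligned = []
previous_word_idx = None
for word_idx in encoding.word_ids():
    if word_idx is None:                  # special tokens ([CLS]/[SEP]) are ignored by the loss
        aligned.append(-100)
    elif word_idx != previous_word_idx:   # first sub-token of a word keeps the word's label
        aligned.append(word_labels[word_idx])
    else:                                 # later sub-tokens get -100 (unless label_all_tokens is set)
        aligned.append(-100)
    previous_word_idx = word_idx

print(list(zip(encoding.tokens(), aligned)))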
11,741
import argparse import json import logging import math import os import random from pathlib import Path import datasets import torch from datasets import ClassLabel, load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, PretrainedConfig, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser( description="Finetune a transformers model on a text classification task (NER) with accelerate library" ) parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--text_column_name", type=str, default=None, help="The column name of text to input in the file (a csv or JSON file).", ) parser.add_argument( "--label_column_name", type=str, default=None, help="The column name of label to input in the file (a csv or JSON file).", ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_length` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--label_all_tokens", action="store_true", help="Setting labels of all special tokens to -100 and thus PyTorch will ignore them.", ) parser.add_argument( "--return_entity_level_metrics", action="store_true", help="Indication whether entity level metrics are to be returner.", ) parser.add_argument( "--task_name", type=str, default="ner", choices=["ner", "pos", "chunk"], help="The name of the task.", ) parser.add_argument( "--debug", action="store_true", help="Activate debug mode and run training only with a subset of data.", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) parser.add_argument( "--ignore_mismatched_sizes", action="store_true", help="Whether or not to enable to load a pretrained model whose head dimensions are different.", ) args = parser.parse_args() # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
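Note on the record above: MODEL_TYPES is built from MODEL_CONFIG_CLASSES, which the snippet never defines. In the upstream no_trainer scripts it is derived from MODEL_MAPPING roughly as sketched below; the trailing smoke-test invocation uses hypothetical argument values and assumes the parse_args function from the record is in scope.

# Hedged sketch of the missing MODEL_CONFIG_CLASSES definition plus a minimal CLI smoke test.
import sys

from transformers import MODEL_MAPPING

MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)

# Hypothetical invocation; parse_args() is the function defined in the record above.
sys.argv = [
    "run_ner_no_trainer.py",
    "--model_name_or_path", "bert-base-cased",
    "--dataset_name", "conll2003",
    "--output_dir", "/tmp/ner-out",
    "--task_name", "ner",
]
args = parse_args()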
11,742
import argparse import json import logging import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions The provided code snippet includes necessary dependencies for implementing the `save_prefixed_metrics` function. Write a Python function `def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval")` to solve the following problem: Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix. Here is the function: def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval"): """ Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix. """ # Prefix all keys with metric_key_prefix + '_' for key in list(results.keys()): if not key.startswith(f"{metric_key_prefix}_"): results[f"{metric_key_prefix}_{key}"] = results.pop(key) with open(os.path.join(output_dir, file_name), "w") as f: json.dump(results, f, indent=4)
Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix.
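Note on the record above: a hedged usage sketch for save_prefixed_metrics with made-up metric values. Keys that do not already carry the prefix are renamed before being written to all_results.json in the output directory; the function itself is the one defined in the record.

# Hedged usage sketch (the metric numbers are illustrative only).
import tempfile

results = {"exact_match": 81.2, "f1": 88.6, "eval_samples": 10570}
with tempfile.TemporaryDirectory() as output_dir:
    save_prefixed_metrics(results, output_dir)
    # all_results.json now holds {"eval_exact_match": 81.2, "eval_f1": 88.6, "eval_samples": 10570}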
11,743
import argparse import json import logging import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--preprocessing_num_workers", type=int, default=1, help="A csv or a json file containing the training data." ) parser.add_argument("--do_predict", action="store_true", help="To do prediction on the question answering model") parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--test_file", type=str, default=None, help="A csv or a json file containing the Prediction data." ) parser.add_argument( "--max_seq_length", type=int, default=384, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_seq_length`. 
Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--doc_stride", type=int, default=128, help="When splitting up a long document into chunks how much stride to take between chunks.", ) parser.add_argument( "--n_best_size", type=int, default=20, help="The total number of n-best predictions to generate when looking for an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help=( "The threshold used to select the null answer: if the best answer has a score that is less than " "the score of the null answer minus this threshold, the null answer is selected for this example. " "Only useful when `version_2_with_negative=True`." ), ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, some of the examples do not have an answer.", ) parser.add_argument( "--max_answer_length", type=int, default=30, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--max_eval_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
), ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_predict_samples", type=int, default=None, help="For debugging purposes or quicker training, truncate the number of prediction examples to this", ) parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if ( args.dataset_name is None and args.train_file is None and args.validation_file is None and args.test_file is None ): raise ValueError("Need either a dataset name or a training/validation/test file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.test_file is not None: extension = args.test_file.split(".")[-1] assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
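Note on the record above: the parsed train/validation/test file arguments are usually handed to load_dataset, with the file extension selecting the loading script. A hedged sketch of that downstream step; the load_qa_files helper is an assumption that mirrors the pattern used in the other records, not code from this one.

# Hedged sketch: turn the parsed file arguments into a DatasetDict.
from datasets import load_dataset


def load_qa_files(args):
    data_files = {}
    if args.train_file is not None:
        data_files["train"] = args.train_file
    if args.validation_file is not None:
        data_files["validation"] = args.validation_file
    if args.test_file is not None:
        data_files["test"] = args.test_file
    extension = (args.train_file or args.validation_file or args.test_file).split(".")[-1]
    if extension == "json":
        # SQuAD-style json files nest the examples under a top-level "data" field.
        return load_dataset("json", data_files=data_files, field="data")
    return load_dataset(extension, data_files=data_files)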
11,744
import logging import os import sys from dataclasses import dataclass, field from typing import List, Optional, Tuple import datasets from datasets import load_dataset import evaluate import transformers from trainer_seq2seq_qa import QuestionAnsweringSeq2SeqTrainer from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, Seq2SeqTrainingArguments, set_seed, ) from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_seq2seq_qa", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model.resize_token_embeddings(len(tokenizer)) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # Preprocessing the datasets. # We need to generate and tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Get the column names for input/target. 
dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name, None) if data_args.question_column is None: question_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: question_column = data_args.question_column if question_column not in column_names: raise ValueError( f"--question_column' value '{data_args.question_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.context_column is None: context_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: context_column = data_args.context_column if context_column not in column_names: raise ValueError( f"--context_column' value '{data_args.context_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.answer_column is None: answer_column = dataset_columns[2] if dataset_columns is not None else column_names[2] else: answer_column = data_args.answer_column if answer_column not in column_names: raise ValueError( f"--answer_column' value '{data_args.answer_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_answer_length for training. max_answer_length = data_args.max_answer_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for" f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" ) if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_squad_batch( examples, question_column: str, context_column: str, answer_column: str, ) -> Tuple[List[str], List[str]]: questions = examples[question_column] contexts = examples[context_column] answers = examples[answer_column] def generate_input(_question, _context): return " ".join(["question:", _question.lstrip(), "context:", _context.lstrip()]) inputs = [generate_input(question, context) for question, context in zip(questions, contexts)] targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers] return inputs, targets def preprocess_function(examples): inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column) model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True) # Tokenize targets with text_target=... labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs # Validation preprocessing def preprocess_validation_function(examples): inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column) model_inputs = tokenizer( inputs, max_length=max_seq_length, padding=padding, truncation=True, return_overflowing_tokens=True, return_offsets_mapping=True, ) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_answer_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = model_inputs.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. model_inputs["example_id"] = [] # Augment the overflowing tokens to the labels labels_out = [] for i in range(len(model_inputs["input_ids"])): # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] model_inputs["example_id"].append(examples["id"][sample_index]) labels_out.append(labels["input_ids"][sample_index]) model_inputs["labels"] = labels_out return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if agument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( preprocess_validation_function, batched=True, 
num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( preprocess_validation_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Post-processing: def post_processing_function( examples: datasets.Dataset, features: datasets.Dataset, outputs: EvalLoopOutput, stage="eval" ): # Decode the predicted tokens. preds = outputs.predictions if isinstance(preds, tuple): preds = preds[0] decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} feature_per_example = {example_id_to_index[feature["example_id"]]: i for i, feature in enumerate(features)} predictions = {} # Let's loop over all the examples! for example_index, example in enumerate(examples): # This is the index of the feature associated to the current example. feature_index = feature_per_example[example_index] predictions[example["id"]] = decoded_preds[feature_index] # Format the result to the format the metric expects. 
if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) # Initialize our Trainer trainer = QuestionAnsweringSeq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, post_process_function=post_processing_function, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} max_length = ( training_args.generation_max_length if training_args.generation_max_length is not None else data_args.val_max_answer_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval") max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if training_args.push_to_hub: kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name trainer.push_to_hub(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
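Note on the record above: the text-to-text formatting built by preprocess_squad_batch and its inner generate_input helper is easier to see on a made-up SQuAD-style example. The sketch below inlines that logic because the original is nested inside main(); the sample question, context, and answer are invented for illustration.

# Hedged toy illustration of the seq2seq QA input/target construction.
def generate_input(question, context):
    return " ".join(["question:", question.lstrip(), "context:", context.lstrip()])


question = "  Where is the Eiffel Tower?"
context = "The Eiffel Tower is in Paris."
answers = {"text": ["Paris"], "answer_start": [24]}

model_input = generate_input(question, context)
target = answers["text"][0] if len(answers["text"]) > 0 else ""
print(model_input)  # question: Where is the Eiffel Tower? context: The Eiffel Tower is in Paris.
print(target)       # Paris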
11,745
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets from datasets import load_dataset import evaluate import transformers from trainer_qa import QuestionAnsweringTrainer from transformers import ( DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TrainingArguments, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizerFast, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions_with_beam_search def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_qa_beam_search", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = XLNetConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = XLNetTokenizerFast.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = XLNetForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: column_names = raw_datasets["test"].column_names question_column_name = "question" if "question" in column_names else column_names[0] context_column_name = "context" if "context" in column_names else column_names[1] answer_column_name = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). pad_on_right = tokenizer.padding_side == "right" if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." 
) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). special_tokens = tokenized_examples.pop("special_tokens_mask") # Let's label those examples! tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] tokenized_examples["is_impossible"] = [] tokenized_examples["cls_index"] = [] tokenized_examples["p_mask"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples["cls_index"].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples["token_type_ids"][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others get 1.0. # The cls token gets 1.0 too (for predictions of empty answers). tokenized_examples["p_mask"].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) tokenized_examples["is_impossible"].append(1.0) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. 
token_start_index = 0 while sequence_ids[token_start_index] != context_idx: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != context_idx: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) tokenized_examples["is_impossible"].append(1.0) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) tokenized_examples["is_impossible"].append(0.0) return tokenized_examples if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # Select samples from Dataset, This will help to decrease processing time max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create Training Features with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Select samples from dataset again since Feature Creation might increase number of features max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, return_special_tokens_mask=True, return_token_type_ids=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The special tokens will help us build the p_mask (which indicates the tokens that can't be in answers). 
special_tokens = tokenized_examples.pop("special_tokens_mask") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples["example_id"] = [] # We still provide the index of the CLS token and the p_mask to the model, but not the is_impossible label. tokenized_examples["cls_index"] = [] tokenized_examples["p_mask"] = [] for i, input_ids in enumerate(tokenized_examples["input_ids"]): # Find the CLS token in the input ids. cls_index = input_ids.index(tokenizer.cls_token_id) tokenized_examples["cls_index"].append(cls_index) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples["token_type_ids"][i] for k, s in enumerate(special_tokens[i]): if s: sequence_ids[k] = 3 context_idx = 1 if pad_on_right else 0 # Build the p_mask: non special tokens and context gets 0.0, the others 1.0. tokenized_examples["p_mask"].append( [ 0.0 if (not special_tokens[i][k] and s == context_idx) or k == cls_index else 1.0 for k, s in enumerate(sequence_ids) ] ) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_idx else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # Selecting Eval Samples from Dataset max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Create Features from Eval Dataset with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # Selecting Samples from Dataset again since Feature Creation might increase samples size max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Test Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again 
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data # collator. data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) ) # Post-processing: def post_processing_function(examples, features, predictions, stage="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. predictions, scores_diff_json = postprocess_qa_predictions_with_beam_search( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, start_n_top=model.config.start_n_top, end_n_top=model.config.end_n_top, output_dir=training_args.output_dir, log_level=log_level, prefix=stage, ) # Format the result to the format the metric expects. if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": scores_diff_json[k]} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Initialize our Trainer trainer = QuestionAnsweringTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, 
len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
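The prepare_train_features and prepare_validation_features functions in the script above rely on a fast tokenizer's ability to split a long context into several overlapping features while remembering which example each feature came from and where each token sits in the original text. The following self-contained sketch isolates just that mechanism; the checkpoint name is only an illustration, and any fast tokenizer behaves the same way.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

question = "Where is the Eiffel Tower?"
context = "The Eiffel Tower is in Paris. " * 50  # long enough to overflow a 64-token window

encoded = tokenizer(
    question,
    context,
    truncation="only_second",
    max_length=64,
    stride=16,
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
    padding="max_length",
)

# One (question, context) pair became several overlapping 64-token features.
print(len(encoded["input_ids"]))
# Each feature maps back to the example it was cut from (all zeros here, single example).
print(encoded["overflow_to_sample_mapping"])
# Character offsets let us map token positions back into the original context string.
print(encoded["offset_mapping"][0][:10])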
11,746
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets from datasets import load_dataset import evaluate import transformers from trainer_qa import QuestionAnsweringTrainer from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_qa", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models at" " https://huggingface.co/transformers/index.html#supported-frameworks to find the model types that meet" " this requirement" ) # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: column_names = raw_datasets["test"].column_names question_column_name = "question" if "question" in column_names else column_names[0] context_column_name = "context" if "context" in column_names else column_names[1] answer_column_name = "answers" if "answers" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). 
pad_on_right = tokenizer.padding_side == "right" if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the" f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) # Training preprocessing def prepare_train_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples[answer_column_name][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). 
if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: # We will select sample from whole data if argument is specified max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Create train feature from dataset with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( prepare_train_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if data_args.max_train_samples is not None: # Number of samples might increase during Feature Creation, We select only specified max samples max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) # Validation preprocessing def prepare_validation_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=data_args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length" if data_args.pad_to_max_length else False, ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). 
sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_examples = raw_datasets["validation"] if data_args.max_eval_samples is not None: # We will select sample from whole data max_eval_samples = min(len(eval_examples), data_args.max_eval_samples) eval_examples = eval_examples.select(range(max_eval_samples)) # Validation Feature Creation with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if data_args.max_eval_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) if training_args.do_predict: if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_examples = raw_datasets["test"] if data_args.max_predict_samples is not None: # We will select sample from whole data predict_examples = predict_examples.select(range(data_args.max_predict_samples)) # Predict Feature Creation with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_examples.map( prepare_validation_features, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) if data_args.max_predict_samples is not None: # During Feature creation dataset samples might increase, we will select required samples again max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) # Data collator # We have already padded to max length if the corresponding flag is True, otherwise we need to pad in the data # collator. data_collator = ( default_data_collator if data_args.pad_to_max_length else DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) ) # Post-processing: def post_processing_function(examples, features, predictions, stage="eval"): # Post-processing: we match the start logits and end logits to answers in the original context. 
predictions = postprocess_qa_predictions( examples=examples, features=features, predictions=predictions, version_2_with_negative=data_args.version_2_with_negative, n_best_size=data_args.n_best_size, max_answer_length=data_args.max_answer_length, null_score_diff_threshold=data_args.null_score_diff_threshold, output_dir=training_args.output_dir, log_level=log_level, prefix=stage, ) # Format the result to the format the metric expects. if data_args.version_2_with_negative: formatted_predictions = [ {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items() ] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()] references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=formatted_predictions, label_ids=references) metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad") def compute_metrics(p: EvalPrediction): return metric.compute(predictions=p.predictions, references=p.label_ids) # Initialize our Trainer trainer = QuestionAnsweringTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, eval_examples=eval_examples if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, post_process_function=post_processing_function, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Prediction if training_args.do_predict: logger.info("*** Predict ***") results = trainer.predict(predict_dataset, predict_examples) metrics = results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
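The heart of prepare_train_features above is the conversion of a character-level answer span into token-level start_positions and end_positions via the offset mapping. The sketch below isolates that logic on a made-up question/context pair; the checkpoint name is only an example, and the full script's edge-case handling (answers falling outside a truncated span, CLS fallback) is deliberately omitted.

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

question = "What is the capital of France?"
context = "Paris is the capital and most populous city of France."
start_char = 0
end_char = start_char + len("Paris")

encoded = tokenizer(question, context, return_offsets_mapping=True)
sequence_ids = encoded.sequence_ids(0)
offsets = encoded["offset_mapping"]

# Find the first and last tokens that belong to the context (sequence id == 1).
token_start_index = next(i for i, s in enumerate(sequence_ids) if s == 1)
token_end_index = len(offsets) - 1
while sequence_ids[token_end_index] != 1:
    token_end_index -= 1

# Move both indices until they tightly enclose the answer characters, as in the script.
while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
    token_start_index += 1
start_position = token_start_index - 1
while offsets[token_end_index][1] >= end_char:
    token_end_index -= 1
end_position = token_end_index + 1

print(start_position, end_position)
print(tokenizer.decode(encoded["input_ids"][start_position : end_position + 1]))  # "paris"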
11,747
import collections import json import logging import os from typing import Optional, Tuple import numpy as np from tqdm.auto import tqdm logger = logging.getLogger(__name__) The provided code snippet includes necessary dependencies for implementing the `postprocess_qa_predictions` function. Write a Python function `def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, )` to solve the following problem: Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) Here is the function: def postprocess_qa_predictions( examples, features, predictions: Tuple[np.ndarray, np.ndarray], version_2_with_negative: bool = False, n_best_size: int = 20, max_answer_length: int = 30, null_score_diff_threshold: float = 0.0, output_dir: Optional[str] = None, prefix: Optional[str] = None, log_level: Optional[int] = logging.WARNING, ): """ Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the original contexts. This is the base postprocessing functions for models that only return start and end logits. Args: examples: The non-preprocessed dataset (see the main script for more information). 
features: The processed dataset (see the main script for more information). predictions (:obj:`Tuple[np.ndarray, np.ndarray]`): The predictions of the model: two arrays containing the start logits and the end logits respectively. Its first dimension must match the number of elements of :obj:`features`. version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the underlying dataset contains examples with no answers. n_best_size (:obj:`int`, `optional`, defaults to 20): The total number of n-best predictions to generate when looking for an answer. max_answer_length (:obj:`int`, `optional`, defaults to 30): The maximum length of an answer that can be generated. This is needed because the start and end predictions are not conditioned on one another. null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0): The threshold used to select the null answer: if the best answer has a score that is less than the score of the null answer minus this threshold, the null answer is selected for this example (note that the score of the null answer for an example giving several features is the minimum of the scores for the null answer on each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful when :obj:`version_2_with_negative` is :obj:`True`. output_dir (:obj:`str`, `optional`): If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers, are saved in `output_dir`. prefix (:obj:`str`, `optional`): If provided, the dictionaries mentioned above are saved with `prefix` added to their names. log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``): ``logging`` log level (e.g., ``logging.WARNING``) """ if len(predictions) != 2: raise ValueError("`predictions` should be a tuple with two elements (start_logits, end_logits).") all_start_logits, all_end_logits = predictions if len(predictions[0]) != len(features): raise ValueError(f"Got {len(predictions[0])} predictions and {len(features)} features.") # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. all_predictions = collections.OrderedDict() all_nbest_json = collections.OrderedDict() if version_2_with_negative: scores_diff_json = collections.OrderedDict() # Logging. logger.setLevel(log_level) logger.info(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_prediction = None prelim_predictions = [] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. 
offset_mapping = features[feature_index]["offset_mapping"] # Optional `token_is_max_context`, if provided we will remove answers that do not have the maximum context # available in the current feature. token_is_max_context = features[feature_index].get("token_is_max_context", None) # Update minimum null prediction. feature_null_score = start_logits[0] + end_logits[0] if min_null_prediction is None or min_null_prediction["score"] > feature_null_score: min_null_prediction = { "offsets": (0, 0), "score": feature_null_score, "start_logit": start_logits[0], "end_logit": end_logits[0], } # Go through all possibilities for the `n_best_size` greater start and end logits. start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or len(offset_mapping[start_index]) < 2 or offset_mapping[end_index] is None or len(offset_mapping[end_index]) < 2 ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue # Don't consider answer that don't have the maximum context available (if such information is # provided). if token_is_max_context is not None and not token_is_max_context.get(str(start_index), False): continue prelim_predictions.append( { "offsets": (offset_mapping[start_index][0], offset_mapping[end_index][1]), "score": start_logits[start_index] + end_logits[end_index], "start_logit": start_logits[start_index], "end_logit": end_logits[end_index], } ) if version_2_with_negative and min_null_prediction is not None: # Add the minimum null prediction prelim_predictions.append(min_null_prediction) null_score = min_null_prediction["score"] # Only keep the best `n_best_size` predictions. predictions = sorted(prelim_predictions, key=lambda x: x["score"], reverse=True)[:n_best_size] # Add back the minimum null prediction if it was removed because of its low score. if ( version_2_with_negative and min_null_prediction is not None and not any(p["offsets"] == (0, 0) for p in predictions) ): predictions.append(min_null_prediction) # Use the offsets to gather the answer text in the original context. context = example["context"] for pred in predictions: offsets = pred.pop("offsets") pred["text"] = context[offsets[0] : offsets[1]] # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. if len(predictions) == 0 or (len(predictions) == 1 and predictions[0]["text"] == ""): predictions.insert(0, {"text": "empty", "start_logit": 0.0, "end_logit": 0.0, "score": 0.0}) # Compute the softmax of all scores (we do it with numpy to stay independent from torch/tf in this file, using # the LogSumExp trick). scores = np.array([pred.pop("score") for pred in predictions]) exp_scores = np.exp(scores - np.max(scores)) probs = exp_scores / exp_scores.sum() # Include the probabilities in our predictions. for prob, pred in zip(probs, predictions): pred["probability"] = prob # Pick the best prediction. If the null answer is not possible, this is easy. 
if not version_2_with_negative: all_predictions[example["id"]] = predictions[0]["text"] else: # Otherwise we first need to find the best non-empty prediction. i = 0 while predictions[i]["text"] == "": i += 1 best_non_null_pred = predictions[i] # Then we compare to the null prediction using the threshold. score_diff = null_score - best_non_null_pred["start_logit"] - best_non_null_pred["end_logit"] scores_diff_json[example["id"]] = float(score_diff) # To be JSON-serializable. if score_diff > null_score_diff_threshold: all_predictions[example["id"]] = "" else: all_predictions[example["id"]] = best_non_null_pred["text"] # Make `predictions` JSON-serializable by casting np.float back to float. all_nbest_json[example["id"]] = [ {k: (float(v) if isinstance(v, (np.float16, np.float32, np.float64)) else v) for k, v in pred.items()} for pred in predictions ] # If we have an output_dir, let's save all those dicts. if output_dir is not None: if not os.path.isdir(output_dir): raise EnvironmentError(f"{output_dir} is not a directory.") prediction_file = os.path.join( output_dir, "predictions.json" if prefix is None else f"{prefix}_predictions.json" ) nbest_file = os.path.join( output_dir, "nbest_predictions.json" if prefix is None else f"{prefix}_nbest_predictions.json" ) if version_2_with_negative: null_odds_file = os.path.join( output_dir, "null_odds.json" if prefix is None else f"{prefix}_null_odds.json" ) logger.info(f"Saving predictions to {prediction_file}.") with open(prediction_file, "w") as writer: writer.write(json.dumps(all_predictions, indent=4) + "\n") logger.info(f"Saving nbest_preds to {nbest_file}.") with open(nbest_file, "w") as writer: writer.write(json.dumps(all_nbest_json, indent=4) + "\n") if version_2_with_negative: logger.info(f"Saving null_odds to {null_odds_file}.") with open(null_odds_file, "w") as writer: writer.write(json.dumps(scores_diff_json, indent=4) + "\n") return all_predictions
Post-processes the predictions of a question-answering model to convert them to answers that are substrings of the
original contexts. This is the base postprocessing function for models that only return start and end logits.

Args:
    examples: The non-preprocessed dataset (see the main script for more information).
    features: The processed dataset (see the main script for more information).
    predictions (:obj:`Tuple[np.ndarray, np.ndarray]`):
        The predictions of the model: two arrays containing the start logits and the end logits respectively. Its
        first dimension must match the number of elements of :obj:`features`.
    version_2_with_negative (:obj:`bool`, `optional`, defaults to :obj:`False`):
        Whether or not the underlying dataset contains examples with no answers.
    n_best_size (:obj:`int`, `optional`, defaults to 20):
        The total number of n-best predictions to generate when looking for an answer.
    max_answer_length (:obj:`int`, `optional`, defaults to 30):
        The maximum length of an answer that can be generated. This is needed because the start and end predictions
        are not conditioned on one another.
    null_score_diff_threshold (:obj:`float`, `optional`, defaults to 0):
        The threshold used to select the null answer: if the best answer has a score that is less than the score of
        the null answer minus this threshold, the null answer is selected for this example (note that the score of
        the null answer for an example giving several features is the minimum of the scores for the null answer on
        each feature: all features must be aligned on the fact they `want` to predict a null answer). Only useful
        when :obj:`version_2_with_negative` is :obj:`True`.
    output_dir (:obj:`str`, `optional`):
        If provided, the dictionaries of predictions, n_best predictions (with their scores and logits) and, if
        :obj:`version_2_with_negative=True`, the dictionary of the scores differences between best and null answers,
        are saved in `output_dir`.
    prefix (:obj:`str`, `optional`):
        If provided, the dictionaries mentioned above are saved with `prefix` added to their names.
    log_level (:obj:`int`, `optional`, defaults to ``logging.WARNING``):
        ``logging`` log level (e.g., ``logging.WARNING``)
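As a hedged usage sketch, the call below feeds postprocess_qa_predictions a one-example toy dataset together with hand-written start/end logits; in the real scripts the features and offset mappings come from prepare_validation_features and the logits from trainer.predict(). All ids, offsets and logit values here are invented, and the import assumes utils_qa.py sits on the path next to the example scripts.

import numpy as np
from datasets import Dataset

from utils_qa import postprocess_qa_predictions

examples = Dataset.from_dict(
    {
        "id": ["ex0"],
        "question": ["What is the capital of France?"],
        "context": ["Paris is the capital of France."],
    }
)

# One feature for the single example; offsets are character spans of the context tokens,
# with None for positions (CLS / question / SEP) that are not part of the context.
features = [
    {
        "example_id": "ex0",
        "offset_mapping": [None, (0, 5), (6, 8), (9, 12), (13, 20), (21, 23), (24, 30), (30, 31), None],
    }
]

num_tokens = len(features[0]["offset_mapping"])
start_logits = np.full((1, num_tokens), -10.0, dtype=np.float32)
end_logits = np.full((1, num_tokens), -10.0, dtype=np.float32)
start_logits[0, 1] = 5.0  # the token covering "Paris" starts the answer...
end_logits[0, 1] = 5.0    # ...and ends it as well

predictions = postprocess_qa_predictions(
    examples=examples,
    features=features,
    predictions=(start_logits, end_logits),
    n_best_size=5,
    max_answer_length=10,
)
print(predictions)  # OrderedDict([('ex0', 'Paris')])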
11,749
import argparse import json import logging import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( AdamW, DataCollatorWithPadding, EvalPrediction, SchedulerType, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizerFast, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions_with_beam_search The provided code snippet includes necessary dependencies for implementing the `save_prefixed_metrics` function. Write a Python function `def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval")` to solve the following problem: Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix. Here is the function: def save_prefixed_metrics(results, output_dir, file_name: str = "all_results.json", metric_key_prefix: str = "eval"): """ Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix. """ # Prefix all keys with metric_key_prefix + '_' for key in list(results.keys()): if not key.startswith(f"{metric_key_prefix}_"): results[f"{metric_key_prefix}_{key}"] = results.pop(key) with open(os.path.join(output_dir, file_name), "w") as f: json.dump(results, f, indent=4)
Save results while prefixing metric names. Args: results: (:obj:`dict`): A dictionary of results. output_dir: (:obj:`str`): An output directory. file_name: (:obj:`str`, `optional`, defaults to :obj:`all_results.json`): An output file name. metric_key_prefix: (:obj:`str`, `optional`, defaults to :obj:`eval`): A metric name prefix.
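A tiny usage sketch, assuming the save_prefixed_metrics function shown above is in scope; the metric values and the directory name are invented.

import os

results = {"exact_match": 81.2, "f1": 88.6, "eval_samples": 10570}

os.makedirs("outputs", exist_ok=True)
save_prefixed_metrics(results, "outputs")
# outputs/all_results.json now holds eval_exact_match, eval_f1 and eval_samples,
# i.e. every key gains the "eval_" prefix unless it already carries it.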
11,750
import argparse import json import logging import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( AdamW, DataCollatorWithPadding, EvalPrediction, SchedulerType, XLNetConfig, XLNetForQuestionAnswering, XLNetTokenizerFast, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version from utils_qa import postprocess_qa_predictions_with_beam_search def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a Question Answering task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--preprocessing_num_workers", type=int, default=1, help="A csv or a json file containing the training data." ) parser.add_argument("--do_predict", action="store_true", help="Eval the question answering model") parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--test_file", type=str, default=None, help="A csv or a json file containing the Prediction data." ) parser.add_argument( "--max_seq_length", type=int, default=384, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_seq_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--doc_stride", type=int, default=128, help="When splitting up a long document into chunks how much stride to take between chunks.", ) parser.add_argument( "--n_best_size", type=int, default=20, help="The total number of n-best predictions to generate when looking for an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help=( "The threshold used to select the null answer: if the best answer has a score that is less than " "the score of the null answer minus this threshold, the null answer is selected for this example. " "Only useful when `version_2_with_negative=True`." ), ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, some of the examples do not have an answer.", ) parser.add_argument( "--max_answer_length", type=int, default=30, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--max_train_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ), ) parser.add_argument( "--max_eval_samples", type=int, default=None, help=( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ), ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_predict_samples", type=int, default=None, help="For debugging purposes or quicker training, truncate the number of prediction examples to this", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", ) args = parser.parse_args() # Sanity checks if ( args.dataset_name is None and args.train_file is None and args.validation_file is None and args.test_file is None ): raise ValueError("Need either a dataset name or a training/validation/test file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.test_file is not None: extension = args.test_file.split(".")[-1] assert extension in ["csv", "json"], "`test_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
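A hedged smoke test of the parse_args() defined in the entry above: patch sys.argv before calling it. The script name, checkpoint, dataset and output path are placeholders, not values taken from the source.

import sys

sys.argv = [
    "run_qa_beam_search_no_trainer.py",  # hypothetical script name
    "--model_name_or_path", "xlnet-base-cased",
    "--dataset_name", "squad",
    "--output_dir", "./qa-output",
    "--max_seq_length", "384",
    "--doc_stride", "128",
]
args = parse_args()
print(args.n_best_size, args.max_answer_length)  # parser defaults: 20 30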
11,751
import logging import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional import datasets from datasets import load_dataset import evaluate import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForLanguageModeling, HfArgumentParser, Trainer, TrainingArguments, is_torch_tpu_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): def _mp_fn(index): # For xla_spawn (TPUs) main()
null
11,752
import logging import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional import datasets from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForPermutationLanguageModeling, HfArgumentParser, Trainer, TrainingArguments, XLNetConfig, XLNetLMHeadModel, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): def _mp_fn(index): # For xla_spawn (TPUs) main()
null
11,753
import logging import math import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional import datasets from datasets import load_dataset import evaluate import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, is_torch_tpu_available, set_seed, ) from transformers.testing_utils import CaptureLogger from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_clm", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). 
# # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) raw_datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} dataset_args = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = ( data_args.train_file.split(".")[-1] if data_args.train_file is not None else data_args.validation_file.split(".")[-1] ) if extension == "txt": extension = "text" dataset_args["keep_linebreaks"] = data_args.keep_linebreaks raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # If no validation data is there, validation_split_percentage will be used to divide the dataset. if "validation" not in raw_datasets.keys(): raw_datasets["validation"] = load_dataset( extension, data_files=data_files, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) raw_datasets["train"] = load_dataset( extension, data_files=data_files, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, **dataset_args, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: model = AutoModelForCausalLM.from_config(config) n_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) logger.info(f"Training new model from scratch - Total size={n_params/2**20:.2f}M params") model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = raw_datasets["train"].column_names else: column_names = raw_datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples[text_column_name]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output with training_args.main_process_first(desc="dataset map tokenization"): tokenized_datasets = raw_datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > 1024: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." 
) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map with training_args.main_process_first(desc="grouping texts together"): lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, desc=f"Grouping texts in chunks of {block_size}", ) if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = lm_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in tokenized_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = lm_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) def preprocess_logits_for_metrics(logits, labels): if isinstance(logits, tuple): # Depending on the model and config, logits may contain extra tensors, # like past_key_values, but logits always come first logits = logits[0] return logits.argmax(dim=-1) metric = evaluate.load("accuracy") def compute_metrics(eval_preds): preds, labels = eval_preds # preds have the same shape as the labels, after the argmax(-1) has been calculated # by preprocess_logits_for_metrics but we need to shift the labels labels = labels[:, 1:].reshape(-1) preds = preds[:, :-1].reshape(-1) return metric.compute(predictions=preds, references=labels) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, # Data collator will default to DataCollatorWithPadding, so we change it. 
data_collator=default_data_collator, compute_metrics=compute_metrics if training_args.do_eval and not is_torch_tpu_available() else None, preprocess_logits_for_metrics=preprocess_logits_for_metrics if training_args.do_eval and not is_torch_tpu_available() else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) try: perplexity = math.exp(metrics["eval_loss"]) except OverflowError: perplexity = float("inf") metrics["perplexity"] = perplexity trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-generation"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main()
null
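A standalone sketch of the chunking logic that group_texts applies in the entry above, run on toy token ids (block_size and the ids are made up for illustration):

from itertools import chain

block_size = 4
examples = {"input_ids": [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]}

concatenated = {k: list(chain(*examples[k])) for k in examples}              # ids 1..10 in one flat list
total_length = (len(concatenated["input_ids"]) // block_size) * block_size   # 8; the remainder is dropped
result = {
    k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
    for k, t in concatenated.items()
}
result["labels"] = result["input_ids"].copy()
print(result["input_ids"])  # [[1, 2, 3, 4], [5, 6, 7, 8]]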
11,754
import argparse import json import logging import math import os import random from itertools import chain from pathlib import Path import datasets import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForLanguageModeling, SchedulerType, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a Masked Language Modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--max_seq_length", type=int, default=None, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated." ), ) parser.add_argument( "--line_by_line", type=bool, default=False, help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss" ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] if extension not in ["csv", "json", "txt"]: raise ValueError("`train_file` should be a csv, json or txt file.") if args.validation_file is not None: extension = args.validation_file.split(".")[-1] if extension not in ["csv", "json", "txt"]: raise ValueError("`validation_file` should be a csv, json or txt file.") if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
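A small sketch of how the --mlm_probability argument defined in this entry is typically wired into the DataCollatorForLanguageModeling imported above; the tokenizer checkpoint and the sentence are placeholders.

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
batch = data_collator([tokenizer("Paris is the capital of France.")])
print(batch["input_ids"].shape, batch["labels"].shape)  # both [1, seq_len]; labels are -100 except at masked positions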
11,755
import argparse import json import logging import math import os import random from itertools import chain from pathlib import Path import datasets import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator, DistributedType from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForCausalLM, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--validation_split_percentage", default=5, help="The percentage of the train set used as validation set in case there's no validation split", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument( "--block_size", type=int, default=None, help=( "Optional input sequence length after tokenization. The training dataset will be truncated in block of" " this size for training. Default to the model max input length for single sentence inputs (take into" " account special tokens)." ), ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files." ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
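As with the other no_trainer entries, one hedged way to exercise this parse_args() is to patch sys.argv; the model, dataset, output path and block size below are placeholders.

import sys

sys.argv = [
    "run_clm_no_trainer.py",  # hypothetical script name
    "--model_name_or_path", "gpt2",
    "--dataset_name", "wikitext",
    "--dataset_config_name", "wikitext-2-raw-v1",
    "--output_dir", "./clm-output",
    "--block_size", "512",
]
args = parse_args()
print(args.learning_rate, args.num_train_epochs)  # parser defaults: 5e-05 3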
11,757
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import load_dataset import evaluate import transformers from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, M2M100Tokenizer, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, Seq2SeqTrainer, Seq2SeqTrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_translation", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") if data_args.source_prefix is None and model_args.model_name_or_path in [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", ]: logger.warning( "You're running a t5 model but didn't provide a source prefix, which is expected, e.g. with " "`--source_prefix 'translate English to German: ' `" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) # Get the datasets: you can either provide your own JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For translation, only JSON files are supported, with one field named "translation" containing two keys for the # source and target languages (unless you adapt what follows). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model.resize_token_embeddings(len(tokenizer)) # Set decoder_start_token_id if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.target_lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.target_lang) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # Preprocessing the datasets. # We need to tokenize inputs and targets. 
if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # For translation we set the codes of our source and target languages (only useful for mBART, the others will # ignore those attributes). if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): assert data_args.target_lang is not None and data_args.source_lang is not None, ( f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --source_lang and " "--target_lang arguments." ) tokenizer.src_lang = data_args.source_lang tokenizer.tgt_lang = data_args.target_lang # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument. forced_bos_token_id = ( tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None ) model.config.forced_bos_token_id = forced_bos_token_id # Get the language codes for input/target. source_lang = data_args.source_lang.split("_")[0] target_lang = data_args.target_lang.split("_")[0] # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for" f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" ) def preprocess_function(examples): inputs = [ex[source_lang] for ex in examples["translation"]] targets = [ex[target_lang] for ex in examples["translation"]] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if training_args.do_predict: max_target_length = data_args.val_max_target_length if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id if data_args.pad_to_max_length: data_collator = default_data_collator else: data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) # Metric metric = evaluate.load("sacrebleu") def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [[label.strip()] for label in labels] return preds, labels def compute_metrics(eval_preds): preds, labels = eval_preds if isinstance(preds, tuple): preds = preds[0] decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) if data_args.ignore_pad_token_for_loss: # Replace -100 in the labels as we can't decode them. 
labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) result = metric.compute(predictions=decoded_preds, references=decoded_labels) result = {"bleu": result["score"]} prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] result["gen_len"] = np.mean(prediction_lens) result = {k: round(v, 4) for k, v in result.items()} return result # Initialize our Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} max_length = ( training_args.generation_max_length if training_args.generation_max_length is not None else data_args.val_max_target_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval") max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") predict_results = trainer.predict( predict_dataset, metric_key_prefix="predict", max_length=max_length, num_beams=num_beams ) metrics = predict_results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if trainer.is_world_process_zero(): if training_args.predict_with_generate: predictions = tokenizer.batch_decode( predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) predictions = [pred.strip() for pred in predictions] output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt") with open(output_prediction_file, "w", encoding="utf-8") as writer: writer.write("\n".join(predictions)) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "translation"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} 
{data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name languages = [l for l in [data_args.source_lang, data_args.target_lang] if l is not None] if len(languages) > 0: kwargs["language"] = languages if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results def _mp_fn(index): # For xla_spawn (TPUs) main()
null
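A toy illustration of the sacrebleu call made inside compute_metrics in the entry above; the sentences are invented, and evaluate.load downloads the metric on first use.

import evaluate

metric = evaluate.load("sacrebleu")
decoded_preds = ["The cat sat on the mat."]
decoded_labels = [["The cat sat on the mat."]]  # sacrebleu expects a list of references per prediction
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
print({"bleu": round(result["score"], 4)})  # identical strings give a score of 100.0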
11,758
import argparse import json import logging import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, MBartTokenizer, MBartTokenizerFast, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--predict_with_generate", type=bool, default=True, help="", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--num_beams", type=int, default=None, help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--max_source_length", type=int, default=1024, help=( "The maximum total input sequence length after " "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", type=int, default=128, help=( "The maximum total sequence length for target text after " "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded." "during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--val_max_target_length", type=int, default=None, help=( "The maximum total sequence length for validation " "target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be " "padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` " "param of ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--pad_to_max_length", type=bool, default=False, help=( "Whether to pad all samples to model maximum sentence " "length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More" "efficient on GPU but very bad for TPU." ), ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." 
) parser.add_argument( "--ignore_pad_token_for_loss", type=bool, default=True, help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.", ) parser.add_argument("--source_lang", type=str, default=None, help="Source language id for translation.") parser.add_argument("--target_lang", type=str, default=None, help="Target language id for translation.") parser.add_argument( "--source_prefix", type=str, default=None, help="A prefix to add before every source text (useful for T5 models).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
11,759
argparse import math import os from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Union import datasets import torch from datasets import DatasetDict, concatenate_datasets, load_dataset from torch.utils.data.dataloader import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator from accelerate.logging import get_logger from huggingface_hub import Repository from transformers import ( AdamW, SchedulerType, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, get_scheduler, is_wandb_available, set_seed, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices from transformers.utils import get_full_repo_name, send_example_telemetry def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_names", nargs="+", type=str, required=True, help="The configuration names of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_split_names", nargs="+", type=str, required=True, help="The names of the training data set splits to use (via the datasets library).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--preprocessing_only", action="store_true", help="Only run the preprocessing script to be cached for future use", ) parser.add_argument( "--cache_dir", type=str, default=None, help="Where do you want to store the pretrained models downloaded from huggingface.co", ) parser.add_argument( "--validation_split_percentage", type=int, default=1, help="Percentage of training data that should be used for validation if no validation is present in dataset.", ) parser.add_argument( "--logging_steps", type=int, default=500, help="Number of steps between each logging", ) parser.add_argument( "--saving_steps", type=int, default=500, help="Number of steps between each logging", ) parser.add_argument( "--audio_column_name", type=str, default="audio", help="Column in the dataset that contains speech file path. 
Defaults to 'audio'", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--train_cache_file_name", type=str, default=None, help="Path to the train cached file name", ) parser.add_argument( "--validation_cache_file_name", type=str, default=None, help="Path to the validation cached file name", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--gradient_checkpointing", action="store_true", help="If True, use gradient checkpointing to save memory at the expense of slower backward pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=0, help="A seed for reproducible training.") parser.add_argument( "--max_gumbel_temperature", type=float, default=2.0, help="Maximum temperature for gumbel softmax.", ) parser.add_argument( "--min_gumbel_temperature", type=float, default=0.5, help="Minimum temperature for gumbel softmax.", ) parser.add_argument( "--gumbel_temperature_decay", type=float, default=0.999995, help="Decay of gumbel temperature during training." ) parser.add_argument( "--max_duration_in_seconds", type=float, default=5.0, help="Filter out audio files that are longer than `max_duration_in_seconds` seconds", ) parser.add_argument( "--min_duration_in_seconds", type=float, default=3.0, help="Filter out audio files that are shorter than `min_duration_in_seconds` seconds", ) parser.add_argument( "--pad_to_multiple_of", type=int, default=None, help=( "If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the" " use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta)." 
), ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") args = parser.parse_args() if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args
null
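A minimal smoke test of the parse_args above, runnable inside the same script. Every flag used below is defined in the parser shown; the dataset, model path and output directory values are placeholders chosen for illustration, not values taken from the original.

import sys

# Placeholder CLI values; only the flag names come from parse_args above.
sys.argv = [
    "run_wav2vec2_pretraining_no_trainer.py",
    "--dataset_name", "librispeech_asr",
    "--dataset_config_names", "clean",
    "--dataset_split_names", "train.100",
    "--model_name_or_path", "./my-wav2vec2-config",
    "--output_dir", "./wav2vec2-pretrained",
    "--per_device_train_batch_size", "4",
    "--max_train_steps", "200",
]
args = parse_args()
print(args.dataset_config_names, args.max_train_steps)  # ['clean'] 200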
11,760
import math import os from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Union import datasets import torch from datasets import DatasetDict, concatenate_datasets, load_dataset from torch.utils.data.dataloader import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator from accelerate.logging import get_logger from huggingface_hub import Repository from transformers import ( AdamW, SchedulerType, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, get_scheduler, is_wandb_available, set_seed, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices from transformers.utils import get_full_repo_name, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `multiply_grads` function. Write a Python function `def multiply_grads(params, c)` to solve the following problem: Multiplies grads by a constant *c*. Here is the function: def multiply_grads(params, c): """Multiplies grads by a constant *c*.""" for p in params: if p.grad is not None: if torch.is_tensor(c): c = c.to(p.grad.device) p.grad.data.mul_(c)
Multiplies grads by a constant *c*.
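An illustrative use of multiply_grads after a backward pass, e.g. to divide accumulated gradients by the number of samples that contributed to a summed loss. The toy linear model is a stand-in, not part of the original script.

import torch

model = torch.nn.Linear(4, 2)                            # toy module for illustration
loss = model(torch.randn(8, 4)).sum()
loss.backward()

num_samples = torch.tensor(8.0)
multiply_grads(model.parameters(), 1.0 / num_samples)    # rescales p.grad in place
print(next(model.parameters()).grad.abs().mean())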
11,761
import math import os from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Union import datasets import torch from datasets import DatasetDict, concatenate_datasets, load_dataset from torch.utils.data.dataloader import DataLoader from tqdm.auto import tqdm import transformers from accelerate import Accelerator from accelerate.logging import get_logger from huggingface_hub import Repository from transformers import ( AdamW, SchedulerType, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, get_scheduler, is_wandb_available, set_seed, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices, _sample_negative_indices from transformers.utils import get_full_repo_name, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `get_grad_norm` function. Write a Python function `def get_grad_norm(params, scale=1)` to solve the following problem: Compute grad norm given a gradient scale. Here is the function: def get_grad_norm(params, scale=1): """Compute grad norm given a gradient scale.""" total_norm = 0.0 for p in params: if p.grad is not None: param_norm = (p.grad.detach().data / scale).norm(2) total_norm += param_norm.item() ** 2 total_norm = total_norm**0.5 return total_norm
Compute grad norm given a gradient scale.
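A quick sanity check of get_grad_norm against torch.nn.utils.clip_grad_norm_, assuming gradients already exist on a toy module; `scale` would be the loss-scaler value when scaled fp16 gradients are being inspected.

import torch

model = torch.nn.Linear(4, 2)                                          # toy module
model(torch.randn(8, 4)).sum().backward()

norm = get_grad_norm(model.parameters(), scale=1)
reference = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=float("inf"))
print(norm, reference.item())                                          # both report the same L2 norm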
11,762
import json import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from PIL import Image from torch.utils.data import DataLoader from torchvision import transforms from torchvision.transforms import functional from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository, hf_hub_download from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForSemanticSegmentation, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version def pad_if_smaller(img, size, fill=0): min_size = min(img.size) if min_size < size: original_width, original_height = img.size pad_height = size - original_height if original_height < size else 0 pad_width = size - original_width if original_width < size else 0 img = functional.pad(img, (0, 0, pad_width, pad_height), fill=fill) return img
null
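A small check of pad_if_smaller on PIL images, with arbitrary sizes: an image smaller than the target is padded on the right and bottom, while one that is already large enough is returned untouched.

from PIL import Image

small = Image.new("RGB", (400, 300))            # width x height
print(pad_if_smaller(small, 512).size)          # (512, 512)

large = Image.new("RGB", (600, 700))
print(pad_if_smaller(large, 512).size)          # (600, 700), unchanged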
11,763
argparse import json import math import os import random from pathlib import Path import datasets import numpy as np import torch from datasets import load_dataset from PIL import Image from torch.utils.data import DataLoader from torchvision import transforms from torchvision.transforms import functional from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from huggingface_hub import Repository, hf_hub_download from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForSemanticSegmentation, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--model_name_or_path", type=str, help="Path to a pretrained model or model identifier from huggingface.co/models.", default="nvidia/mit-b0", ) parser.add_argument( "--dataset_name", type=str, help="Name of the dataset on the hub.", default="segments/sidewalk-semantic", ) parser.add_argument( "--reduce_labels", action="store_true", help="Whether or not to reduce all labels by 1 and replace background by 255.", ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Fraction of the dataset to be used for validation.", ) parser.add_argument( "--cache_dir", type=str, help="Path to a folder in which the model and dataset will be cached.", ) parser.add_argument( "--use_auth_token", action="store_true", help="Whether to use an authentication token to access the model repository.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="polynomial", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", required=False, action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.push_to_hub or args.with_tracking: if args.output_dir is None: raise ValueError( "Need an `output_dir` to create a repo when `--push_to_hub` or `with_tracking` is specified." ) if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args
null
11,764
import json import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from PIL import Image from torch import nn from torchvision import transforms from torchvision.transforms import functional import evaluate import transformers from huggingface_hub import hf_hub_download from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForSemanticSegmentation, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def pad_if_smaller(img, size, fill=0): min_size = min(img.size) if min_size < size: original_width, original_height = img.size pad_height = size - original_height if original_height < size else 0 pad_width = size - original_width if original_width < size else 0 img = functional.pad(img, (0, 0, pad_width, pad_height), fill=fill) return img
null
11,768
import argparse import logging import numpy as np import torch from transformers import AutoModelForCausalLM, AutoTokenizer def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
null
11,769
import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) def set_seed(args): np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed)
null
11,770
import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) logger = logging.getLogger(__name__) def prepare_ctrl_input(args, _, tokenizer, prompt_text): if args.temperature > 0.7: logger.info("CTRL typically works better with lower temperatures (and lower top_k).") encoded_prompt = tokenizer.encode(prompt_text, add_special_tokens=False) if not any(encoded_prompt[0] == x for x in tokenizer.control_codes.values()): logger.info("WARNING! You are not starting your generation from a control code so you won't get good results") return prompt_text
null
11,771
import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) def prepare_xlm_input(args, model, tokenizer, prompt_text): # kwargs = {"language": None, "mask_token_id": None} # Set the language use_lang_emb = hasattr(model.config, "use_lang_emb") and model.config.use_lang_emb if hasattr(model.config, "lang2id") and use_lang_emb: available_languages = model.config.lang2id.keys() if args.xlm_language in available_languages: language = args.xlm_language else: language = None while language not in available_languages: language = input("Using XLM. Select language in " + str(list(available_languages)) + " >>> ") model.config.lang_id = model.config.lang2id[language] # kwargs["language"] = tokenizer.lang2id[language] # TODO fix mask_token_id setup when configurations will be synchronized between models and tokenizers # XLM masked-language modeling (MLM) models need masked token # is_xlm_mlm = "mlm" in args.model_name_or_path # if is_xlm_mlm: # kwargs["mask_token_id"] = tokenizer.mask_token_id return prompt_text
null
11,772
import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos>""" def prepare_xlnet_input(args, _, tokenizer, prompt_text): prefix = args.prefix if args.prefix else args.padding_text if args.padding_text else PREFIX prompt_text = prefix + prompt_text return prompt_text
null
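A sketch of the XLNet prompt preparation in isolation; `args` is mocked with a Namespace because the real object comes from the script's argument parser, and the model/tokenizer arguments are unused by this helper.

from argparse import Namespace

args = Namespace(prefix="", padding_text="")          # fall back to the built-in PREFIX
prompt = prepare_xlnet_input(args, None, None, "Today the weather is nice and")
print(prompt.startswith("In 1991, the remains"))      # True: PREFIX is prepended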
11,773
import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) PREFIX = """In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision and denounces one of the men as a horse thief. Although his father initially slaps him for making such an accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, begging for his blessing. <eod> </s> <eos>""" def prepare_transfoxl_input(args, _, tokenizer, prompt_text): prefix = args.prefix if args.prefix else args.padding_text if args.padding_text else PREFIX prompt_text = prefix + prompt_text return prompt_text
null
11,774
import argparse import logging import numpy as np import torch from transformers import ( CTRLLMHeadModel, CTRLTokenizer, GPT2LMHeadModel, GPT2Tokenizer, OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, TransfoXLLMHeadModel, TransfoXLTokenizer, XLMTokenizer, XLMWithLMHeadModel, XLNetLMHeadModel, XLNetTokenizer, ) MAX_LENGTH = int(10000) def adjust_length_to_model(length, max_sequence_length): if length < 0 and max_sequence_length > 0: length = max_sequence_length elif 0 < max_sequence_length < length: length = max_sequence_length # No generation bigger than model size elif length < 0: length = MAX_LENGTH # avoid infinite loop return length
null
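The three branches of adjust_length_to_model, traced with small numbers (MAX_LENGTH is the 10000 fallback defined above).

print(adjust_length_to_model(length=-1, max_sequence_length=1024))    # 1024: use the model maximum
print(adjust_length_to_model(length=5000, max_sequence_length=1024))  # 1024: capped to the model size
print(adjust_length_to_model(length=-1, max_sequence_length=0))       # 10000: MAX_LENGTH fallback
print(adjust_length_to_model(length=50, max_sequence_length=1024))    # 50: already valid, unchanged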
11,775
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoFeatureExtractor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) mask = torch.stack([example["mask"] for example in examples]) return {"pixel_values": pixel_values, "bool_masked_pos": mask}
null
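How this collate_fn would plug into a DataLoader; the four fabricated examples below only mimic the shape of the preprocessed dataset (pixel values plus a boolean patch mask) and are not real data.

import torch
from torch.utils.data import DataLoader

examples = [
    {"pixel_values": torch.randn(3, 192, 192), "mask": torch.zeros(36, dtype=torch.bool)}
    for _ in range(4)
]
loader = DataLoader(examples, batch_size=2, collate_fn=collate_fn)
batch = next(iter(loader))
print(batch["pixel_values"].shape, batch["bool_masked_pos"].shape)   # (2, 3, 192, 192) (2, 36)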
11,776
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTFeatureExtractor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) return {"pixel_values": pixel_values}
null
11,777
import argparse import json import logging import math import os import random from pathlib import Path import datasets import nltk import numpy as np import torch from datasets import load_dataset from torch.utils.data import DataLoader from tqdm.auto import tqdm import evaluate import transformers from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from filelock import FileLock from huggingface_hub import Repository from transformers import ( CONFIG_MAPPING, MODEL_MAPPING, AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, SchedulerType, get_scheduler, ) from transformers.utils import check_min_version, get_full_repo_name, is_offline_mode, send_example_telemetry from transformers.utils.versions import require_version MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a summarization task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--ignore_pad_token_for_loss", type=bool, default=True, help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.", ) parser.add_argument( "--max_source_length", type=int, default=1024, help=( "The maximum total input sequence length after " "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--source_prefix", type=str, default=None, help="A prefix to add before every source text (useful for T5 models).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument( "--max_target_length", type=int, default=128, help=( "The maximum total sequence length for target text after " "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded." "during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--val_max_target_length", type=int, default=None, help=( "The maximum total sequence length for validation " "target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be " "padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` " "param of ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--num_beams", type=int, default=None, help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." 
), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--text_column", type=str, default=None, help="The name of the column in the datasets containing the full texts (for summarization).", ) parser.add_argument( "--summary_column", type=str, default=None, help="The name of the column in the datasets containing the summaries (for summarization).", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", choices=MODEL_TYPES, ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." 
) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
null
11,778
import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import nltk import numpy as np from datasets import load_dataset import evaluate import transformers from filelock import FileLock from transformers import ( AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, MBart50Tokenizer, MBart50TokenizerFast, MBartTokenizer, MBartTokenizerFast, Seq2SeqTrainer, Seq2SeqTrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry from transformers.utils.versions import require_version def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_summarization", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") if data_args.source_prefix is None and model_args.model_name_or_path in [ "t5-small", "t5-base", "t5-large", "t5-3b", "t5-11b", ]: logger.warning( "You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with " "`--source_prefix 'summarize: ' `" ) # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. 
set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full texts and the second column for the # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model.resize_token_embeddings(len(tokenizer)) if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)): if isinstance(tokenizer, MBartTokenizer): model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang] else: model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") if ( hasattr(model.config, "max_position_embeddings") and model.config.max_position_embeddings < data_args.max_source_length ): if model_args.resize_position_embeddings is None: logger.warning( "Increasing the model's number of position embedding vectors from" f" {model.config.max_position_embeddings} to {data_args.max_source_length}." 
) model.resize_position_embeddings(data_args.max_source_length) elif model_args.resize_position_embeddings: model.resize_position_embeddings(data_args.max_source_length) else: raise ValueError( f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has" f" {model.config.max_position_embeddings} position encodings. Consider either reducing" f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the" " model's position encodings by passing `--resize_position_embeddings`." ) prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names elif training_args.do_predict: column_names = raw_datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)): assert ( data_args.lang is not None ), f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument" tokenizer.src_lang = data_args.lang tokenizer.tgt_lang = data_args.lang # For multilingual translation models like mBART-50 and M2M100 we need to force the target language token # as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument. forced_bos_token_id = ( tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None ) model.config.forced_bos_token_id = forced_bos_token_id # Get the column names for input/target. dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None) if data_args.text_column is None: text_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: text_column = data_args.text_column if text_column not in column_names: raise ValueError( f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.summary_column is None: summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: summary_column = data_args.summary_column if summary_column not in column_names: raise ValueError( f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for" f"`{model.__class__.__name__}`. 
This will lead to loss being calculated twice and will take up more memory" ) def preprocess_function(examples): # remove pairs where at least one record is None inputs, targets = [], [] for i in range(len(examples[text_column])): if examples[text_column][i] and examples[summary_column][i]: inputs.append(examples[text_column][i]) targets.append(examples[summary_column][i]) inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) if training_args.do_predict: max_target_length = data_args.val_max_target_length if "test" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", ) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) # Metric metric = evaluate.load("rouge") def postprocess_text(preds, labels): preds = [pred.strip() 
for pred in preds] labels = [label.strip() for label in labels] # rougeLSum expects newline after each sentence preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] return preds, labels def compute_metrics(eval_preds): preds, labels = eval_preds if isinstance(preds, tuple): preds = preds[0] decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) if data_args.ignore_pad_token_for_loss: # Replace -100 in the labels as we can't decode them. labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) result = {k: round(v * 100, 4) for k, v in result.items()} prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds] result["gen_len"] = np.mean(prediction_lens) return result # Initialize our Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} max_length = ( training_args.generation_max_length if training_args.generation_max_length is not None else data_args.val_max_target_length ) num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval") max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") predict_results = trainer.predict( predict_dataset, metric_key_prefix="predict", max_length=max_length, num_beams=num_beams ) metrics = predict_results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if trainer.is_world_process_zero(): if training_args.predict_with_generate: predictions = tokenizer.batch_decode( predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) predictions = [pred.strip() for pred in predictions] 
output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt") with open(output_prediction_file, "w") as writer: writer.write("\n".join(predictions)) kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if data_args.lang is not None: kwargs["language"] = data_args.lang if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results def _mp_fn(index): # For xla_spawn (TPUs) main()
null
11,779
import logging import math import os import random import sys import time from dataclasses import dataclass, field from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, HfArgumentParser, PretrainedConfig, TrainingArguments, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `create_train_state` function. Write a Python function `def create_train_state( model: FlaxAutoModelForSequenceClassification, learning_rate_fn: Callable[[int], float], is_regression: bool, num_labels: int, weight_decay: float, ) -> train_state.TrainState` to solve the following problem: Create initial training state. Here is the function: def create_train_state( model: FlaxAutoModelForSequenceClassification, learning_rate_fn: Callable[[int], float], is_regression: bool, num_labels: int, weight_decay: float, ) -> train_state.TrainState: """Create initial training state.""" class TrainState(train_state.TrainState): """Train state with an Optax optimizer. The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss. """ logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = set( [ layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() ] ) flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) tx = optax.adamw( learning_rate=learning_rate_fn, b1=0.9, b2=0.999, eps=1e-6, weight_decay=weight_decay, mask=decay_mask_fn ) if is_regression: def mse_loss(logits, labels): return jnp.mean((logits[..., 0] - labels) ** 2) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits[..., 0], loss_fn=mse_loss, ) else: # Classification. def cross_entropy_loss(logits, labels): xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels)) return jnp.mean(xentropy) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits.argmax(-1), loss_fn=cross_entropy_loss, )
Create initial training state.
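A hedged sketch of building the initial state: the checkpoint name, label count and schedule sizes are placeholders, and the learning-rate function is the helper defined in the next entry (any callable mapping a step to a learning rate would do).

from transformers import FlaxAutoModelForSequenceClassification

model = FlaxAutoModelForSequenceClassification.from_pretrained("bert-base-cased", num_labels=2)
learning_rate_fn = create_learning_rate_fn(
    train_ds_size=8_000, train_batch_size=32, num_train_epochs=3, num_warmup_steps=100, learning_rate=2e-5
)
state = create_train_state(model, learning_rate_fn, is_regression=False, num_labels=2, weight_decay=0.01)
print(type(state).__name__, int(state.step))   # TrainState 0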
11,780
import logging import math import os import random import sys import time from dataclasses import dataclass, field from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, HfArgumentParser, PretrainedConfig, TrainingArguments, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem: Returns a linear warmup, linear_decay learning rate function. Here is the function: def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn
Returns a linear warmup, linear_decay learning rate function.
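Probing the returned schedule at a few steps: with 3,200 examples and batch size 32 there are 100 steps per epoch, so 10 epochs give 1,000 total steps, the first 100 of which warm up.

lr_fn = create_learning_rate_fn(
    train_ds_size=3_200, train_batch_size=32, num_train_epochs=10, num_warmup_steps=100, learning_rate=1e-3
)
print(float(lr_fn(0)), float(lr_fn(100)), float(lr_fn(550)), float(lr_fn(1000)))
# 0.0  0.001  0.0005  0.0  (linear warmup to the peak, then linear decay to zero)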
11,781
import logging import math import os import random import sys import time from dataclasses import dataclass, field from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, HfArgumentParser, PretrainedConfig, TrainingArguments, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry Dataset = datasets.arrow_dataset.Dataset PRNGKey = Any The provided code snippet includes necessary dependencies for implementing the `glue_train_data_collator` function. Write a Python function `def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int)` to solve the following problem: Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices. Here is the function: def glue_train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch
Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.
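A hedged usage sketch for the collator above, with a small invented dataset; it assumes `glue_train_data_collator` (and the `shard` import) from this record are in scope, and that the batch size is divisible by the local device count.

import jax
from datasets import Dataset

# Toy pre-tokenized split with 10 examples (feature values are illustrative).
toy_train = Dataset.from_dict(
    {
        "input_ids": [[101, i, 102] for i in range(10)],
        "attention_mask": [[1, 1, 1]] * 10,
        "labels": [i % 2 for i in range(10)],
    }
)

rng = jax.random.PRNGKey(0)
# With batch_size=4 only two batches are yielded; the incomplete batch is dropped.
for batch in glue_train_data_collator(rng, toy_train, batch_size=4):
    # `shard` adds a leading device axis: (num_local_devices, per_device_batch, ...).
    print({k: v.shape for k, v in batch.items()})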
11,782
import logging import math import os import random import sys import time from dataclasses import dataclass, field from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForSequenceClassification, HfArgumentParser, PretrainedConfig, TrainingArguments, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry Dataset = datasets.arrow_dataset.Dataset The provided code snippet includes necessary dependencies for implementing the `glue_eval_data_collator` function. Write a Python function `def glue_eval_data_collator(dataset: Dataset, batch_size: int)` to solve the following problem: Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop. Here is the function: def glue_eval_data_collator(dataset: Dataset, batch_size: int): """Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop.""" batch_idx = np.arange(len(dataset)) steps_per_epoch = math.ceil(len(dataset) / batch_size) batch_idx = np.array_split(batch_idx, steps_per_epoch) for idx in batch_idx: batch = dataset[idx] batch = {k: np.array(v) for k, v in batch.items()} yield batch
Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop.
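A matching sketch for the evaluation collator; unlike the train collator it keeps every example, which is why the surrounding eval loop pairs it with `pad_shard_unpad`. The toy dataset is invented for illustration and assumes `glue_eval_data_collator` from this record is in scope.

from datasets import Dataset

toy_eval = Dataset.from_dict(
    {
        "input_ids": [[101, i, 102] for i in range(10)],
        "labels": [i % 2 for i in range(10)],
    }
)

# `np.array_split` over 10 examples with batch_size=4 yields batches of 4, 3 and 3.
for batch in glue_eval_data_collator(toy_eval, batch_size=4):
    print({k: v.shape for k, v in batch.items()})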
11,783
import json import logging import os import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from functools import partial from pathlib import Path from typing import Callable, Optional import datasets import nltk import numpy as np from datasets import Dataset, load_dataset from PIL import Image from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from filelock import FileLock from flax import jax_utils, traverse_util from flax.jax_utils import unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import Repository from transformers import ( AutoFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `shift_tokens_right` function. Write a Python function `def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray` to solve the following problem: Shift input ids one token to the right. Here is the function: def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray: """ Shift input ids one token to the right. """ shifted_input_ids = np.zeros_like(input_ids) shifted_input_ids[:, 1:] = input_ids[:, :-1] shifted_input_ids[:, 0] = decoder_start_token_id shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids) return shifted_input_ids
Shift input ids one token to the right.
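A tiny worked example of the shift (pure NumPy, values invented): the decoder start token is prepended, the last label is dropped, and any -100 that survives the shift is replaced by the pad id.

import numpy as np

labels = np.array([[5, 6, -100, -100]])  # -100 marks ignored/padded label positions
decoder_input_ids = shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2)
print(decoder_input_ids)  # [[2 5 6 0]]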
11,784
import json import logging import os import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from functools import partial from pathlib import Path from typing import Callable, Optional import datasets import nltk import numpy as np from datasets import Dataset, load_dataset from PIL import Image from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from filelock import FileLock from flax import jax_utils, traverse_util from flax.jax_utils import unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import Repository from transformers import ( AutoFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `data_loader` function. Write a Python function `def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False)` to solve the following problem: Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`. Here is the function: def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False): """ Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`. """ steps = len(dataset) // batch_size # Skip incomplete batch. # We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a # dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation # mechanism, which works differently from NumPy/SciPy. if shuffle: batch_idx = jax.random.permutation(rng, len(dataset)) batch_idx = np.asarray(batch_idx) else: batch_idx = np.arange(len(dataset)) for idx in range(steps): start_idx = batch_size * idx end_idx = batch_size * (idx + 1) selected_indices = batch_idx[start_idx:end_idx] batch = dataset[selected_indices] batch = shard(batch) yield batch
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`.
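A hedged usage sketch for `data_loader`; the toy dataset and its columns are invented (in the captioning script the real columns would be image features and tokenized captions), and numpy formatting is set explicitly here so that `shard` receives arrays rather than Python lists.

import jax
from datasets import Dataset

toy_ds = Dataset.from_dict(
    {
        "input_ids": [[1, 2, 3]] * 9,
        "labels": list(range(9)),
    }
).with_format("numpy")

rng = jax.random.PRNGKey(0)
# batch_size=4 over 9 examples -> 2 shuffled batches; the last example is dropped.
for batch in data_loader(rng, toy_ds, batch_size=4, shuffle=True):
    print({k: v.shape for k, v in batch.items()})  # leading axis = local device count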
11,785
import json import logging import os import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from functools import partial from pathlib import Path from typing import Callable, Optional import datasets import nltk import numpy as np from datasets import Dataset, load_dataset from PIL import Image from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from filelock import FileLock from flax import jax_utils, traverse_util from flax.jax_utils import unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import Repository from transformers import ( AutoFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"): if train_time: summary_writer.scalar("train_time", train_time, step) metrics = get_metrics(metrics) for key, vals in metrics.items(): tag = f"{metric_key_prefix}_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) else: for metric_name, value in metrics.items(): summary_writer.scalar(f"{metric_key_prefix}_{metric_name}", value, step)
null
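A short, hedged sketch of how `write_metric` might be driven. It assumes Flax's TensorBoard `SummaryWriter` (the surrounding script imports `is_tensorboard_available` to guard this) and invented metric values; the log directory is hypothetical.

import jax.numpy as jnp
from flax.metrics.tensorboard import SummaryWriter

summary_writer = SummaryWriter(log_dir="./runs")  # hypothetical log directory

# Training metrics: a list of per-step dicts whose leaves carry a leading
# device axis (a single device here), which is what `get_metrics` expects.
train_metrics = [
    {"loss": jnp.array([0.93]), "learning_rate": jnp.array([3e-5])},
    {"loss": jnp.array([0.71]), "learning_rate": jnp.array([2e-5])},
]
write_metric(summary_writer, train_metrics, train_time=42.0, step=2)

# Evaluation metrics: a plain dict with train_time=None takes the `else` branch.
write_metric(summary_writer, {"loss": 0.65}, train_time=None, step=2, metric_key_prefix="eval")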
11,786
import json import logging import os import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from functools import partial from pathlib import Path from typing import Callable, Optional import datasets import nltk import numpy as np from datasets import Dataset, load_dataset from PIL import Image from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from filelock import FileLock from flax import jax_utils, traverse_util from flax.jax_utils import unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key from huggingface_hub import Repository from transformers import ( AutoFeatureExtractor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem: Returns a linear warmup, linear_decay learning rate function. Here is the function: def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn
Returns a linear warmup, linear_decay learning rate function.
11,787
import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from itertools import chain from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import ClassLabel, load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForTokenClassification, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replacing `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d The provided code snippet includes necessary dependencies for implementing the `create_train_state` function. Write a Python function `def create_train_state( model: FlaxAutoModelForTokenClassification, learning_rate_fn: Callable[[int], float], num_labels: int, training_args: TrainingArguments, ) -> train_state.TrainState` to solve the following problem: Create initial training state. Here is the function: def create_train_state( model: FlaxAutoModelForTokenClassification, learning_rate_fn: Callable[[int], float], num_labels: int, training_args: TrainingArguments, ) -> train_state.TrainState: """Create initial training state.""" class TrainState(train_state.TrainState): """Train state with an Optax optimizer. The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss. """ logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = set( [ layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() ] ) flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) tx = optax.adamw( learning_rate=learning_rate_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) def cross_entropy_loss(logits, labels): xentropy = optax.softmax_cross_entropy(logits, onehot(labels, num_classes=num_labels)) return jnp.mean(xentropy) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits.argmax(-1), loss_fn=cross_entropy_loss, )
Create initial training state.
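A heavily hedged wiring sketch for the function above: the checkpoint name, label count, and hyperparameters are placeholders, `TrainingArguments` refers to the dataclass defined in this record (not `transformers.TrainingArguments`), and `create_learning_rate_fn` is the schedule helper from the neighbouring records.

from transformers import FlaxAutoModelForTokenClassification

model = FlaxAutoModelForTokenClassification.from_pretrained(
    "bert-base-cased",  # hypothetical checkpoint
    num_labels=9,       # e.g. a CoNLL-style tag set
)
training_args = TrainingArguments(output_dir="./out")  # defaults for everything else

lr_fn = create_learning_rate_fn(
    train_ds_size=14_000, train_batch_size=32, num_train_epochs=3,
    num_warmup_steps=0, learning_rate=3e-5,
)
state = create_train_state(model, lr_fn, num_labels=9, training_args=training_args)

# `state` now bundles the parameters, an AdamW optimizer whose weight decay
# skips bias/LayerNorm via decay_mask_fn, and the logits_fn/loss_fn helpers
# used by the pmapped train and eval steps.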
11,788
import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from itertools import chain from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import ClassLabel, load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForTokenClassification, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem: Returns a linear warmup, linear_decay learning rate function. Here is the function: def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn
Returns a linear warmup, linear_decay learning rate function.
11,789
import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from itertools import chain from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import ClassLabel, load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForTokenClassification, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version Dataset = datasets.arrow_dataset.Dataset PRNGKey = Any The provided code snippet includes necessary dependencies for implementing the `train_data_collator` function. Write a Python function `def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int)` to solve the following problem: Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices. Here is the function: def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch
Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.
11,790
import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from itertools import chain from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import ClassLabel, load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, FlaxAutoModelForTokenClassification, HfArgumentParser, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from transformers.utils.versions import require_version Dataset = datasets.arrow_dataset.Dataset The provided code snippet includes necessary dependencies for implementing the `eval_data_collator` function. Write a Python function `def eval_data_collator(dataset: Dataset, batch_size: int)` to solve the following problem: Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop. Here is the function: def eval_data_collator(dataset: Dataset, batch_size: int): """Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop.""" batch_idx = np.arange(len(dataset)) steps_per_epoch = math.ceil(len(dataset) / batch_size) batch_idx = np.array_split(batch_idx, steps_per_epoch) for idx in batch_idx: batch = dataset[idx] batch = {k: np.array(v) for k, v in batch.items()} yield batch
Returns batches of size `batch_size` from `eval dataset`. Sharding handled by `pad_shard_unpad` in the eval loop.
11,791
import json import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, FlaxAutoModelForQuestionAnswering, HfArgumentParser, PreTrainedTokenizerFast, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from utils_qa import postprocess_qa_predictions class TrainingArguments: output_dir: str = field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) overwrite_output_dir: bool = field( default=False, metadata={ "help": ( "Overwrite the content of the output directory. " "Use this to continue training if output_dir points to a checkpoint directory." ) }, ) do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."}) do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."}) per_device_train_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."}) weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."}) adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"}) adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"}) adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."}) adafactor: bool = field(default=False, metadata={"help": "Whether or not to replace AdamW by Adafactor."}) num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."}) warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."}) logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."}) save_steps: int = field(default=500, metadata={"help": "Save checkpoint every X updates steps."}) eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."}) seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."}) push_to_hub: bool = field( default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."} ) hub_model_id: str = field( default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."} ) hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."}) def __post_init__(self): if self.output_dir is not None: self.output_dir = os.path.expanduser(self.output_dir) def to_dict(self): """ Serializes this instance while replacing `Enum` by their values (for JSON serialization support). It obfuscates the token values by removing their value. """ d = asdict(self) for k, v in d.items(): if isinstance(v, Enum): d[k] = v.value if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum): d[k] = [x.value for x in v] if k.endswith("_token"): d[k] = f"<{k.upper()}>" return d The provided code snippet includes necessary dependencies for implementing the `create_train_state` function. Write a Python function `def create_train_state( model: FlaxAutoModelForQuestionAnswering, learning_rate_fn: Callable[[int], float], num_labels: int, training_args: TrainingArguments, ) -> train_state.TrainState` to solve the following problem: Create initial training state. Here is the function: def create_train_state( model: FlaxAutoModelForQuestionAnswering, learning_rate_fn: Callable[[int], float], num_labels: int, training_args: TrainingArguments, ) -> train_state.TrainState: """Create initial training state.""" class TrainState(train_state.TrainState): """Train state with an Optax optimizer. The two functions below differ depending on whether the task is classification or regression. Args: logits_fn: Applied to last layer to obtain the logits. loss_fn: Function to compute the loss. """ logits_fn: Callable = struct.field(pytree_node=False) loss_fn: Callable = struct.field(pytree_node=False) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) # find out all LayerNorm parameters layer_norm_candidates = ["layernorm", "layer_norm", "ln"] layer_norm_named_params = set( [ layer[-2:] for layer_norm_name in layer_norm_candidates for layer in flat_params.keys() if layer_norm_name in "".join(layer).lower() ] ) flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) tx = optax.adamw( learning_rate=learning_rate_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) def cross_entropy_loss(logits, labels): start_loss = optax.softmax_cross_entropy(logits[0], onehot(labels[0], num_classes=num_labels)) end_loss = optax.softmax_cross_entropy(logits[1], onehot(labels[1], num_classes=num_labels)) xentropy = (start_loss + end_loss) / 2.0 return jnp.mean(xentropy) return TrainState.create( apply_fn=model.__call__, params=model.params, tx=tx, logits_fn=lambda logits: logits, loss_fn=cross_entropy_loss, )
Create initial training state.
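The only substantive change from the token-classification variant above is the loss: start and end logits are scored separately against the start/end positions and averaged. A small numeric sketch of that computation (shapes and values invented):

import jax.numpy as jnp
import optax
from flax.training.common_utils import onehot

# Batch of 2, sequence length 5; for QA, num_labels plays the role of the
# sequence length, since start/end positions are classified over tokens.
start_logits = jnp.zeros((2, 5))
end_logits = jnp.zeros((2, 5))
start_positions = jnp.array([1, 3])
end_positions = jnp.array([2, 4])

start_loss = optax.softmax_cross_entropy(start_logits, onehot(start_positions, 5))
end_loss = optax.softmax_cross_entropy(end_logits, onehot(end_positions, 5))
loss = jnp.mean((start_loss + end_loss) / 2.0)
print(loss)  # log(5) ~= 1.609 for uniform (all-zero) logits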
11,792
import json import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, FlaxAutoModelForQuestionAnswering, HfArgumentParser, PreTrainedTokenizerFast, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from utils_qa import postprocess_qa_predictions The provided code snippet includes necessary dependencies for implementing the `create_learning_rate_fn` function. Write a Python function `def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]` to solve the following problem: Returns a linear warmup, linear_decay learning rate function. Here is the function: def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn
Returns a linear warmup, linear_decay learning rate function.
11,793
import json import logging import math import os import random import sys import time from dataclasses import asdict, dataclass, field from enum import Enum from pathlib import Path from typing import Any, Callable, Dict, Optional, Tuple import datasets import numpy as np from datasets import load_dataset from tqdm import tqdm import evaluate import jax import jax.numpy as jnp import optax import transformers from flax import struct, traverse_util from flax.jax_utils import pad_shard_unpad, replicate, unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from huggingface_hub import Repository from transformers import ( AutoConfig, AutoTokenizer, EvalPrediction, FlaxAutoModelForQuestionAnswering, HfArgumentParser, PreTrainedTokenizerFast, is_tensorboard_available, ) from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry from utils_qa import postprocess_qa_predictions Dataset = datasets.arrow_dataset.Dataset PRNGKey = Any The provided code snippet includes necessary dependencies for implementing the `train_data_collator` function. Write a Python function `def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int)` to solve the following problem: Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices. Here is the function: def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch
Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.