code
stringlengths
17
6.64M
def set_seed(args):
    """Seed the python, numpy and torch RNGs from args.seed for reproducibility."""
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # When CUDA training is active, seed every visible GPU as well.
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def _sorted_checkpoints(args, checkpoint_prefix='checkpoint', use_mtime=False) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = glob.glob(os.path.join(args.output_dir, '{}-*'.format(checkpoint_prefix))) for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match('.*{}-([0-9]+)'.format(checkpoint_prefix), path) if (regex_match and regex_match.groups()): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] return checkpoints_sorted
def _rotate_checkpoints(args, checkpoint_prefix='checkpoint', use_mtime=False) -> None:
    """Delete the oldest checkpoints so at most args.save_total_limit remain.

    No-op when the limit is unset or non-positive, or when the number of
    existing checkpoints is already within the limit.
    """
    limit = args.save_total_limit
    if not limit:
        return
    if limit <= 0:
        return
    checkpoints = _sorted_checkpoints(args, checkpoint_prefix, use_mtime)
    excess = max(0, len(checkpoints) - limit)
    if excess == 0:
        return
    # _sorted_checkpoints is oldest-first, so the head of the list is deleted.
    for stale in checkpoints[:excess]:
        logger.info('Deleting older checkpoint [{}] due to args.save_total_limit'.format(stale))
        shutil.rmtree(stale)
def mask_tokens(inputs: torch.Tensor, tokenizer: PreTrainedTokenizer, args) -> Tuple[(torch.Tensor, torch.Tensor)]:
    """Prepare masked-LM inputs/labels: 80% [MASK], 10% random token, 10% unchanged.

    NOTE: the order of the torch.bernoulli / torch.randint calls is part of the
    contract (it fixes the RNG stream) and must not be rearranged.
    """
    if tokenizer.mask_token is None:
        raise ValueError('This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer.')
    labels = inputs.clone()
    # Start from a uniform masking probability, then zero out positions that
    # must never be masked: special tokens and (when defined) padding.
    probability_matrix = torch.full(labels.shape, args.mlm_probability)
    special_tokens_mask = [
        tokenizer.get_special_tokens_mask(row, already_has_special_tokens=True)
        for row in labels.tolist()
    ]
    probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0)
    if tokenizer._pad_token is not None:
        probability_matrix.masked_fill_(labels.eq(tokenizer.pad_token_id), value=0.0)
    masked_indices = torch.bernoulli(probability_matrix).bool()
    # Loss is computed only on masked positions; -100 is the ignore index.
    labels[~masked_indices] = -100
    # 80% of the masked positions become the [MASK] token.
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = tokenizer.convert_tokens_to_ids(tokenizer.mask_token)
    # Half of the remaining 20% (i.e. 10% overall) become a random vocab token.
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(len(tokenizer), labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]
    return (inputs, labels)
def train(args, train_dataset, model: PreTrainedModel, tokenizer: PreTrainedTokenizer) -> Tuple[(int, float)]:
    """Train the model; returns (global_step, average training loss).

    Supports distributed/DataParallel training, gradient accumulation,
    apex fp16, resuming from a 'checkpoint-<step>' directory, periodic
    TensorBoard logging and periodic checkpoint saving/rotation.
    """
    # TensorBoard writer only on the main process.
    if (args.local_rank in [(- 1), 0]):
        tb_writer = SummaryWriter()
    args.train_batch_size = (args.per_gpu_train_batch_size * max(1, args.n_gpu))

    def collate(examples: List[torch.Tensor]):
        # Pad variable-length examples into a batch; fall back to zero padding
        # when the tokenizer defines no pad token.
        if (tokenizer._pad_token is None):
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
    train_sampler = (RandomSampler(train_dataset) if (args.local_rank == (- 1)) else DistributedSampler(train_dataset))
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate)
    # t_total = number of optimizer updates; max_steps overrides num_train_epochs.
    if (args.max_steps > 0):
        t_total = args.max_steps
        args.num_train_epochs = ((args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps)) + 1)
    else:
        t_total = ((len(train_dataloader) // args.gradient_accumulation_steps) * args.num_train_epochs)
    # No weight decay on biases and LayerNorm weights.
    no_decay = ['bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{'params': [p for (n, p) in model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': args.weight_decay}, {'params': [p for (n, p) in model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
    optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total)
    # Restore optimizer/scheduler state when resuming from a checkpoint dir.
    if (args.model_name_or_path and os.path.isfile(os.path.join(args.model_name_or_path, 'optimizer.pt')) and os.path.isfile(os.path.join(args.model_name_or_path, 'scheduler.pt'))):
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'optimizer.pt')))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, 'scheduler.pt')))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
        (model, optimizer) = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # Multi-GPU (single process) and distributed (multi process) wrapping.
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    if (args.local_rank != (- 1)):
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
    logger.info('***** Running training *****')
    logger.info(' Num examples = %d', len(train_dataset))
    logger.info(' Num Epochs = %d', args.num_train_epochs)
    logger.info(' Instantaneous batch size per GPU = %d', args.per_gpu_train_batch_size)
    logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', ((args.train_batch_size * args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (args.local_rank != (- 1)) else 1)))
    logger.info(' Gradient Accumulation steps = %d', args.gradient_accumulation_steps)
    logger.info(' Total optimization steps = %d', t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # When resuming, parse the step count out of the '.../checkpoint-<step>' path
    # and fast-forward epoch/step counters accordingly.
    if (args.model_name_or_path and os.path.exists(args.model_name_or_path)):
        try:
            checkpoint_suffix = args.model_name_or_path.split('-')[(- 1)].split('/')[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = (global_step // (len(train_dataloader) // args.gradient_accumulation_steps))
            steps_trained_in_current_epoch = (global_step % (len(train_dataloader) // args.gradient_accumulation_steps))
            logger.info(' Continuing training from checkpoint, will skip to saved global_step')
            logger.info(' Continuing training from epoch %d', epochs_trained)
            logger.info(' Continuing training from global step %d', global_step)
            logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
        except ValueError:
            # Path did not end in a step number: start fresh.
            logger.info(' Starting fine-tuning.')
    (tr_loss, logging_loss) = (0.0, 0.0)
    # Resize embeddings in case tokens were added to the tokenizer.
    model_to_resize = (model.module if hasattr(model, 'module') else model)
    model_to_resize.resize_token_embeddings(len(tokenizer))
    model.zero_grad()
    train_iterator = trange(epochs_trained, int(args.num_train_epochs), desc='Epoch', disable=(args.local_rank not in [(- 1), 0]))
    # Re-seed so that resumed runs draw the same masking/shuffling stream.
    set_seed(args)
    for (epoch_num, _) in enumerate(train_iterator):
        # Distributed sampler needs the epoch to reshuffle deterministically.
        if isinstance(train_sampler, DistributedSampler):
            train_sampler.set_epoch(epoch_num)
        if getattr(args, 'evaluate_every_epoch', False):
            results = evaluate(args, model, tokenizer, prefix=f'epoch_{epoch_num}')
        epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(args.local_rank not in [(- 1), 0]))
        for (step, batch) in enumerate(epoch_iterator):
            # Skip batches already consumed before the resume point.
            if (steps_trained_in_current_epoch > 0):
                steps_trained_in_current_epoch -= 1
                continue
            (inputs, labels) = (mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch))
            inputs = inputs.to(args.device)
            labels = labels.to(args.device)
            model.train()
            outputs = (model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels))
            loss = outputs[0]
            if (args.n_gpu > 1):
                # DataParallel returns one loss per GPU; average them.
                loss = loss.mean()
            if (args.gradient_accumulation_steps > 1):
                loss = (loss / args.gradient_accumulation_steps)
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Optimizer update only every gradient_accumulation_steps batches.
            if (((step + 1) % args.gradient_accumulation_steps) == 0):
                if (args.max_grad_norm > 0):
                    if args.fp16:
                        torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                    else:
                        torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()
                model.zero_grad()
                global_step += 1
                # Periodic TensorBoard logging (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.logging_steps > 0) and ((global_step % args.logging_steps) == 0)):
                    if ((args.local_rank == (- 1)) and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer)
                        for (key, value) in results.items():
                            tb_writer.add_scalar('eval_{}'.format(key), value, global_step)
                    tb_writer.add_scalar('lr', scheduler.get_last_lr()[0], global_step)
                    tb_writer.add_scalar('loss', ((tr_loss - logging_loss) / args.logging_steps), global_step)
                    logging_loss = tr_loss
                # Periodic checkpoint save + rotation (main process only).
                if ((args.local_rank in [(- 1), 0]) and (args.save_steps > 0) and ((global_step % args.save_steps) == 0)):
                    checkpoint_prefix = 'checkpoint'
                    output_dir = os.path.join(args.output_dir, '{}-{}'.format(checkpoint_prefix, global_step))
                    os.makedirs(output_dir, exist_ok=True)
                    # Unwrap DataParallel/DDP before saving.
                    model_to_save = (model.module if hasattr(model, 'module') else model)
                    model_to_save.save_pretrained(output_dir)
                    tokenizer.save_pretrained(output_dir)
                    torch.save(args, os.path.join(output_dir, 'training_args.bin'))
                    logger.info('Saving model checkpoint to %s', output_dir)
                    _rotate_checkpoints(args, checkpoint_prefix)
                    torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
                    torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
                    logger.info('Saving optimizer and scheduler states to %s', output_dir)
            if ((args.max_steps > 0) and (global_step > args.max_steps)):
                epoch_iterator.close()
                break
        if ((args.max_steps > 0) and (global_step > args.max_steps)):
            train_iterator.close()
            break
    if (args.local_rank in [(- 1), 0]):
        tb_writer.close()
    return (global_step, (tr_loss / global_step))
class AverageMeter(object):
    """Computes and stores a running sum and count and exposes their average.

    Typical use: per-batch losses weighted by batch size, so get_avg() is a
    per-example average even when the last batch is smaller.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        # 'avg' is kept for backward compatibility with the original attribute
        # set; the live average comes from get_avg().
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Accumulate a value observed for n items (e.g. a batch-mean with n = batch size)."""
        self.sum += (val * n)
        self.count += n

    def get_avg(self):
        """Return the weighted average so far.

        Returns 0.0 before any update — the original raised ZeroDivisionError
        when called on a freshly reset meter.
        """
        if self.count == 0:
            return 0.0
        return self.sum / self.count
def evaluate(args, model: PreTrainedModel, tokenizer: PreTrainedTokenizer, prefix='') -> Dict:
    """Evaluate LM/MLM loss on the eval set and return perplexity metrics.

    Writes the results to <output_dir>/<prefix>/eval_results.txt. The dict
    contains both the legacy per-batch perplexity ('perplexity_old') and the
    per-example weighted variants computed through AverageMeter.
    """
    eval_output_dir = args.output_dir
    eval_dataset = load_and_cache_examples(args, tokenizer, evaluate=True)
    if (args.local_rank in [(- 1), 0]):
        os.makedirs(eval_output_dir, exist_ok=True)
    args.eval_batch_size = (args.per_gpu_eval_batch_size * max(1, args.n_gpu))

    def collate(examples: List[torch.Tensor]):
        # Pad variable-length examples; fall back to zero padding when the
        # tokenizer defines no pad token.
        if (tokenizer._pad_token is None):
            return pad_sequence(examples, batch_first=True)
        return pad_sequence(examples, batch_first=True, padding_value=tokenizer.pad_token_id)
    eval_sampler = SequentialSampler(eval_dataset)
    eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate)
    if (args.n_gpu > 1):
        model = torch.nn.DataParallel(model)
    logger.info('***** Running evaluation {} *****'.format(prefix))
    logger.info(' Num examples = %d', len(eval_dataset))
    logger.info(' Batch size = %d', args.eval_batch_size)
    eval_loss = 0.0
    nb_eval_steps = 0
    # Tracks a per-example weighted loss alongside the plain per-batch mean.
    eval_loss_meter = AverageMeter()
    model.eval()
    for batch in tqdm(eval_dataloader, desc='Evaluating'):
        (inputs, labels) = (mask_tokens(batch, tokenizer, args) if args.mlm else (batch, batch))
        inputs = inputs.to(args.device)
        labels = labels.to(args.device)
        with torch.no_grad():
            outputs = (model(inputs, masked_lm_labels=labels) if args.mlm else model(inputs, labels=labels))
            lm_loss = outputs[0]
            current_loss = lm_loss.mean().item()
            # Weight by batch size so the meter average is per example — this
            # matters when the final batch is smaller than the rest.
            current_batch_size = labels.size(0)
            eval_loss_meter.update(current_loss, current_batch_size)
            eval_loss += current_loss
        nb_eval_steps += 1
    # Legacy value: unweighted mean over batches.
    eval_loss = (eval_loss / nb_eval_steps)
    perplexity = torch.exp(torch.tensor(eval_loss))
    from_meter_eval_loss = eval_loss_meter.get_avg()
    from_meter_perplexity = torch.exp(torch.tensor(from_meter_eval_loss))
    from_meter_perplexity_math = math.exp(from_meter_eval_loss)
    result = {'perplexity_old': perplexity, 'perplexity': from_meter_perplexity, 'math_ppl': from_meter_perplexity_math}
    output_eval_file = os.path.join(eval_output_dir, prefix, 'eval_results.txt')
    os.makedirs(os.path.dirname(output_eval_file), exist_ok=True)
    with open(output_eval_file, 'w') as writer:
        logger.info('***** Eval results {} *****'.format(prefix))
        for key in sorted(result.keys()):
            logger.info(' %s = %s', key, str(result[key]))
            writer.write(('%s = %s\n' % (key, str(result[key]))))
    return result
def main():
    """CLI entry point: parse arguments, set up the device, load model/tokenizer,
    then run training and/or evaluation as requested."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # --- Required parameters ---
    parser.add_argument('--train_data_file', default=None, type=str, required=True, help='The input training data file (a text file).')
    parser.add_argument('--output_dir', type=str, required=True, help='The output directory where the model predictions and checkpoints will be written.')
    parser.add_argument('--model_type', type=str, required=True, help='The model architecture to be trained or fine-tuned.')
    # --- Optional data / model parameters ---
    parser.add_argument('--eval_data_file', default=None, type=str, help='An optional input evaluation data file to evaluate the perplexity on (a text file).')
    parser.add_argument('--line_by_line', action='store_true', help='Whether distinct lines of text in the dataset are to be handled as distinct sequences.')
    parser.add_argument('--should_continue', action='store_true', help='Whether to continue from latest checkpoint in output_dir')
    parser.add_argument('--model_name_or_path', default=None, type=str, help='The model checkpoint for weights initialization. Leave None if you want to train a model from scratch.')
    parser.add_argument('--do_lower_case', action='store_true', help='Set this flag if you are using an uncased model.')
    parser.add_argument('--mlm', action='store_true', help='Train with masked-language modeling loss instead of language modeling.')
    parser.add_argument('--mlm_probability', type=float, default=0.15, help='Ratio of tokens to mask for masked language modeling loss')
    parser.add_argument('--config_name', default=None, type=str, help='Optional pretrained config name or path if not the same as model_name_or_path. If both are None, initialize a new config.')
    parser.add_argument('--tokenizer_name', default=None, type=str, help='Optional pretrained tokenizer name or path if not the same as model_name_or_path. If both are None, initialize a new tokenizer.')
    parser.add_argument('--cache_dir', default=None, type=str, help='Optional directory to store the pre-trained models downloaded from s3 (instead of the default one)')
    parser.add_argument('--block_size', default=(- 1), type=int, help='Optional input sequence length after tokenization.The training dataset will be truncated in block of this size for training.Default to the model max input length for single sentence inputs (take into account special tokens).')
    # --- Run modes ---
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument('--evaluate_during_training', action='store_true', help='Run evaluation during training at each logging step.')
    parser.add_argument('--evaluate_every_epoch', action='store_true', help='Run evaluation at the beginning of every epoch')
    parser.add_argument('--untied', action='store_true', help="Don't use tied weights: (using torchscript flag)")
    # --- Optimization hyperparameters ---
    parser.add_argument('--per_gpu_train_batch_size', default=4, type=int, help='Batch size per GPU/CPU for training.')
    parser.add_argument('--per_gpu_eval_batch_size', default=4, type=int, help='Batch size per GPU/CPU for evaluation.')
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help='Number of updates steps to accumulate before performing a backward/update pass.')
    parser.add_argument('--learning_rate', default=5e-05, type=float, help='The initial learning rate for Adam.')
    parser.add_argument('--weight_decay', default=0.0, type=float, help='Weight decay if we apply some.')
    parser.add_argument('--adam_epsilon', default=1e-08, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', default=1.0, type=float, help='Max gradient norm.')
    parser.add_argument('--num_train_epochs', default=1.0, type=float, help='Total number of training epochs to perform.')
    parser.add_argument('--max_steps', default=(- 1), type=int, help='If > 0: set total number of training steps to perform. Override num_train_epochs.')
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    # --- Logging / checkpointing ---
    parser.add_argument('--logging_steps', type=int, default=500, help='Log every X updates steps.')
    parser.add_argument('--save_steps', type=int, default=500, help='Save checkpoint every X updates steps.')
    parser.add_argument('--save_total_limit', type=int, default=None, help='Limit the total amount of checkpoints, delete the older checkpoints in the output_dir, does not delete by default')
    parser.add_argument('--eval_all_checkpoints', action='store_true', help='Evaluate all checkpoints starting with the same prefix as model_name_or_path ending and ending with step number')
    parser.add_argument('--no_cuda', action='store_true', help='Avoid using CUDA when available')
    parser.add_argument('--overwrite_output_dir', action='store_true', help='Overwrite the content of the output directory')
    parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
    parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
    parser.add_argument('--fp16', action='store_true', help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit')
    parser.add_argument('--fp16_opt_level', type=str, default='O1', help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument('--local_rank', type=int, default=(- 1), help='For distributed training: local_rank')
    parser.add_argument('--server_ip', type=str, default='', help='For distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='For distant debugging.')
    args = parser.parse_args()
    # --- Sanity checks on argument combinations ---
    if ((args.model_type in ['bert', 'roberta', 'distilbert', 'camembert']) and (not args.mlm)):
        raise ValueError('BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the --mlm flag (masked language modeling).')
    if ((args.eval_data_file is None) and args.do_eval):
        raise ValueError('Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file or remove the --do_eval argument.')
    if args.should_continue:
        # Resume from the most recent checkpoint in output_dir.
        sorted_checkpoints = _sorted_checkpoints(args)
        if (len(sorted_checkpoints) == 0):
            raise ValueError('Used --should_continue but no checkpoint was found in --output_dir.')
        else:
            args.model_name_or_path = sorted_checkpoints[(- 1)]
    if (os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and (not args.overwrite_output_dir)):
        raise ValueError('Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overcome.'.format(args.output_dir))
    # --- Optional remote debugger attach (ptvsd) ---
    if (args.server_ip and args.server_port):
        import ptvsd
        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # --- Device / distributed setup ---
    if ((args.local_rank == (- 1)) or args.no_cuda):
        device = torch.device(('cuda' if (torch.cuda.is_available() and (not args.no_cuda)) else 'cpu'))
        args.n_gpu = (0 if args.no_cuda else torch.cuda.device_count())
    else:
        # Each distributed process owns exactly one GPU.
        torch.cuda.set_device(args.local_rank)
        device = torch.device('cuda', args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=(logging.INFO if (args.local_rank in [(- 1), 0]) else logging.WARN))
    logger.warning('Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s', args.local_rank, device, args.n_gpu, bool((args.local_rank != (- 1))), args.fp16)
    set_seed(args)
    # Barrier so only the first process downloads model & vocab; the rest wait.
    if (args.local_rank not in [(- 1), 0]):
        torch.distributed.barrier()
    (config_class, model_class, tokenizer_class) = MODEL_CLASSES[args.model_type]
    if args.config_name:
        config = config_class.from_pretrained(args.config_name, cache_dir=args.cache_dir)
    elif args.model_name_or_path:
        config = config_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir)
    else:
        config = config_class()
    config.output_past = False
    if args.untied:
        # torchscript mode disables weight tying in transformers configs.
        config.torchscript = True
    if args.tokenizer_name:
        tokenizer = tokenizer_class.from_pretrained(args.tokenizer_name, cache_dir=args.cache_dir, do_lower_case=args.do_lower_case)
    elif args.model_name_or_path:
        tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, cache_dir=args.cache_dir, do_lower_case=args.do_lower_case)
    else:
        raise ValueError('You are instantiating a new {} tokenizer. This is not supported, but you can do it from another script, save it,and load it from here, using --tokenizer_name'.format(tokenizer_class.__name__))
    # Clamp block_size to the tokenizer's maximum input length.
    if (args.block_size <= 0):
        args.block_size = tokenizer.max_len
    else:
        args.block_size = min(args.block_size, tokenizer.max_len)
    if args.model_name_or_path:
        model = model_class.from_pretrained(args.model_name_or_path, from_tf=bool(('.ckpt' in args.model_name_or_path)), config=config, cache_dir=args.cache_dir)
    else:
        logger.info('Training new model from scratch')
        model = model_class(config=config)
    model.to(args.device)
    # Release the download barrier for the remaining processes.
    if (args.local_rank == 0):
        torch.distributed.barrier()
    logger.info('Training/evaluation parameters %s', args)
    # --- Training ---
    if args.do_train:
        # Same barrier pattern for the (cached) dataset preprocessing.
        if (args.local_rank not in [(- 1), 0]):
            torch.distributed.barrier()
        train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False)
        if (args.local_rank == 0):
            torch.distributed.barrier()
        (global_step, tr_loss) = train(args, train_dataset, model, tokenizer)
        logger.info(' global_step = %s, average loss = %s', global_step, tr_loss)
    # --- Save the final model (main process only) and reload it ---
    if (args.do_train and ((args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))):
        if (args.local_rank in [(- 1), 0]):
            os.makedirs(args.output_dir, exist_ok=True)
        logger.info('Saving model checkpoint to %s', args.output_dir)
        model_to_save = (model.module if hasattr(model, 'module') else model)
        model_to_save.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))
        # Reload the saved artifacts to verify they round-trip.
        model = model_class.from_pretrained(args.output_dir)
        tokenizer = tokenizer_class.from_pretrained(args.output_dir)
        model.to(args.device)
    # --- Evaluation ---
    results = {}
    if (args.do_eval and (args.local_rank in [(- 1), 0])):
        checkpoints = [args.output_dir]
        if args.eval_all_checkpoints:
            checkpoints = list((os.path.dirname(c) for c in sorted(glob.glob(((args.output_dir + '/**/') + WEIGHTS_NAME), recursive=True))))
            logging.getLogger('transformers.modeling_utils').setLevel(logging.WARN)
        logger.info('Evaluate the following checkpoints: %s', checkpoints)
        for checkpoint in checkpoints:
            # Suffix metric keys with the checkpoint step when evaluating several.
            global_step = (checkpoint.split('-')[(- 1)] if (len(checkpoints) > 1) else '')
            prefix = (checkpoint.split('/')[(- 1)] if (checkpoint.find('checkpoint') != (- 1)) else '')
            model = model_class.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            result = dict((((k + '_{}'.format(global_step)), v) for (k, v) in result.items()))
            results.update(result)
    return results
class Loader(abc.ABC):
    """Abstract base for loading a pipeline-partitioned checkpoint into a model."""

    # Parameter names tolerated as present in the original model but missing
    # from the unified (partitioned) state dict.
    ALLOW_UNSHARED = {}
    # Parameter names tolerated as present in the unified state dict but
    # missing from the original model.
    ALLOW_UNLOADEDED = {}

    @abc.abstractmethod
    def load_from_saved_pipeline(self, args, to_original=True, **kw):
        raise NotImplementedError()

    def _check_load_matching(self, original_state, unified_state):
        """Validate key overlap between the two state dicts.

        Returns True (load strictly) when no mismatches are allowed at all;
        otherwise raises on any mismatch outside the allow-sets and returns
        False (load non-strictly).
        """
        if not (self.ALLOW_UNSHARED or self.ALLOW_UNLOADEDED):
            return True
        original_keys = set(original_state.keys())
        unified_keys = set(unified_state.keys())
        problematic = original_keys.difference(unified_keys).difference(self.ALLOW_UNSHARED)
        if problematic:
            raise ValueError(f'Parameters {problematic} are not in unified_state, but is in original state')
        problematic = unified_keys.difference(original_keys).difference(self.ALLOW_UNLOADEDED)
        if problematic:
            raise ValueError(f'Parameters {problematic} are in unified_state, but not in original state')
        return False
def base_checkoint_name(name_prefix, stage):
    """Build the filename of the saved partition checkpoint for one pipeline stage."""
    return '{}_Partition{}.pt'.format(name_prefix, stage)
class HFLoader(Loader):
    """Loader that reassembles a pipeline-partitioned checkpoint into a
    HuggingFace transformers model."""

    IS_HUGGINFACE_TRANSFORMER = True

    def __init__(self, hf_transformers_model_class=AutoModel):
        super().__init__()
        # The HF model class used when instantiating from model_name_or_path.
        self.MODEL_CLASS = hf_transformers_model_class

    def load_from_saved_pipeline(self, args, to_original=True, **kw):
        """Merge per-stage checkpoints and load them into a model.

        to_original=True maps training-state keys back to HF naming and loads
        into a model built from args.model_name_or_path (unless model/tokenizer/
        config are all supplied in kw); otherwise the model comes from the
        AVAILABLE_MODELS registry. Returns (model, {'tokenizer', 'config'}).
        """
        cfg = args.model
        partitions_saved_dir = args.checkpoints_save_dir
        name_prefix = getattr(args, 'checkpoints_save_name_prefix', '')
        add_to_prefix = kw.pop('add_to_prefix', '')
        name_prefix += add_to_prefix
        unified_state = self.get_unified_state_dict(cfg, name_prefix, partitions_saved_dir)
        print(f'-I- Loaded state dict from {partitions_saved_dir}')
        if to_original:
            unified_state = self.substitue_state_dict_keys_back_to_original(unified_state)
            model_name_or_path = args.model_name_or_path
            if all([(k in kw) for k in ['model', 'tokenizer', 'config']]):
                # Caller supplied everything; reuse it.
                model = kw.get('model')
                tokenizer = kw.get('tokenizer')
                config = kw.get('config')
            else:
                (model, tokenizer, config) = self.get_hf_original_model_tokenizer_and_config(model_name_or_path)
        elif all([(k in kw) for k in ['model', 'tokenizer', 'config']]):
            model = kw.get('model')
            tokenizer = kw.get('tokenizer')
            config = kw.get('config')
        else:
            # Fall back to the project model registry.
            handler = AVAILABLE_MODELS.get(cfg)
            model = handler.get_normal_model_instance()
            tokenizer = handler.tokenizer
            config = handler.config
        # Strictness is decided by the allow-sets declared on the Loader subclass.
        strict = self._check_load_matching(original_state=model.state_dict(), unified_state=unified_state)
        model.load_state_dict(unified_state, strict=strict)
        print('-I- Loaded state into the model')
        extra = dict(tokenizer=tokenizer, config=config)
        return (model, extra)

    def get_unified_state_dict(self, cfg, name_prefix, partitions_saved_dir):
        """Load every per-stage checkpoint file and merge them into one dict.

        NOTE(review): later stages overwrite earlier ones on key collisions —
        presumably keys are disjoint across stages; verify against the saver.
        """
        n_stages = AVAILABLE_MODELS.get(cfg).get_pipe_config().n_stages
        names = [base_checkoint_name(name_prefix, stage=i) for i in range(n_stages)]
        names = [os.path.join(partitions_saved_dir, name) for name in names]
        print(f'-V- loading from {names}')
        loaded = [torch.load(name, map_location='cpu') for name in names]
        unified_state = dict()
        for d in loaded:
            unified_state.update(d)
        return unified_state

    def get_hf_original_model_tokenizer_and_config(self, model_name_or_path, cache_dir='', config_name=None, tokenizer_name=None, tokenizer_kw=dict(do_lower_case=False), config_kw=dict(), resize_embeds=True):
        # NOTE(review): tokenizer_kw/config_kw are mutable default arguments —
        # safe only as long as callers never mutate them.
        'Get Huggingface model, tokenizer and config we want to load to.'
        (config, unsed) = AutoConfig.from_pretrained((config_name if config_name else model_name_or_path), cache_dir=(cache_dir if cache_dir else None), return_unused_kwargs=True, **config_kw)
        if unsed:
            print(f'warning: Unused config kwargs when loading transformer model: {unsed}')
        tokenizer = AutoTokenizer.from_pretrained((tokenizer_name if tokenizer_name else model_name_or_path), cache_dir=(cache_dir if cache_dir else None), **tokenizer_kw)
        # NOTE(review): lexicographic string comparison of version numbers —
        # fragile (e.g. '10.0.0' < '4.1.1'); works for the versions in use here.
        if (transformers.__version__ > '4.1.1'):
            model = self.MODEL_CLASS.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config, cache_dir=(cache_dir if cache_dir else None))
        else:
            # Older transformers needs use_cdn disabled for the huge t5-11b weights.
            use_cdn = (model_name_or_path not in {'t5-11b'})
            model = self.MODEL_CLASS.from_pretrained(model_name_or_path, from_tf=bool(('.ckpt' in model_name_or_path)), config=config, cache_dir=(cache_dir if cache_dir else None), use_cdn=use_cdn)
        if resize_embeds:
            resize_token_embeddings(model, tokenizer)
        return (model, tokenizer, config)

    @abc.abstractmethod
    def substitue_state_dict_keys_back_to_original(self, training_state_dict):
        # Subclasses map training-time (partitioned/flattened) keys back to the
        # original HF state-dict naming.
        raise NotImplementedError()
class T5HFLoader(HFLoader):
    """Loader that maps partitioned-training T5 state-dict keys back to the
    HuggingFace naming scheme."""

    def __init__(self, hf_transformers_model_class=T5ForConditionalGeneration):
        super().__init__(hf_transformers_model_class=hf_transformers_model_class)

    def substitue_state_dict_keys_back_to_original(self, training_state_dict):
        """Rename flattened training keys to HF keys and re-tie the shared embedding.

        E.g. 'encoder.0.1.xxx' -> 'encoder.block.0.layer.1.xxx'.
        """
        d = dict()
        for (k, v) in training_state_dict.items():
            # Escape the dots: the original pattern '([0-9]+.)([0-9]+.)' used a
            # bare '.', which matches ANY character and could rewrite keys that
            # merely contain digit runs not delimited by dots.
            new_key = re.sub(r'(\d+\.)(\d+\.)', 'block.\\1layer.\\2', k)
            d[new_key] = v
        if ('shared_embed_weight' in d):
            # Restore HF weight tying: 'shared', encoder and decoder embedding
            # entries all alias the same tensor.
            w = d.pop('shared_embed_weight')
            d['shared.weight'] = d['encoder.embed_tokens.weight'] = d['decoder.embed_tokens.weight'] = w
        return d
class NaiveModelParallelSplitter():
    """Spread a transformers model over several devices (simple model parallelism).

    The transformer blocks returned by model.get_block_list() are assigned to the
    given devices in contiguous groups; every remaining module (embeddings,
    input/output tensors) is placed on the first device.

    Requires the model to implement get_block_list(), e.g.::

        def get_block_list(model):
            return list(model.encoder.get_block_list()) + list(model.decoder.get_block_list())
    """

    def __init__(self):
        pass

    @staticmethod
    def spread_on_devices(model: torch.nn.Module, devices: Optional[List]=None):
        """Move the model's blocks across `devices` (all visible CUDA devices when None)."""
        if devices is None:
            # Fix: the original only filled in `devices` when CUDA was available
            # and then crashed on len(None) on CPU-only hosts.
            devices = list(range(torch.cuda.device_count())) if torch.cuda.is_available() else []
        if len(devices) < 2:
            # Nothing to split: put everything on the single device (if any).
            device = devices[0] if devices else None
            if device is not None:
                model.forward = decorate_args_and_kwargs_to_deivce(func=model.forward, device=device)
                model.to(device)
            return
        modules_to_move = set(model.modules())
        handled_models = set()
        block_list = model.get_block_list()
        # Fix: clamp group size to at least 1 (the original divided by zero when
        # there were fewer blocks than devices) and clamp the device index (the
        # original indexed past the device list for the tail blocks when
        # len(block_list) was not a multiple of len(devices)).
        group_size = max(1, len(block_list) // len(devices))
        for (i, block) in enumerate(block_list):
            device = devices[min(i // group_size, len(devices) - 1)]
            block.to(device)
            block.device = device
            block.forward = decorate_args_and_kwargs_to_deivce(func=block.forward, device=device)
            modules_to_move.remove(block)
            handled_models.add(block)
            # Decorate submodules so tensors follow the block's device; shared
            # submodules already handled elsewhere are left in place.
            for (nm, m) in block.named_modules():
                if m in modules_to_move:
                    m.forward = decorate_args_and_kwargs_to_deivce(func=m.forward, device=device)
                    modules_to_move.remove(m)
                    handled_models.add(m)
                else:
                    logger.info(f'Shared model not moved {nm}')
        # Everything not covered by a block goes to the first device.
        device = devices[0]
        for module in list(modules_to_move):
            submodules = set(module.modules())
            if submodules & handled_models:
                # A parent container of already-placed blocks: moving it would
                # drag its children off their assigned devices.
                logger.info('skipping model because it or one or more of submodules was already handled')
                continue
            logger.info(f'remaining module will be placed on device {device} ')
            module.to(device)
            module.device = device
            module.forward = decorate_args_and_kwargs_to_deivce(func=module.forward, device=device)
def decorate_args_and_kwargs_to_deivce(func, device):
    """Wrap a forward function so every tensor argument is moved to `device`.

    Non-tensor arguments pass through untouched. A `forward_pre_hook` cannot be
    used for this instead, since such hooks (as of PyTorch 1.6.0) only see
    positional arguments — so forward should be called with tensors positionally.

    # NOTE: consider moving the model to device straight away here
    # NOTE: can save original function in model to remove the decoration later
    """
    def move(obj):
        if isinstance(obj, torch.Tensor):
            return obj.to(device)
        return obj

    def wrapper(*args, **kwargs):
        moved_args = [move(a) for a in args]
        moved_kwargs = {name: move(value) for (name, value) in kwargs.items()}
        return func(*moved_args, **moved_kwargs)
    return wrapper
def get_my_send_recv_ranks(pipe_config: PipelineConfig, stage_id, stage_to_rank_map=None, prefer_seq_sends=True):
    """For one pipeline stage, compute which ranks each output tensor is sent to
    and which ranks each input tensor is received from.

    Returns (send_ranks, receive_ranks): OrderedDicts mapping tensor name to a
    list of ranks, ordered like the stage's declared outputs/inputs. With
    prefer_seq_sends, a tensor consumed by stage j is sent only by the closest
    producing stage before j (instead of every producer), and the receiver
    keeps the latest producer rather than erroring on multiple producers.
    """
    def ranks_in_stage(given_stage):
        # Without an explicit map, assume stage i runs exactly on rank i.
        if stage_to_rank_map:
            return stage_to_rank_map[given_stage]
        else:
            return [given_stage]
    stages = pipe_config.d['stages']
    receive_ranks = OrderedDict()
    send_ranks = defaultdict(list)
    # Enumerate all producer->consumer stage pairs (i produces, j consumes,
    # i < j) and keep only the pairs that involve this stage.
    for i in range(len(stages)):
        for j in range((i + 1), len(stages)):
            if ((i != stage_id) and (j != stage_id)):
                continue
            stage_i = stages[i]
            stage_j = stages[j]
            for tensor_name in stage_i['outputs']:
                if (tensor_name in stage_j['inputs']):
                    if (stage_id == j):
                        # This stage is the consumer: record where the tensor
                        # comes from. Later producers overwrite earlier ones.
                        if (tensor_name in receive_ranks):
                            if prefer_seq_sends:
                                print(f'-V- stage {stage_id}: preferring to recv from a later: {i}-->{j}: {tensor_name}')
                            else:
                                raise ValueError(f'Input {tensor_name} received from multiple stages')
                        receive_ranks[tensor_name] = ranks_in_stage(i)
                    else:
                        # This stage is the producer (stage_id == i): decide
                        # whether it is the one that should actually send.
                        if prefer_seq_sends:
                            # All stages before j that also produce this tensor.
                            all_sending_dist = [x for (x, v) in stages.items() if ((tensor_name in v['outputs']) and (x < j))]
                            assert (len(all_sending_dist) > 0)
                            assert (stage_id in all_sending_dist)
                            closest_sender = max(all_sending_dist)
                            if (stage_id != closest_sender):
                                print(f'-v- stage {stage_id}: will not send {i}-->{j}: {tensor_name}. There is a closer sender: {closest_sender}')
                                continue
                        send_ranks[tensor_name].extend(ranks_in_stage(j))
    # Re-key both dicts in the order the stage declares its outputs/inputs.
    send_ranks = OrderedDict(((k, send_ranks[k]) for k in stages[stage_id]['outputs'] if (k in send_ranks)))
    receive_ranks = OrderedDict(((k, receive_ranks[k]) for k in stages[stage_id]['inputs'] if (k in receive_ranks)))
    return (send_ranks, receive_ranks)
class PartitioningConfigParser():
    """Parse a partitioned-model configuration for a single rank.

    Resolves the model handler, determines this rank's stage, the send/receive
    communication pattern, tensor shapes/dtypes for train/eval batch sizes, and
    gradient requirements for this stage's inputs/outputs.
    """

    def __init__(self, cfg, rank, bs_train, bs_eval, handler=None, send_target_in_pipe=False, prefer_seq_sends=True):
        # cfg: registered model name used to look up a handler when none is given.
        if (handler is None):
            handler = AVAILABLE_MODELS.get(cfg)
            if (handler is None):
                raise ValueError(f'Model {cfg} not found. AVAILABLE_MODELS={AVAILABLE_MODELS.keys()}')
        pipe_config = handler.get_pipe_config()
        self.stage_id = pipe_config.rank_to_stage_idx(rank)
        self.pipe_config = pipe_config
        self.num_stages = pipe_config.n_stages
        stage_to_rank_map = pipe_config.get_stage_to_ranks_map()
        (self.send_ranks, self.receive_ranks) = get_my_send_recv_ranks(pipe_config, self.stage_id, stage_to_rank_map=stage_to_rank_map, prefer_seq_sends=prefer_seq_sends)
        if send_target_in_pipe:
            # Deprecated path: targets flow through the pipe between adjacent stages.
            warnings.warn('Sending targets in pipeline is deprecated')
            self.target_tensor_names = pipe_config.d['model_outputs']
            if (self.stage_id > 0):
                self.ranks_in_previous_stage = stage_to_rank_map[(self.stage_id - 1)]
            else:
                self.ranks_in_previous_stage = []
            if (self.stage_id < (self.num_stages - 1)):
                self.ranks_in_next_stage = stage_to_rank_map[(self.stage_id + 1)]
            else:
                self.ranks_in_next_stage = []
        else:
            self.target_tensor_names = None
            self.ranks_in_previous_stage = None
            self.ranks_in_next_stage = None
        # NOTE: get_shapes() mutates pipe_config's recorded shapes (via change_batch),
        # so eval shapes are computed first and training shapes last — keep this order.
        self.eval_tensor_shapes = self.get_shapes(bs_eval)
        self.training_tensor_shapes = self.get_shapes(bs_train)
        self.training_tensor_dtypes = self.eval_tensor_dtypes = pipe_config.get_dtypes_for_stage(self.stage_id)
        self.req_grad = pipe_config.get_inputs_req_grad_for_stage(self.stage_id)
        self.outputs_req_grad = pipe_config.get_outputs_req_grad_for_stage(self.stage_id)
        _check_shared_parameters(pipe_config)

    def comm_init_args(self):
        """Argument tuple for initializing the communication handler."""
        return (self.receive_ranks, self.send_ranks, self.target_tensor_names, self.ranks_in_previous_stage, self.ranks_in_next_stage, self.req_grad, self.outputs_req_grad, self.pipe_config)

    def load_model(self, handler, bs_train, rank):
        """Materialize this rank's stage module and store it on ``self.model``."""
        model = handler.realize_stage_for_rank(batch_size=bs_train, my_rank=rank)
        self.model = model

    def get_shapes(self, batch_size):
        """Shapes of this stage's inputs/outputs after switching to *batch_size*.

        Returns a deepcopy because change_batch mutates the shared config in place.
        """
        pipe_config = self.pipe_config
        pipe_config.change_batch(batch_size, for_replicated=True)
        return deepcopy(pipe_config.get_shapes_for_stage(self.stage_id))
def is_shared_parameter(tensor_scope):
    """True when *tensor_scope* (a scope-name string) refers to a shared Parameter."""
    return tensor_scope.find('Parameter') != -1
def _check_shared_parameters(pipe_config: PipelineConfig):
    """Collect, per stage, input/output scope names that are shared Parameters.

    Prints the mapping when any shared parameters are found.

    :param pipe_config: parsed pipeline configuration (reads pipe_config.d['stages']).
    :return: dict mapping stage-id -> set of shared-parameter scope names.
    """
    shared_by_stage = defaultdict(set)
    for stage_idx, stage in pipe_config.d['stages'].items():
        for scope in chain(stage['inputs'], stage['outputs']):
            if is_shared_parameter(scope):
                shared_by_stage[stage_idx].add(scope)
    if shared_by_stage:
        pprint(f'Shared Parameters: {shared_by_stage}')
    return shared_by_stage
def _import_handlers_from_dir(tasks_dir=os.path.dirname(__file__), module_name='.models.registery.', package='pipe'):
    """Automatically import any Python files in the tasks directory
    so that all available tasks register themselves on import.

    :param tasks_dir: directory to scan for ``.py`` files and sub-packages.
    :param module_name: dotted module prefix the task name is appended to.
    :param package: anchor package for the relative import.
    """
    for entry in os.listdir(tasks_dir):
        # Skip private/hidden entries such as __init__.py and .cache dirs.
        if entry.startswith('_') or entry.startswith('.'):
            continue
        full_path = os.path.join(tasks_dir, entry)
        is_py_file = entry.endswith('.py')
        if not (is_py_file or os.path.isdir(full_path)):
            continue
        task_name = entry[:entry.find('.py')] if is_py_file else entry
        importlib.import_module(module_name + task_name, package=package)
def get_cep_model(n=50, k=11, c=500, n_split=4):
    """Build a CEP ``Net`` model.

    :param n: first positional argument forwarded to ``Net``.
    :param k: unused; kept for interface compatibility with callers.
    :param c: second positional argument forwarded to ``Net``.
    :param n_split: forwarded as keyword to ``Net``.
    """
    return Net(n, c, n_split=n_split)
class CEPModelHandler(CommonModelHandler):
    """Handler whose "normal" model is produced by a user-supplied factory callable."""

    def __init__(self, normal_model_fn, *args, **kw):
        # normal_model_fn: zero/few-arg callable returning the unpartitioned model.
        super().__init__(*args, **kw)
        self.normal_model_fn = normal_model_fn

    def _get_normal_model_instance(self, *args, **kwargs):
        # Delegate construction to the stored factory.
        return self.normal_model_fn(*args, **kwargs)
class ParamDictCVMOdelHandler(CommonModelHandler):
    """Handler that instantiates a CV model class from a stored kwargs dict."""

    def __init__(self, dict_params, model_class, *args, **kw):
        # dict_params: keyword arguments used to construct model_class.
        super().__init__(*args, **kw)
        self.dict_params = dict_params
        self.model_class = model_class

    def _get_normal_model_instance(self, *args, **kw):
        # Positional/keyword call args are intentionally ignored; only dict_params matter.
        return self.model_class(**self.dict_params)

    def get_loader(self, *args, **kw):
        raise NotImplementedError()
def register_cv_hardcoded_model(name, *args, **kw):
    """Create a ParamDictCVMOdelHandler and register it under *name*.

    :param name: generated-module name (or path) the handler is registered as.
    :param args: forwarded to ParamDictCVMOdelHandler (dict_params, model_class, ...).
    :param kw: forwarded to ParamDictCVMOdelHandler.
    """
    handler = ParamDictCVMOdelHandler(*args, **kw)
    handler.register_autogenerated(generated_file_name_or_path=name)
class DummyModelHandler(CommonModelHandler):
    """Handler building a dummy T5 model via DumT5Partitioner (for testing/dev)."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)

    def _get_normal_model_instance(self, *args, **kwargs):
        # Build once and cache; later calls return the same instance.
        if (self.normal_model_instance is None):
            args = SimpleNamespace()
            # NOTE: the partitioner is constructed BEFORE the flags below are set —
            # presumably it only reads them later inside get_model(); keep this order.
            p = DumT5Partitioner(args)
            args.lmhead = True
            args.stateless_tied = True
            args.precompute_masks = False
            self.normal_model_instance = p.get_model(args)
            self.tokenizer = p.tokenizer
            self.config = p.config
        return self.normal_model_instance

    def get_extra(self, *args, **kw):
        # Extra dataset keywords: the HF config and tokenizer captured at build time.
        return dict(config=self.config, tokenizer=self.tokenizer)
class GetConfigFrom(Enum):
    """Source from which a pipeline-transformer config is resolved."""
    # Explicit values match what auto() previously assigned (1, 2, 3).
    HardCoded = 1
    ParsedArgs = 2
    Generated = 3
class HFModelHandler(CommonModelHandler):
    """Handler for HuggingFace transformer models.

    Lazily builds (model, tokenizer, config) from a pipeline-transformer config
    dict; currently the config can only be resolved from hard-coded registry
    functions (the other GetConfigFrom modes raise NotImplementedError).
    """

    def __init__(self, method: GetConfigFrom=GetConfigFrom.HardCoded, *args, **kw):
        super().__init__(*args, **kw)
        self.pipeline_transformer_config = None  # cached config dict
        self.method = method  # config source; only HardCoded is implemented
        self.tokenizer = None
        self.config = None

    def _get_normal_model_instance(self, *args, **kw):
        # Build once; cache model/tokenizer/config for subsequent get_extra() calls.
        if (self.normal_model_instance is None):
            cfg = self.get_pipeline_transformer_config()
            (model, tokenizer, config) = pretrained_model_config_and_tokenizer(**cfg)
            self.tokenizer = tokenizer
            self.config = config
            self.normal_model_instance = model
        assert hasattr(self, 'tokenizer')
        assert hasattr(self, 'config')
        return self.normal_model_instance

    def get_pipeline_transformer_config(self):
        """Resolve and cache the config dict used to instantiate the model."""
        if (self.pipeline_transformer_config is None):
            if (self.method == GetConfigFrom.Generated):
                raise NotImplementedError()
            elif (self.method == GetConfigFrom.ParsedArgs):
                raise NotImplementedError()
            elif (self.method == GetConfigFrom.HardCoded):
                # Hard-coded configs are keyed by registry NAME, so the stored
                # "name" must not be an actual filesystem path.
                assert (not os.path.exists(self.generated_file_name_or_path))
                cfg = MODEL_TOKENIZER_AND_CONFIG_FUNCTIONS.get(self.generated_file_name_or_path)()
            else:
                raise NotImplementedError()
            self.pipeline_transformer_config = cfg
        return self.pipeline_transformer_config

    def get_extra(self, *args, **kw):
        # Extra dataset keywords: the HF config and tokenizer.
        return dict(config=self.config, tokenizer=self.tokenizer)

    def get_loader(self, *args, **kw):
        raise NotImplementedError()
class CommonModelHandler(abc.ABC):
    """Base class for model handlers.

    Ties a "normal" (unpartitioned) model to its auto-generated partitioning
    module and realizes per-rank pipeline stages from it. All expensive results
    (model instance, generated module, pipe config) are cached on first use.
    """

    def __init__(self, partitioned_models_package=_PARTITIONED_MODELS_PACKAGE):
        self.partitioned_models_package = partitioned_models_package
        self.generated_file_name_or_path = None  # set by register_autogenerated()
        self.normal_model_instance = None  # cached unpartitioned model
        self.generated = None  # cached auto-generated python module
        self.pipe_config = None  # cached PipelineConfig

    @abc.abstractmethod
    def _get_normal_model_instance(self, *args, **kw):
        """Build the unpartitioned model (implemented by subclasses)."""
        raise NotImplementedError()

    def get_normal_model_instance(self, *args, **kw):
        """Return the unpartitioned model, building and caching it on first use."""
        if (self.normal_model_instance is None):
            self.normal_model_instance = self._get_normal_model_instance(*args, **kw)
        return self.normal_model_instance

    def get_generated_module(self):
        """Import and cache the auto-generated partitioning module.

        ``generated_file_name_or_path`` may be either a full filesystem path or a
        module name relative to ``partitioned_models_package``.
        """
        if (self.generated is None):
            cfg = self.generated_file_name_or_path
            is_full_path = os.path.exists(cfg)
            try:
                if is_full_path:
                    generated = load_module(cfg)
                else:
                    generated_file_name = self.generated_file_name_or_path
                    generated = importlib.import_module(('.' + generated_file_name), package=self.partitioned_models_package)
            except Exception as e:
                print(f'-E- error loading generated config given {cfg}. is_full_path={is_full_path}')
                raise e
            self.generated = generated
        return self.generated

    def get_pipe_config(self) -> PipelineConfig:
        """Return (and cache) the PipelineConfig built by the generated module."""
        if (self.pipe_config is None):
            generated = self.get_generated_module()
            # DEBUG=True keeps partitions on CPU while building the configuration.
            GET_PARTITIONS_ON_CPU = True
            create_pipeline_configuration = generated.create_pipeline_configuration
            config = create_pipeline_configuration(DEBUG=GET_PARTITIONS_ON_CPU)
            pipe_config = PipelineConfig(config)
            self.pipe_config = pipe_config
        return self.pipe_config

    def realize_stage_for_rank(self, batch_size, my_rank):
        """Instantiate the pipeline-stage module owned by *my_rank*."""
        pipe_config = self.get_pipe_config()
        (layers, tensors) = self.get_layers_and_tensors()
        return pipe_config.realize_stage_for_rank(layers, tensors, batch_size, my_rank)

    def get_layers_and_tensors(self, *args, **kw):
        """Extract the full model's layer/tensor dicts via the generated helpers."""
        if (self.normal_model_instance is None):
            self.normal_model_instance = self.get_normal_model_instance()
        model_instance = self.normal_model_instance
        pipe_config = self.get_pipe_config()
        generated = self.get_generated_module()
        layerDict = generated.layerDict
        tensorDict = generated.tensorDict
        depth = pipe_config.d['depth']
        blocks = pipe_config.d['basic_blocks']
        layers = layerDict(model_instance, depth=depth, basic_blocks=blocks)
        tensors = tensorDict(model_instance)
        return (layers, tensors)

    def get_loader(self, *args, **kw):
        raise NotImplementedError()

    def get_extra(self, *args, **kwargs):
        """Extra keywords for the dataset; return a dict if there is something to return."""
        pass

    def register_autogenerated(self, generated_file_name_or_path: str):
        """Bind this handler to its generated module and add it to the global registry."""
        self.generated_file_name_or_path = generated_file_name_or_path
        register_model(generated_file_name_or_path=generated_file_name_or_path, handler=self)

    def set_partitioned_models_package(self, partitioned_models_package):
        self.partitioned_models_package = partitioned_models_package
def register_model(generated_file_name_or_path, handler: CommonModelHandler):
    """Record *handler* in the global AVAILABLE_MODELS registry under the given key.

    (No ``global`` statement needed: we only mutate the dict, never rebind it.)
    """
    AVAILABLE_MODELS[generated_file_name_or_path] = handler
def register_model_func(generated_file_name_or_path, _get_normal_model_instance, get_extra=None):
    """Create a one-off handler class wrapping the given factory and register it.

    :param generated_file_name_or_path: registry key / generated module name.
    :param _get_normal_model_instance: function used as the handler's
        ``_get_normal_model_instance`` method (receives ``self``).
    :param get_extra: optional function used as the handler's ``get_extra`` method.
    :return: the registered handler instance (previously it was created and then
        discarded, leaving the caller no reference to it; returning it is
        backward compatible).
    """
    members = {'_get_normal_model_instance': _get_normal_model_instance}
    if get_extra:
        members['get_extra'] = get_extra
    # Dynamically subclass CommonModelHandler so the abstract method is overridden.
    handler_cls = type('AutoGeneratedModelHandler', (CommonModelHandler,), members)
    handler: CommonModelHandler = handler_cls()
    handler.register_autogenerated(generated_file_name_or_path=generated_file_name_or_path)
    return handler
def load_module(full_path: str):
    """Dynamically load a Python module from an explicit file path.

    :param full_path: filesystem path to a ``.py`` file.
    :return: the executed module object (registered under the dummy name
        'module.name', matching the original behavior).
    """
    spec = importlib.util.spec_from_file_location('module.name', full_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
def register_normal_model_by_function(fn):
    """Register *fn* as a "normal model" entry point under ``fn.__name__``.

    Records the factory itself in NORMAL_MODEL_ENTRY_POINTS and a handler
    wrapping it in NORMAL_MODEL_ENTRY_POINTS_HANDLERS. Warns (but still
    overwrites) when a handler is already registered under the same name.

    :param fn: callable returning an unpartitioned model instance.
    :return: *fn* unchanged, so this can also be used as a decorator
        (previously it returned None; returning fn is backward compatible).
    """
    model_name = fn.__name__
    NORMAL_MODEL_ENTRY_POINTS[model_name] = fn

    class EntryPointFunctionModelHandler(CommonModelHandler):
        # Thin handler delegating model construction to the registered factory.
        def __init__(self, normal_model_fn, *args, **kw):
            super().__init__(*args, **kw)
            self.normal_model_fn = normal_model_fn

        def _get_normal_model_instance(self, *args, **kwargs):
            return self.normal_model_fn(*args, **kwargs)

    handler = EntryPointFunctionModelHandler(normal_model_fn=fn)
    if (model_name in NORMAL_MODEL_ENTRY_POINTS_HANDLERS):
        # BUGFIX: corrected typo "exisits" -> "exists" in the warning text.
        warnings.warn(f'model_name {model_name} already exists in NORMAL_MODEL_ENTRY_POINTS_HANDLERS')
    NORMAL_MODEL_ENTRY_POINTS_HANDLERS[model_name] = handler
    return fn
def normal_model_entry_point(model_name):
    """Look up a registered "normal model" factory by name.

    :raises KeyError: when *model_name* was never registered.
    """
    registry = NORMAL_MODEL_ENTRY_POINTS
    return registry[model_name]
def normal_model_entry_point_handler(model_name):
    """Look up the handler wrapping a registered "normal model" factory.

    :raises KeyError: when *model_name* was never registered.
    """
    registry = NORMAL_MODEL_ENTRY_POINTS_HANDLERS
    return registry[model_name]
class PipelineConfig():
    """Config to handle basic partitioning.

    Wraps the generated config dict ``d``; ``d['stages']`` is a dict mapping
    int stage-id -> stage descriptor (as evidenced by the ``.items()`` /
    integer-indexed accesses throughout this class).
    """

    def __init__(self, d):
        self.d = d

    @property
    def n_ranks(self) -> int:
        """Total number of ranks = total number of devices over all stages."""
        # BUGFIX: iterate the stage descriptors (values), not the int stage-ids
        # (keys) — indexing an int with ['devices'] raised TypeError before.
        return sum(len(stage['devices']) for stage in self.d['stages'].values())

    def get_stage_to_ranks_map(self) -> Dict[int, List[int]]:
        """Assign consecutive rank numbers to stages in stage order."""
        counter = itertools.count()
        stage_to_ranks_map = {i: [next(counter) for _ in stage['devices']] for (i, stage) in self.d['stages'].items()}
        return stage_to_ranks_map

    def rank_to_stage_idx(self, rank) -> int:
        """Return the stage owning *rank* (ranks are numbered consecutively per stage)."""
        assert (rank >= 0)
        running_cumsum = 0
        for (i, stage) in self.d['stages'].items():
            running_cumsum += len(stage['devices'])
            if (rank < running_cumsum):
                return i
        raise ValueError(f'Invalid rank {rank}')

    @property
    def n_stages(self) -> int:
        return len(self.d['stages'])

    def get_shapes_for_stage(self, stage_id: int) -> Dict[str, torch.Size]:
        """Map tensor name -> shape over this stage's inputs and outputs."""
        res = {name: inorout['shape'] for (name, inorout) in chain(self.d['stages'][stage_id]['inputs'].items(), self.d['stages'][stage_id]['outputs'].items())}
        return res

    def get_dtypes_for_stage(self, stage_id: int) -> Dict[str, torch.dtype]:
        """Map tensor name -> dtype over this stage's inputs and outputs.

        (Return annotation fixed: this returns dtypes, not torch.Size.)
        """
        res = {name: inorout['dtype'] for (name, inorout) in chain(self.d['stages'][stage_id]['inputs'].items(), self.d['stages'][stage_id]['outputs'].items())}
        return res

    def change_batch(self, batch_size: int, for_replicated: bool=True):
        """Rewrite all recorded shapes in-place for a new batch size.

        When *for_replicated*, each stage's per-device batch is batch_size
        divided by its device count (must divide evenly).
        """
        d = self.d
        batch_dim = d['batch_dim']
        for inorout in chain(d['model_inputs'].values(), d['model_outputs'].values()):
            inorout['shape'] = atomic_batch_change(inorout['is_batched'], inorout['shape'], batch_dim, batch_size)
        for stage in d['stages'].values():
            if for_replicated:
                n_devices = len(stage['devices'])
                assert ((batch_size % n_devices) == 0)
                stage_batch_size = (batch_size // n_devices)
            else:
                # BUGFIX: stage_batch_size was previously undefined on this path.
                stage_batch_size = batch_size
            for inorout in chain(stage['inputs'].values(), stage['outputs'].values()):
                inorout['shape'] = atomic_batch_change(inorout['is_batched'], inorout['shape'], batch_dim, stage_batch_size)

    def realize_stage_for_rank(self, layers: Dict[str, Tensor], tensors: Dict[str, Tensor], batch_size: int, my_rank: int, for_replicated: bool=True, device='cpu') -> torch.nn.Module:
        """Adjust shapes to *batch_size* and instantiate the stage owned by *my_rank*."""
        stage_id = self.rank_to_stage_idx(my_rank)
        self.change_batch(batch_size=batch_size, for_replicated=for_replicated)
        return self.realize_stage(layers, tensors, stage_id, device=device)

    def realize_stage(self, layers: Dict[str, Tensor], tensors: Dict[str, Tensor], stage_id: int, device='cpu') -> torch.nn.Module:
        """Instantiate the generated stage class for *stage_id*."""
        d = self.d
        stage_cls = d['stages'][stage_id]['stage_cls']
        return stage_cls(layers, tensors, device=device)

    def filter_layers_and_tensors_for_stage(self, layers: Dict[str, Tensor], tensors: Dict[str, Tensor], stage_id: int, device='cpu'):
        """Keep only the layers/tensors the stage class declares it owns."""
        d = self.d
        stage_cls = d['stages'][stage_id]['stage_cls']
        filtered_layers = {i: v for (i, v) in layers.items() if (i in stage_cls.LAYER_SCOPES)}
        filtered_tensors = {i: v for (i, v) in tensors.items() if (i in stage_cls.TENSORS)}
        return (filtered_layers, filtered_tensors)

    def get_inputs_req_grad_for_stage(self, stage_id: int) -> Dict[str, bool]:
        """Map input tensor name -> requires_grad for this stage.

        NOTE(review): raises StopIteration on a stage with no inputs — assumed
        not to occur in practice.
        """
        my_inputs = self.d['stages'][stage_id]['inputs']
        if ('req_grad' in next(iter(my_inputs.values()))):
            return {i: v['req_grad'] for (i, v) in my_inputs.items()}
        else:
            raise NotImplementedError()

    def get_outputs_req_grad_for_stage(self, stage_id: int) -> Dict[str, bool]:
        """Infer grad requirements for output tensors."""
        my_outputs = self.d['stages'][stage_id]['outputs']
        if ('req_grad' in next(iter(my_outputs.values()))):
            return {i: v['req_grad'] for (i, v) in my_outputs.items()}
        # Legacy configs: infer each output's req_grad from consumers' inputs.
        warnings.warn('inferring output req grad from input req grad. (deprecated)')
        outputs_req_grad = {}
        for (i, stage) in self.d['stages'].items():
            for (name, r) in stage['inputs'].items():
                r = r['req_grad']
                if (name in my_outputs):
                    if (name in outputs_req_grad):
                        # All consumers must agree on req_grad.
                        assert (outputs_req_grad[name] == r)
                    outputs_req_grad[name] = r
        n_my_model_outputs = len([i for i in my_outputs if (i in self.d['model_outputs'])])
        assert (len(my_outputs) == (len(outputs_req_grad) + n_my_model_outputs)), (my_outputs, outputs_req_grad, n_my_model_outputs)
        if (not outputs_req_grad):
            assert (len(my_outputs) == n_my_model_outputs)
        return outputs_req_grad

    def get_dataset_inputs_for_stage(self, stage_id: int):
        """Enables auto-splitting the dataset: this stage's inputs fed by the dataloader."""
        pcs = self.d['stages'][stage_id]
        inputs_from_dl = [i for i in pcs['inputs'] if (i in self.d['model_inputs'])]
        return inputs_from_dl

    def is_first_forward_stage(self, stage_id: int) -> bool:
        # Depth counts backwards from the model output: first stage has max depth.
        return (self.get_depth_for_stage(stage_id) == (self.pipeline_depth - 1))

    def is_last_forward_stage(self, stage_id: int) -> bool:
        return (self.get_depth_for_stage(stage_id) == 0)

    def get_depth_for_stage(self, my_stage_id: int) -> int:
        """Depth of a stage = longest path (in stages) from the model output to it.

        Reads the precomputed 'stage_depth'; for legacy configs it is inferred
        via longest simple paths on the reversed data-flow graph and cached.
        """
        stage = self.d['stages'][my_stage_id]
        try:
            stage_depth = stage['stage_depth']
        except KeyError:
            warnings.warn('KeyError: missing stage_depth. Probably using old config. Will try to infer otherwise')
            # tensor name -> set of stages consuming it
            inputs_to_stage_ids = defaultdict(set)
            for (stage_id, s) in self.d['stages'].items():
                for input_name in s['inputs']:
                    inputs_to_stage_ids[input_name].add(stage_id)
            # Reversed edges: consumer -> producer, plus a virtual 'output' node.
            edges = list()
            for (stage_id, s) in self.d['stages'].items():
                for output_name in s['outputs']:
                    if (output_name in inputs_to_stage_ids):
                        edges.extend([(x, stage_id) for x in inputs_to_stage_ids[output_name]])
                    else:
                        assert (output_name in self.d['model_outputs'])
                        edges.append(('output', stage_id))
            import networkx as nx
            num_partitions = self.n_stages
            G = nx.DiGraph(list(edges))

            def longest_depth_length(target):
                # Longest simple path from 'output' to target, in edges, minus 1.
                return (reduce(max, map(len, nx.all_simple_edge_paths(G, source='output', target=target))) - 1)
            distance_dict = {i: longest_depth_length(i) for i in range(num_partitions)}
            for (i, v) in distance_dict.items():
                if (v < 0):
                    warnings.warn(f'Stage {i} was not used in output calculation. distance_dict={distance_dict}')
            if (len(set(distance_dict.values())) < num_partitions):
                warnings.warn(f"Detected parallel stages. Naive pipelines can't run this. distance_dict={distance_dict}")
            stage_depth = distance_dict[my_stage_id]
            stage['stage_depth'] = stage_depth  # cache for next time
        return stage_depth

    def sent_items_between_stages(self, send_stage, recv_stage, is_activations: bool=True):
        """Names of tensors send_stage transfers to recv_stage (activations or gradients)."""
        stage_id = send_stage
        targets = (self.d['stages'][stage_id]['outputs'] if is_activations else self.d['stages'][stage_id]['inputs'])
        names = set()
        for (name, tgt) in targets.items():
            # Model-level inputs/outputs are not transferred between stages.
            if ((name in self.d['model_inputs']) or (name in self.d['model_outputs'])):
                continue
            if ((not is_activations) and (not tgt['req_grad'])):
                continue
            if is_activations:
                if (recv_stage in tgt['used_by']):
                    names.add(name)
            elif (recv_stage == tgt['created_by']):
                names.add(name)
        return names

    def send_depth_between_stages(self, send_stage, recv_stage, specific_tensor_name=None, is_activations: bool=True):
        """Max depth difference over tensors sent from send_stage to recv_stage."""
        stage_to_depth = {x: self.get_depth_for_stage(x) for x in [send_stage, recv_stage]}
        stage_id = send_stage
        targets = (self.d['stages'][stage_id]['outputs'] if is_activations else self.d['stages'][stage_id]['inputs'])
        if (specific_tensor_name is not None):
            assert (specific_tensor_name in targets)
        stage_to_max_send_depth = 0
        for (name, tgt) in targets.items():
            if ((name in self.d['model_inputs']) or (name in self.d['model_outputs'])):
                continue
            if ((not is_activations) and (not tgt['req_grad'])):
                continue
            if ((specific_tensor_name is not None) and (name != specific_tensor_name)):
                continue
            my_depth = stage_to_depth[stage_id]
            tgt_dept = stage_to_depth[recv_stage]
            if is_activations:
                used_by_send_depth_diff = [(my_depth - tgt_dept)]
                if (recv_stage not in tgt['used_by']):
                    continue
            else:
                created_by = tgt['created_by']
                assert isinstance(created_by, int)
                if (created_by == (- 1)):
                    raise NotImplementedError(f'we assume model_inputs do not require grad. But got: {name}, {stage_id}')
                if (recv_stage != created_by):
                    continue
                used_by_send_depth_diff = [(tgt_dept - my_depth)]
            if used_by_send_depth_diff:
                stage_max_send_depth_for_tgt = max(used_by_send_depth_diff)
            else:
                stage_max_send_depth_for_tgt = 0
            stage_to_max_send_depth = max(stage_max_send_depth_for_tgt, stage_to_max_send_depth)
        return stage_to_max_send_depth

    def max_send_depth_dict(self, is_activations: bool=True) -> Dict[int, int]:
        """Per-stage maximum depth difference to any stage it sends to."""
        stage_to_depth = {x: self.get_depth_for_stage(x) for x in range(self.n_stages)}
        stage_to_max_send_depth = defaultdict(int)
        for stage_id in range(self.n_stages):
            targets = (self.d['stages'][stage_id]['outputs'] if is_activations else self.d['stages'][stage_id]['inputs'])
            for (name, tgt) in targets.items():
                if ((name in self.d['model_inputs']) or (name in self.d['model_outputs'])):
                    continue
                if ((not is_activations) and (not tgt['req_grad'])):
                    continue
                my_depth = stage_to_depth[stage_id]
                if is_activations:
                    used_by = tgt['used_by']
                    used_by_depth = [stage_to_depth[x] for x in used_by]
                    used_by_send_depth_diff = [(my_depth - x) for x in used_by_depth]
                else:
                    created_by = tgt['created_by']
                    assert isinstance(created_by, int)
                    if (created_by == (- 1)):
                        raise NotImplementedError(f'we assume model_inputs do not require grad. But got: {name}, {stage_id}')
                    else:
                        created_by_depth = stage_to_depth[created_by]
                        used_by_send_depth_diff = [(created_by_depth - my_depth)]
                if used_by_send_depth_diff:
                    stage_max_send_depth_for_tgt = max(used_by_send_depth_diff)
                else:
                    stage_max_send_depth_for_tgt = 0
                stage_to_max_send_depth[stage_id] = max(stage_max_send_depth_for_tgt, stage_to_max_send_depth[stage_id])
        return stage_to_max_send_depth

    def max_send_depth(self) -> int:
        """Maximum activation send depth over all stages."""
        max_send_depth_dict = self.max_send_depth_dict()
        return max(max_send_depth_dict.values())

    def max_send_depth_for_stage(self, stage_id: int) -> int:
        """Max of activation and gradient send depth for one stage; warns when > 1."""
        max_send_depth_dict_a = self.max_send_depth_dict(is_activations=True)
        max_send_depth_dict_g = self.max_send_depth_dict(is_activations=False)
        res = max(max_send_depth_dict_a[stage_id], max_send_depth_dict_g[stage_id])
        if (res > 1):
            warnings.warn(f'Stage: {stage_id} has max_send_depth={res}. This means holding multiple ({(res + 1)}) versions of activations/gradients in memory')
        return res

    @property
    def pipeline_depth(self) -> int:
        """Number of distinct depth levels in the pipeline."""
        return (max((self.get_depth_for_stage(x) for x in range(self.n_stages))) + 1)
def atomic_batch_change(atomic_is_batched, atomic_shape, dim, batch_size) -> torch.Size:
    """Return *atomic_shape* with its batch dimension replaced by *batch_size*.

    Non-batched shapes are returned unchanged. Batched shapes must be instances
    of the module-level _SHAPE_CLS; the result is rebuilt with the same class.

    :param atomic_is_batched: whether this shape has a batch dimension (must be bool).
    :param atomic_shape: the shape to adjust.
    :param dim: index of the batch dimension.
    :param batch_size: new batch size to write at *dim*.
    """
    assert isinstance(atomic_is_batched, bool)
    if not atomic_is_batched:
        return atomic_shape
    shape_cls = type(atomic_shape)
    assert shape_cls == _SHAPE_CLS
    new_dims = list(atomic_shape)
    new_dims[dim] = batch_size
    return shape_cls(new_dims)
def _t5_3b_pipedream_base_config():
    """Build a fresh base config shared by all t5-3b pipedream registry entries.

    A new dict (including the nested explicitly_set_dict) is built per call, so
    callers may mutate the result without affecting other entries.
    """
    return {
        'model_type': 'new_t5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {
            'return_dict': False,
            'use_cache': False,
            'output_only': True,
            'output_attentions': False,
            'precomputed_masks': False,
            'output_hidden_states': False,
        },
        'stateless_tied': True,
    }


def op_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream():
    """t5-3b pipedream config (op graph, seq 64, micro-batch 4, 8 partitions)."""
    return _t5_3b_pipedream_base_config()


def op_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream():
    """t5-3b pipedream config (op graph, seq 512, micro-batch 4, 8 partitions)."""
    return _t5_3b_pipedream_base_config()


def op_graph_t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream():
    """t5-3b pipedream config (op graph, seq 320, micro-batch 8, 8 partitions)."""
    return _t5_3b_pipedream_base_config()


def layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream():
    """t5-3b pipedream config (layer graph, seq 512, micro-batch 4, 8 partitions)."""
    return _t5_3b_pipedream_base_config()


def layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream():
    """t5-3b pipedream config (layer graph, seq 320, micro-batch 8, 8 partitions)."""
    return _t5_3b_pipedream_base_config()


def layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream():
    """t5-3b pipedream config (layer graph, seq 64, micro-batch 4, 8 partitions)."""
    return _t5_3b_pipedream_base_config()
def _t5_3b_mpipe_base_config():
    """Build a fresh base config shared by all t5-3b async/mpipe registry entries.

    Fresh dicts per call, so mutating one entry's result does not leak into others.
    """
    return {
        'model_type': 'new_t5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {
            'return_dict': False,
            'use_cache': False,
            'output_only': True,
            'output_attentions': False,
            'precomputed_masks': False,
            'output_hidden_states': False,
        },
        'stateless_tied': True,
    }


def op_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe():
    """t5-3b async/mpipe config (op graph, seq 512, micro-batch 4, 8 partitions)."""
    return _t5_3b_mpipe_base_config()


def layer_graph_t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe():
    """t5-3b async/mpipe config (layer graph, seq 512, micro-batch 4, 8 partitions)."""
    return _t5_3b_mpipe_base_config()


def op_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe():
    """t5-3b async/mpipe config (op graph, seq 320, micro-batch 8, 8 partitions)."""
    return _t5_3b_mpipe_base_config()


def layer_graph_t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe():
    """t5-3b async/mpipe config (layer graph, seq 320, micro-batch 8, 8 partitions)."""
    return _t5_3b_mpipe_base_config()


def op_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
    """t5-3b async/mpipe config (op graph, seq 64, micro-batch 4, 8 partitions)."""
    return _t5_3b_mpipe_base_config()


def layer_graph_t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
    """t5-3b async/mpipe config (layer graph, seq 64, micro-batch 4, 8 partitions)."""
    return _t5_3b_mpipe_base_config()
def roberta_large_8p_bw11_0_async_mnli_glue():
    """roberta-large GLUE/MNLI config (8 partitions, async)."""
    return {
        'model_type': 'roberta_glue',
        'model_name_or_path': 'roberta-large',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {'precompute_attention_mask': True},
        'num_labels': 3,
        'finetuning_task': 'mnli',
    }


def bert_large_uncased_whole_word_masking_8p_bw11_0_async_rte_glue():
    """bert-large (whole-word-masking) GLUE/RTE config (8 partitions, async)."""
    return {
        'model_type': 'bert_glue',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': False,
        'output_past': False,
        'explicitly_set_dict': {'precompute_attention_mask': True},
        'stateless_tied': False,
        'num_labels': 2,
        'finetuning_task': 'rte',
    }


def bert_large_uncased_whole_word_masking_8p_bw11_0_async_mnli_glue():
    """bert-large (whole-word-masking) GLUE/MNLI config (8 partitions, async)."""
    return {
        'model_type': 'bert_glue',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': False,
        'num_labels': 3,
        'explicitly_set_dict': {'precompute_attention_mask': True},
        'finetuning_task': 'mnli',
    }


def bert_base_uncased_4p_bw11_0_async_mnli_glue():
    """bert-base GLUE/MNLI config (4 partitions, async); no explicitly_set_dict."""
    return {
        'model_type': 'bert_glue',
        'model_name_or_path': 'bert-base-uncased',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': False,
        'num_labels': 3,
        'finetuning_task': 'mnli',
    }


def bert_base_uncased_8p_bw11_0_async_mnli_glue():
    """bert-base GLUE/MNLI config (8 partitions, async)."""
    return {
        'model_type': 'bert_glue',
        'model_name_or_path': 'bert-base-uncased',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': False,
        'num_labels': 3,
        'explicitly_set_dict': {'precompute_attention_mask': True},
        'finetuning_task': 'mnli',
    }


def roberta_base_8p_bw11_0_async_mnli_glue():
    """roberta-base GLUE/MNLI config (8 partitions, async)."""
    return {
        'model_type': 'roberta_glue',
        'model_name_or_path': 'roberta-base',
        'do_lower_case': False,
        'output_past': False,
        'stateless_tied': False,
        'num_labels': 3,
        'explicitly_set_dict': {'precompute_attention_mask': True},
        'finetuning_task': 'mnli',
    }
def _gpt2_lm_config(model_name_or_path, stateless_tied, model_type='gpt2_lm_stateless'):
    """Build a fresh GPT-2 LM config (key order matches the original factories)."""
    return {
        'model_type': model_type,
        'model_name_or_path': model_name_or_path,
        'do_lower_case': False,
        'explicitly_set_dict': {'output_past': False},
        'stateless_tied': stateless_tied,
    }


def gpt2_p4_lm_untied():
    """gpt2 LM, 4 partitions, untied embeddings."""
    return _gpt2_lm_config('gpt2', False)


def gpt2_p4_lm_tied():
    """gpt2 LM, 4 partitions, tied (stateless) embeddings."""
    return _gpt2_lm_config('gpt2', True)


def new_gpt2_xl_tied_lm_p8_seq_512():
    """gpt2-xl LM, 8 partitions, seq 512.

    NOTE(review): despite "tied" in the name, the original config uses
    model_type 'gpt2_lm' with stateless_tied=False — preserved as-is.
    """
    return _gpt2_lm_config('gpt2-xl', False, model_type='gpt2_lm')


def old_gpt2xl_8p_untied():
    """Legacy gpt2-xl LM, 8 partitions, untied embeddings."""
    return _gpt2_lm_config('gpt2-xl', False)


def gpt2_xl_p8_lm_untied():
    """gpt2-xl LM, 8 partitions, untied embeddings."""
    return _gpt2_lm_config('gpt2-xl', False)


def gpt2_xl_p8_lm_tied():
    """gpt2-xl LM, 8 partitions, tied (stateless) embeddings."""
    return _gpt2_lm_config('gpt2-xl', True)
def _bert_base_squad_old_config():
    """Fresh legacy bert-base SQuAD config (pipedream/async variants share it)."""
    return {
        'model_type': 'bert_squad_old',
        'model_name_or_path': 'bert-base-uncased',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {'return_dict': False},
        'do_resize_token_embedding': False,
    }


def _bert_large_wwm_squad_config():
    """Fresh bert-large (whole-word-masking) SQuAD config shared by all variants."""
    return {
        'model_type': 'bert_squad',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
        'explicitly_set_dict': {'precompute_attention_mask': True, 'return_dict': False},
        'do_resize_token_embedding': False,
    }


def bert_large_uncased_squad_8p():
    """Legacy bert-large SQuAD config (8 partitions); no explicitly_set_dict."""
    return {
        'model_type': 'bert_squad_old',
        'model_name_or_path': 'bert-large-uncased-whole-word-masking',
        'do_lower_case': True,
        'output_past': False,
        'stateless_tied': False,
    }


def bert_base_uncaseds_384_2p_bw12_pipedream():
    """bert-base SQuAD pipedream config (seq 384, 2 partitions)."""
    return _bert_base_squad_old_config()


def bert_base_uncaseds_384_2p_bw12_async_pipedream():
    """bert-base SQuAD async pipedream config (seq 384, 2 partitions)."""
    return _bert_base_squad_old_config()


def bert_large_uncased_whole_word_maskings_384_2p_bw12_pipedream():
    """bert-large SQuAD pipedream config (seq 384, 2 partitions)."""
    return _bert_large_wwm_squad_config()


def bert_large_uncased_whole_word_maskings_384_2p_bw12_async_pipedream():
    """bert-large SQuAD async pipedream config (seq 384, 2 partitions)."""
    return _bert_large_wwm_squad_config()


def bert_large_uncased_whole_word_maskings_384_8p_bw12_pipedream():
    """bert-large SQuAD pipedream config (seq 384, 8 partitions)."""
    return _bert_large_wwm_squad_config()


def bert_large_uncased_whole_word_maskings_384_8p_bw12_async_pipedream():
    """bert-large SQuAD async pipedream config (seq 384, 8 partitions)."""
    return _bert_large_wwm_squad_config()


def layer_bert_large_uncased_whole_word_maskings_384_8p_bw12_async_pipedream():
    """bert-large SQuAD async pipedream config (layer graph, seq 384, 8 partitions)."""
    return _bert_large_wwm_squad_config()


def bert_large_uncased_whole_word_maskings_384_4p_bw12_pipedream():
    """bert-large SQuAD pipedream config (seq 384, 4 partitions)."""
    return _bert_large_wwm_squad_config()


def bert_large_uncased_whole_word_maskings_384_4p_bw12_async_pipedream():
    """bert-large SQuAD async pipedream config (seq 384, 4 partitions)."""
    return _bert_large_wwm_squad_config()
def gpt2_p4_lm_tied_gpipe():
    """GPipe alias: delegates to the gpt2_p4_lm_tied preset unchanged."""
    return gpt2_p4_lm_tied()
def gpt2_p4_lm_untied_gpipe():
    """GPipe alias: delegates to the gpt2_p4_lm_untied preset unchanged."""
    return gpt2_p4_lm_untied()
def gpt2_xl_p8_lm_tied_gpipe():
    """GPipe alias: delegates to the gpt2_xl_p8_lm_tied preset unchanged."""
    return gpt2_xl_p8_lm_tied()
def gpt2_xl_p8_lm_untied_gpipe():
    """GPipe alias: delegates to the gpt2_xl_p8_lm_untied preset unchanged."""
    return gpt2_xl_p8_lm_untied()
def t5_small_tied_lmhead_4p_bw12_async_squad1():
    """Hard-coded kwargs preset for a stateless-tied t5-small SQuAD1 run; the pipeline variant is encoded in the function name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-small',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_large_tied_lmhead_8p_bw12_async_squad1():
    """Hard-coded kwargs preset for a stateless-tied t5-large SQuAD1 run; the pipeline variant is encoded in the function name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-large',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_320_8_8p_bw12_squad1():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run; the pipeline variant is encoded in the function name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_320_8_8p_bw12_squad1_virtual_stages():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_64_6_8p_bw12_squad1_virtual_stages():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_64_4_8p_bw12_squad1_virtual_stages():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_64_4_8p_bw12_async_squad1_mpipe():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_pipedream():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_320_8_8p_bw12_squad1_pipedream():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_320_8_8p_bw12_async_squad1_mpipe():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_512_4_8p_bw12_squad1_virtual_stages():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_512_4_8p_bw12_async_squad1_mpipe_L32():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_64_4_8p_bw12_squad1_acyclic():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_64_4_8p_bw12_squad1_pipedream():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_512_4_8p_bw12_squad1_acyclic():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_512_4_8p_bw12_squad1_pipedream():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_base_tied_lmheads_512_4_8p_bw12_squad1_pipedream():
    """Hard-coded kwargs preset for a stateless-tied t5-base SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-base',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_small_tied_lmheads_512_4_3p_bw12_squad1_virtual_stages():
    """Hard-coded kwargs preset for a stateless-tied t5-small SQuAD1 run (with token-embedding resize); variant encoded in the name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-small',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'do_resize_token_embedding': True,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
def t5_3b_tied_lmheads_64_4_8p_bw12_squad1():
    """Hard-coded kwargs preset for a stateless-tied t5-3b SQuAD1 run; the pipeline variant is encoded in the function name."""
    return {
        'model_type': 't5_stateless',
        'model_name_or_path': 't5-3b',
        'do_lower_case': False,
        'output_past': False,
        'output_attentions': False,
        'output_hidden_states': False,
        'explicitly_set_dict': {'output_only': True, 'output_attentions': False, 'precomputed_masks': True, 'output_hidden_states': False},
        'stateless_tied': True,
    }
class GetConfigFrom(Enum):
    """Enumerates the supported sources a model configuration can come from."""
    HardCoded = auto()
    ParsedArgs = auto()
    Generated = auto()
def resize_token_embeddings(model, tokenizer):
    """Resize `model`'s token-embedding matrix to the tokenizer's vocabulary size.

    Unwraps a `.module` wrapper (e.g. DataParallel/DDP) if present before resizing.
    """
    target = model.module if hasattr(model, 'module') else model
    target.resize_token_embeddings(len(tokenizer))
def pretrained_model_config_and_tokenizer(model_type: str,
                                          model_name_or_path: str,
                                          config_name: str = '',
                                          tokenizer_name: str = '',
                                          do_lower_case: bool = False,
                                          cache_dir: str = '',
                                          stateless_tied=False,
                                          do_resize_token_embedding=True,
                                          explicitly_set_dict=None,
                                          **config_kw):
    """Load a pretrained (model, tokenizer, config) triple for `model_type`.

    Args:
        model_type: key into the MODEL_TYPES registry selecting (config, model, tokenizer) classes.
        model_name_or_path: HF hub name or local path of the pretrained weights.
        config_name: optional separate config name/path (defaults to `model_name_or_path`).
        tokenizer_name: optional separate tokenizer name/path (defaults to `model_name_or_path`).
        do_lower_case: forwarded to the tokenizer's `from_pretrained`.
        cache_dir: optional download cache directory ('' means the library default).
        stateless_tied: if True, convert the loaded model to its "stateless" form.
        do_resize_token_embedding: if True, resize embeddings to the tokenizer's vocab size.
        explicitly_set_dict: attributes to set directly on the config after loading
            (default None, treated as {}).
        **config_kw: extra kwargs forwarded to the config's `from_pretrained`.

    Returns:
        (model, tokenizer, config) tuple.

    Raises:
        ValueError: when `stateless_tied` is requested but the model exposes no
            stateless-conversion hook.
    """
    # Fix: the original used a mutable default argument (`explicitly_set_dict={}`),
    # which is shared across calls; use a None sentinel instead.
    if explicitly_set_dict is None:
        explicitly_set_dict = {}
    config_class, model_class, tokenizer_class = MODEL_TYPES[model_type]
    config = config_class.from_pretrained(config_name if config_name else model_name_or_path,
                                          cache_dir=cache_dir if cache_dir else None,
                                          **config_kw)
    # Force-override config attributes that from_pretrained does not accept directly.
    for k, v in explicitly_set_dict.items():
        setattr(config, k, v)
    tokenizer = tokenizer_class.from_pretrained(tokenizer_name if tokenizer_name else model_name_or_path,
                                                do_lower_case=do_lower_case,
                                                cache_dir=cache_dir if cache_dir else None)
    extra_kwargs = {}
    if model_name_or_path in {'t5-11b'}:
        # NOTE(review): presumably t5-11b cannot be served from the CDN — confirm.
        extra_kwargs['use_cdn'] = False
    model = model_class.from_pretrained(model_name_or_path,
                                        from_tf=bool('.ckpt' in model_name_or_path),
                                        config=config,
                                        cache_dir=cache_dir if cache_dir else None,
                                        **extra_kwargs)
    if do_resize_token_embedding:
        resize_token_embeddings(model, tokenizer)
    if stateless_tied:
        # Unwrap a DataParallel/DDP `.module` wrapper before converting.
        model_to_fix = model.module if hasattr(model, 'module') else model
        if hasattr(model_to_fix, 'make_stateless_after_loaded_tied_and_resized'):
            model_to_fix.make_stateless_after_loaded_tied_and_resized()
        elif hasattr(model_to_fix, 'make_stateless'):
            model_to_fix.make_stateless()
        else:
            raise ValueError(f'Problem making model stateless. model_type: {model_type}')
    return (model, tokenizer, config)
def _dev():
    """Used to infer the mapping manually."""
    model_path = pathlib.Path('C:\\Users\\saareliad\\workspace\\ViT-B_16.npz')

    def read_npz_checkpoint(path):
        # Print the name -> shape mapping of every array in the .npz checkpoint.
        with np.load(path) as data:
            keys = data.files
            state_dict = {k: data[k] for k in keys}
            shapes = {k: data[k].shape for k in keys}
            pprint(shapes)

    def get_vit_pytorch(*args, **kwargs):
        # Print the name -> shape mapping of the PyTorch ViT state_dict for comparison.
        from pipe.models.registery.vit import vit_base_patch16_384_in21k
        model = vit_base_patch16_384_in21k()
        sd = model.state_dict()

        def shape_or_val(v):
            return v.shape if hasattr(v, 'shape') else v

        pprint({k: shape_or_val(v) for k, v in sd.items()})

    read_npz_checkpoint(path=model_path)
    get_vit_pytorch()
def map_checkpoint_to_state_dict(state_dict: Dict[str, np.ndarray]):
    """Convert a flat JAX/Flax ViT checkpoint dict ('a/b/c' keys -> ndarrays) into a
    PyTorch-style state dict of torch tensors with dotted keys.

    See: https://github.com/google/flax/blob/9015cc26d1d4a8b086e1bffacd157f863988fc4d/flax/linen/attention.py
    See: https://github.com/google-research/vision_transformer/blob/master/vit_jax/models.py

    Args:
        state_dict: mapping from slash-separated Flax parameter paths to numpy arrays.

    Returns:
        Dict[str, torch.Tensor] suitable for loading into the PyTorch ViT model.

    Raises:
        NotImplementedError: for MLP/LayerNorm sub-block indices this mapper does not know.
        ValueError: for any unrecognized path component.
    """
    d = {}
    for (full_s, v) in state_dict.items():
        split = full_s.split('/')
        # `new` accumulates the dotted PyTorch key prefix as we walk the Flax path.
        new = []
        for (i, s) in enumerate(split):
            if (s == 'Transformer'):
                # Container level only; contributes nothing to the PyTorch key.
                pass
            elif (m := re.match('encoderblock_([0-9]+)', s)):
                new.append(f'blocks.{m.group(1)}')
            elif (s == 'MultiHeadDotProductAttention_1'):
                new.append('attn')
                so_far = '/'.join(split[:(i + 1)])
                # Fuse separate Flax q/k/v kernels into the single PyTorch qkv weight.
                q = state_dict[f'{so_far}/query/kernel']
                k = state_dict[f'{so_far}/key/kernel']
                v = state_dict[f'{so_far}/value/kernel']
                # Flatten per-head dims, then transpose to PyTorch (out, in) layout.
                q = np.reshape(q, (q.shape[0], (- 1))).transpose((1, 0))
                k = np.reshape(k, (k.shape[0], (- 1))).transpose((1, 0))
                v = np.reshape(v, (v.shape[0], (- 1))).transpose((1, 0))
                qkv = np.concatenate([q, k, v])
                d['.'.join((new + ['qkv.weight']))] = qkv
                # Same fusion for the q/k/v biases.
                q = state_dict[f'{so_far}/query/bias']
                k = state_dict[f'{so_far}/key/bias']
                v = state_dict[f'{so_far}/value/bias']
                q = np.reshape(q, (- 1))
                k = np.reshape(k, (- 1))
                v = np.reshape(v, (- 1))
                qkv = np.concatenate([q, k, v])
                d['.'.join((new + ['qkv.bias']))] = qkv
                # Output projection: collapse the (heads, head_dim) input dims.
                out_kernel = state_dict[f'{so_far}/out/kernel']
                out_kernel = np.reshape(out_kernel, ((out_kernel.shape[0] * out_kernel.shape[1]), out_kernel.shape[2]))
                out_bias = state_dict[f'{so_far}/out/bias']
                d['.'.join((new + ['proj.weight']))] = out_kernel.transpose((1, 0))
                d['.'.join((new + ['proj.bias']))] = out_bias
                # All attention params for this block handled; skip the rest of the path.
                break
            elif (m := re.match('MlpBlock_([0-9]+)', s)):
                if (int(m.group(1)) != 3):
                    raise NotImplementedError()
                new.append(f'mlp')
            elif (m := re.match('Dense_([0-9]+)', s)):
                if (int(m.group(1)) not in {0, 1}):
                    raise NotImplementedError()
                so_far = '/'.join(split[:(i + 1)])
                # Dense_0 -> fc1, Dense_1 -> fc2; kernels transposed to (out, in).
                d['.'.join((new + [f'fc{(int(m.group(1)) + 1)}.weight']))] = state_dict[f'{so_far}/kernel'].transpose((1, 0))
                d['.'.join((new + [f'fc{(int(m.group(1)) + 1)}.bias']))] = state_dict[f'{so_far}/bias']
                break
            elif (m := re.match('LayerNorm_([0-9]+)', s)):
                # LayerNorm_0 -> norm1 (pre-attention), LayerNorm_2 -> norm2 (pre-MLP).
                if (int(m.group(1)) == 0):
                    normid = 1
                elif (int(m.group(1)) == 2):
                    normid = 2
                else:
                    raise NotImplementedError()
                so_far = '/'.join(split[:(i + 1)])
                d['.'.join((new + [f'norm{normid}.bias']))] = state_dict[f'{so_far}/bias']
                # Flax calls the LayerNorm weight 'scale'.
                d['.'.join((new + [f'norm{normid}.weight']))] = state_dict[f'{so_far}/scale']
                break
            elif (s == 'posembed_input'):
                so_far = '/'.join(split[:(i + 1)])
                d['pos_embed'] = state_dict[f'{so_far}/pos_embedding']
                break
            elif (s == 'embedding'):
                so_far = '/'.join(split[:(i + 1)])
                d['patch_embed.proj.bias'] = state_dict[f'{so_far}/bias']
                w = state_dict[f'{so_far}/kernel']
                # Conv kernel: Flax (H, W, in, out) -> PyTorch (out, in, H, W).
                d['patch_embed.proj.weight'] = w.transpose([3, 2, 0, 1])
                break
            elif (s == 'cls'):
                d['cls_token'] = state_dict['cls']
                break
            elif (s == 'head'):
                d['head.bias'] = state_dict['head/bias']
                d['head.weight'] = state_dict['head/kernel'].transpose((1, 0))
                break
            elif (s == 'encoder_norm'):
                so_far = '/'.join(split[:(i + 1)])
                d['.'.join((new + [f'norm.bias']))] = state_dict[f'{so_far}/bias']
                d['.'.join((new + [f'norm.weight']))] = state_dict[f'{so_far}/scale']
                break
            elif (s == 'pre_logits'):
                warnings.warn("ignoring 'pre_logits' since its unused")
                break
            else:
                raise ValueError(full_s)
    # Materialize everything as torch tensors.
    d = {k: torch.from_numpy(v) for (k, v) in d.items()}
    return d
class Adafactor(torch.optim.Optimizer):
    """Implements Adafactor algorithm.

    This implementation is based on:
    `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost`
    (see https://arxiv.org/abs/1804.04235)

    Note that this optimizer internally adjusts the learning rate
    depending on the *scale_parameter*, *relative_step* and
    *warmup_init* options. To use a manual (external) learning rate
    schedule you should set `scale_parameter=False` and
    `relative_step=False`.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): external learning rate (default: None)
        eps (tuple[float, float]): regularization constants for square gradient
            and parameter scale respectively (default: (1e-30, 1e-3))
        clip_threshold (float): threshold of root mean square of
            final gradient update (default: 1.0)
        decay_rate (float): coefficient used to compute running averages of square
            gradient (default: -0.8)
        beta1 (float): coefficient used for computing running averages of gradient
            (default: None)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        scale_parameter (bool): if True, learning rate is scaled by root mean square of
            parameter (default: True)
        relative_step (bool): if True, time-dependent learning rate is computed
            instead of external learning rate (default: True)
        warmup_init (bool): time-dependent learning rate computation depends on
            whether warm-up initialization is being used (default: False)
    """

    def __init__(self, params, lr=None, eps=(1e-30, 0.001), clip_threshold=1.0, decay_rate=(- 0.8), beta1=None,
                 weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False):
        # An external lr and relative-step lr are mutually exclusive.
        if ((lr is not None) and relative_step):
            raise ValueError('Cannot combine manual lr and relative_step options')
        if (warmup_init and (not relative_step)):
            raise ValueError('warmup_init requires relative_step=True')
        defaults = dict(lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1,
                        weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step,
                        warmup_init=warmup_init)
        super(Adafactor, self).__init__(params, defaults)

    @property
    def supports_memory_efficient_fp16(self):
        # Gradients/state are cast to fp32 internally, so fp16 params are fine.
        return True

    @property
    def supports_flat_params(self):
        return False

    def _get_lr(self, param_group, param_state):
        # Compute the effective step size, possibly relative to step count
        # and scaled by the parameter's RMS.
        rel_step_sz = param_group['lr']
        if param_group['relative_step']:
            min_step = ((1e-06 * param_state['step']) if param_group['warmup_init'] else 0.01)
            rel_step_sz = min(min_step, (1.0 / math.sqrt(param_state['step'])))
        param_scale = 1.0
        if param_group['scale_parameter']:
            param_scale = max(param_group['eps'][1], param_state['RMS'])
        return (param_scale * rel_step_sz)

    def _get_options(self, param_group, param_shape):
        # Factored second-moment estimation is used for matrices (rank >= 2);
        # first moment is tracked only when beta1 is given.
        factored = (len(param_shape) >= 2)
        use_first_moment = (param_group['beta1'] is not None)
        return (factored, use_first_moment)

    def _rms(self, tensor):
        # Root-mean-square of a tensor.
        return (tensor.norm(2) / (tensor.numel() ** 0.5))

    def _approx_sq_grad(self, exp_avg_sq_row, exp_avg_sq_col):
        # Reconstruct the rank-1 approximation of the squared-gradient matrix
        # from its row/column running averages.
        r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=(- 1), keepdim=True)).rsqrt_()
        c_factor = exp_avg_sq_col.rsqrt()
        return torch.mm(r_factor.unsqueeze((- 1)), c_factor.unsqueeze(0))

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                # Work in fp32 even for half-precision gradients.
                if (grad.dtype in {torch.float16, torch.bfloat16}):
                    grad = grad.float()
                if grad.is_sparse:
                    raise RuntimeError('Adafactor does not support sparse gradients.')
                state = self.state[p]
                grad_shape = grad.shape
                (factored, use_first_moment) = self._get_options(group, grad_shape)
                if (len(state) == 0):
                    # Lazy state initialization for this parameter.
                    state['step'] = 0
                    if use_first_moment:
                        state['exp_avg'] = torch.zeros_like(grad)
                    if factored:
                        # Row/column running averages instead of a full matrix
                        # (this is the sublinear-memory trick).
                        state['exp_avg_sq_row'] = torch.zeros(grad_shape[:(- 1)]).to(grad)
                        state['exp_avg_sq_col'] = torch.zeros((grad_shape[:(- 2)] + grad_shape[(- 1):])).to(grad)
                    else:
                        state['exp_avg_sq'] = torch.zeros_like(grad)
                    state['RMS'] = 0
                else:
                    # Move persisted state to the gradient's device/dtype.
                    if use_first_moment:
                        state['exp_avg'] = state['exp_avg'].to(grad)
                    if factored:
                        state['exp_avg_sq_row'] = state['exp_avg_sq_row'].to(grad)
                        state['exp_avg_sq_col'] = state['exp_avg_sq_col'].to(grad)
                    else:
                        state['exp_avg_sq'] = state['exp_avg_sq'].to(grad)
                p_data_fp32 = p.data
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p_data_fp32 = p_data_fp32.float()
                state['step'] += 1
                state['RMS'] = self._rms(p_data_fp32)
                group['lr'] = self._get_lr(group, state)
                # Time-dependent decay for the second-moment running average.
                beta2t = (1.0 - math.pow(state['step'], group['decay_rate']))
                update = ((grad ** 2) + group['eps'][0])
                if factored:
                    exp_avg_sq_row = state['exp_avg_sq_row']
                    exp_avg_sq_col = state['exp_avg_sq_col']
                    exp_avg_sq_row.mul_(beta2t).add_(update.mean(dim=(- 1)), alpha=(1.0 - beta2t))
                    exp_avg_sq_col.mul_(beta2t).add_(update.mean(dim=(- 2)), alpha=(1.0 - beta2t))
                    update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col)
                    update.mul_(grad)
                else:
                    exp_avg_sq = state['exp_avg_sq']
                    exp_avg_sq.mul_(beta2t).add_(update, alpha=(1.0 - beta2t))
                    update = exp_avg_sq.rsqrt().mul_(grad)
                # Clip the update by its RMS relative to clip_threshold.
                update.div_((self._rms(update) / group['clip_threshold']).clamp_(min=1.0))
                update.mul_(group['lr'])
                if use_first_moment:
                    exp_avg = state['exp_avg']
                    exp_avg.mul_(group['beta1']).add_(update, alpha=(1 - group['beta1']))
                    update = exp_avg
                if (group['weight_decay'] != 0):
                    # Decoupled weight decay applied directly to the fp32 copy.
                    p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                p_data_fp32.add_((- update))
                # Copy the fp32 result back into half-precision parameters.
                if (p.data.dtype in {torch.float16, torch.bfloat16}):
                    p.data.copy_(p_data_fp32)
        return loss
class Adam(Optimizer):
    """Implements Adam algorithm.

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_
            (default: False)

    .. _Adam\\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, amsgrad=False):
        # Validate hyper-parameters up front so bad configs fail fast.
        if (not (0.0 <= lr)):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (not (0.0 <= eps)):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (not (0.0 <= weight_decay)):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(Adam, self).__setstate__(state)
        # Unpickled states may predate the `amsgrad` option; default it off.
        for group in self.param_groups:
            group.setdefault('amsgrad', False)

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if (closure is not None):
            # Re-enable grad for the closure since step() runs under no_grad().
            with torch.enable_grad():
                loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state initialization on the first step for this parameter.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    if amsgrad:
                        state['max_exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                (beta1, beta2) = group['betas']
                state['step'] += 1
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                if (group['weight_decay'] != 0):
                    # Classic (coupled) L2 weight decay folded into the gradient.
                    grad = grad.add(p, alpha=group['weight_decay'])
                # Update biased first and second moment estimates in place.
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                if amsgrad:
                    # AMSGrad: use the running max of the second moment.
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    denom = (max_exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                else:
                    denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(group['eps'])
                step_size = (group['lr'] / bias_correction1)
                p.data.addcdiv_(exp_avg, denom, value=(- step_size))
        return loss