Dataset preview. Columns: id (int64, values 0 to 190k), prompt (string, length 21 to 13.4M characters), docstring (string, length 1 to 12k characters). Each record below is listed as id, prompt, docstring.

id: 182,526
prompt:
import math from typing import Dict, List, Optional import torch import torch.nn as nn from fairseq import utils from fairseq.distributed import fsdp_wrap from fairseq.models import FairseqEncoder from fairseq.modules import ( FairseqDropout, LayerDropModuleList, LayerNorm, PositionalEmbedding, SinusoidalPositionalEmbedding, ) from fairseq.modules import transformer_layer from fairseq.modules.checkpoint_activations import checkpoint_wrapper from fairseq.modules.quant_noise import quant_noise as apply_quant_noise_ from torch import Tensor from fairseq.models.transformer import ( TransformerConfig, ) def module_name_fordropout(module_name: str) -> str: if module_name == 'TransformerEncoderBase': return 'TransformerEncoder' else: return module_name
docstring: null
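
A quick illustration of the module_name_fordropout helper in the prompt above: it maps the refactored class name back to the legacy name used when naming the FairseqDropout module, and passes any other name through unchanged (standalone sketch, not part of the dataset row).

print(module_name_fordropout("TransformerEncoderBase"))  # -> "TransformerEncoder"
print(module_name_fordropout("TransformerDecoderBase"))  # -> "TransformerDecoderBase" (unchanged)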

id: 182,535
prompt:
import math import re from functools import partial from typing import List, Optional, Tuple import torch import torch.nn as nn from fairseq.models import ( FairseqEncoder, ) from fairseq.models.speech_to_text.utils import ( NoOp, lengths_to_padding_mask, segments_to_sequence, ) from fairseq.models.speech_to_text.utils import ( attention_suppression, layer_norm_backward_hook, ) from torch import Tensor, device as Device from torch.ao.quantization.qconfig import ( default_dynamic_qconfig, per_channel_dynamic_qconfig, ) class NoSegAugmentedMemoryTransformerEncoderLayer(FairseqEncoder): def __init__( self, input_dim, num_heads, ffn_dim, num_layers=1, dropout_in_attn=0.0, dropout_on_attn=0.0, dropout_on_fc1=0.0, dropout_on_fc2=0.0, segment_size=128, context_config=(0, 0), max_memory_size=0, scaled_init=True, std_scale=None, activation_fn="relu", tanh_on_mem=False, mini_batches=False, negative_inf="-inf", deep_init=True, summarization_method="mean", max_relative_position=0, rpe_old_option=True, ): def set_mini_batches(self, mini_batches): def _get_relative_position( self, input: Tensor, max_relative_position: int, left_context_length: int, past_length: int, is_decoding: bool, ): def _get_attention_mask(self, input, past_length=0, left_context_cache=0): def init_state( self, batch_size: int, device: Optional[Device] = None ) -> List[Tensor]: def batch_state(self, states: List[List[Tensor]]) -> List[Tensor]: def reorder_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: def reset_state(self, state: List[Tensor], indices: Tensor) -> List[Tensor]: def state_size(self) -> int: def batch_size_in_state( self, state: Optional[List[Tensor]], sloppy: bool = True ) -> Optional[int]: def gen_summary_queries(self, input): def _gen_right_context_padded_input(self, input): def _gen_segs_right_context(self, input, lengths): def forward( self, input: Tensor, padding_masks: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: def forward_jit_mini_batch_init( self, seg: Tensor, state: Optional[List[Tensor]] = None, is_decoding: bool = False, ): def state_update_before( self, layer: int, state: List[Tensor], past_length: int, past_left_context: int ): def state_update_after( self, layer: int, state: List[Tensor], mems: Tensor, next_key: Tensor, next_val: Tensor, mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], ): def state_update_after_loop( self, state: List[Tensor], mems_list: List[Tensor], lc_key_list: List[Tensor], lc_val_list: List[Tensor], update_length: int, ): def forward_mini_batches( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor], List[Tensor]]: def forward_jit_test( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: def forward_jit( self, input: Tensor, lengths: Tensor, state: Optional[List[Tensor]] = None ) -> Tuple[Tensor, Tensor, List[Tensor]]: def quantize_(self, params=None): def emformer_encoder(klass): class SpeechEncoder(klass): def __init__(self, args): super().__init__(args) stride = SpeechEncoder.conv_layer_stride(args) trf_left_context = args.segment_left_context // stride trf_right_context = args.segment_right_context // stride context_config = [trf_left_context, trf_right_context] self.transformer_layers = nn.ModuleList( [ NoSegAugmentedMemoryTransformerEncoderLayer( input_dim=args.encoder_embed_dim, num_heads=args.encoder_attention_heads, 
ffn_dim=args.encoder_ffn_embed_dim, num_layers=args.encoder_layers, dropout_in_attn=args.dropout, dropout_on_attn=args.dropout, dropout_on_fc1=args.dropout, dropout_on_fc2=args.dropout, activation_fn=args.activation_fn, context_config=context_config, segment_size=args.segment_length, max_memory_size=args.max_memory_size, scaled_init=True, # TODO: use constant for now. tanh_on_mem=args.amtrf_tanh_on_mem, ) ] ) def forward(self, src_tokens, src_lengths): encoder_out = super().forward(src_tokens, src_lengths) output = encoder_out["encoder_out"][0] encoder_padding_masks = encoder_out["encoder_padding_mask"][0] # This is because that in the original implementation # the output didn't consider the last segment as right context. encoder_padding_masks = encoder_padding_masks[:, : output.size(0)] return { "encoder_out": [output], "encoder_padding_mask": [encoder_padding_masks], "encoder_embedding": [], "encoder_states": [], "src_tokens": [], "src_lengths": [], } @staticmethod def conv_layer_stride(args): # TODO: make it configurable from the args return 4 SpeechEncoder.__name__ = klass.__name__ return SpeechEncoder
docstring: null

id: 182,536
prompt:
import logging import math from typing import Dict, List, Optional, Tuple from pathlib import Path import torch import torch.nn as nn from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) from torch import Tensor def s2t_transformer_s(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 8) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4) args.dropout = getattr(args, "dropout", 0.1) base_architecture(args) def s2t_transformer_xs(args): args.encoder_layers = getattr(args, "encoder_layers", 6) args.decoder_layers = getattr(args, "decoder_layers", 3) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 256 * 4) args.dropout = getattr(args, "dropout", 0.3) s2t_transformer_s(args)
docstring: null
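
The s2t_transformer_* presets above rely on getattr(args, name, default), so values the user already set take precedence over the preset defaults; a minimal sketch with a hypothetical Namespace (not from the dataset):

from argparse import Namespace

args = Namespace(dropout=0.3)                                     # user-provided value
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)  # preset fills the missing field
args.dropout = getattr(args, "dropout", 0.1)                      # user value is preserved
print(args.encoder_embed_dim, args.dropout)                       # 256 0.3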

id: 182,537
prompt:
import logging import math from typing import Dict, List, Optional, Tuple from pathlib import Path import torch import torch.nn as nn from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) from torch import Tensor def s2t_transformer_s(args): def s2t_transformer_sp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_s(args)
docstring: null

id: 182,538
prompt:
import logging import math from typing import Dict, List, Optional, Tuple from pathlib import Path import torch import torch.nn as nn from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) from torch import Tensor def s2t_transformer_m(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 512 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.dropout = getattr(args, "dropout", 0.15) base_architecture(args) def s2t_transformer_mp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_m(args)
docstring: null

id: 182,539
prompt:
import logging import math from typing import Dict, List, Optional, Tuple from pathlib import Path import torch import torch.nn as nn from fairseq import checkpoint_utils, utils from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.modules import ( FairseqDropout, LayerNorm, PositionalEmbedding, TransformerEncoderLayer, ) from torch import Tensor def s2t_transformer_l(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 1024 * 4) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.2) base_architecture(args) def s2t_transformer_lp(args): args.encoder_layers = getattr(args, "encoder_layers", 16) s2t_transformer_l(args)
docstring: null

id: 182,561
prompt:
import logging import copy from typing import Dict, List, Optional, Tuple from fairseq import utils, checkpoint_utils from fairseq.models import (FairseqEncoderDecoderModel, FairseqEncoder, register_model, register_model_architecture) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.models.wav2vec import Wav2VecEncoder from fairseq.modules.layer_norm import LayerNorm from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.utils import safe_hasattr from torch import Tensor import torch.nn as nn def add_wav2vec_asr_args(parser): parser.add_argument("--w2v-path", help="path to wav2vec 2.0 model") parser.add_argument( "--no-pretrained-weights", action="store_true", help="if true, does not load pretrained weights", ) parser.add_argument( "--dropout-input", type=float, metavar="D", help="dropout to apply to the input (after feat extr)", ) parser.add_argument( "--final-dropout", type=float, metavar="D", help="dropout after transformer and before final projection", ) parser.add_argument( "--apply-mask", action="store_true", help="apply masking during fine-tuning" ) parser.add_argument( "--dropout", type=float, metavar="D", help="dropout probability inside wav2vec 2.0 model", ) parser.add_argument( "--attention-dropout", type=float, metavar="D", help="dropout probability for attention weights inside wav2vec 2.0 model", ) parser.add_argument( "--activation-dropout", "--relu-dropout", type=float, metavar="D", help="dropout probability after activation in FFN inside wav2vec 2.0 model", ) parser.add_argument( "--mask-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-prob", type=float, help="probability of replacing a token with mask" ) parser.add_argument( "--mask-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--mask-channel-length", type=int, help="repeat the mask indices multiple times" ) parser.add_argument( "--mask-channel-prob", type=float, help="probability of replacing a token with mask", ) parser.add_argument( "--mask-channel-selection", type=str, choices=["static", "uniform", "normal", "poisson"], help="how to choose masks", ) parser.add_argument( "--mask-channel-other", type=float, help="stdev of the mask length in case of 'normal' selection strategy", ) parser.add_argument( "--no-mask-channel-overlap", action="store_true", help="whether to allow masks to overlap", ) parser.add_argument( "--freeze-finetune-updates", default=0, type=int, help="dont finetune wav2vec for this many updates", ) parser.add_argument( "--feature-grad-mult", default=None, type=float, help="reset feature grad mult in wav2vec 2.0 to this", ) parser.add_argument( "--layerdrop", default=0.0, type=float, help="probability of dropping a layer in wav2vec 2.0", ) parser.add_argument("--w2v-args", default=None)
docstring: null
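
A hedged sketch of wiring add_wav2vec_asr_args from the row above into a standalone parser; the checkpoint path is a made-up placeholder, and options without an explicit default simply stay None:

import argparse

parser = argparse.ArgumentParser()
add_wav2vec_asr_args(parser)
args = parser.parse_args(["--w2v-path", "/tmp/wav2vec_small.pt", "--apply-mask"])
print(args.apply_mask, args.freeze_finetune_updates, args.layerdrop)  # True 0 0.0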

id: 182,562
prompt:
import logging import copy from typing import Dict, List, Optional, Tuple from fairseq import utils, checkpoint_utils from fairseq.models import (FairseqEncoderDecoderModel, FairseqEncoder, register_model, register_model_architecture) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.models.wav2vec import Wav2VecEncoder from fairseq.modules.layer_norm import LayerNorm from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.utils import safe_hasattr from torch import Tensor import torch.nn as nn def add_decoder_args(parser): parser.add_argument("--activation-fn", type=str, default='relu', choices=utils.get_available_activation_fns(), help="activation function to use") parser.add_argument("--decoder-dropout", type=float, metavar="D", help="dropout probability") parser.add_argument("--decoder-attention-dropout", type=float, metavar="D", help="dropout probability for attention weights") parser.add_argument("--decoder-activation-dropout", type=float, metavar="D", help="dropout probability after activation in FFN.") parser.add_argument("--decoder-embed-dim", type=int, metavar="N", help="decoder embedding dimension") parser.add_argument("--decoder-ffn-embed-dim", type=int, metavar="N", help="decoder embedding dimension for FFN") parser.add_argument("--decoder-layers", type=int, metavar="N", help="num decoder layers") parser.add_argument("--decoder-attention-heads", type=int, metavar="N", help="num decoder attention heads") parser.add_argument("--decoder-normalize-before", action="store_true", help="apply layernorm before each decoder block") parser.add_argument("--layernorm-embedding", action="store_true", help="add layernorm to embedding") parser.add_argument("--no-scale-embedding", action="store_true", help="if True, dont scale embeddings") parser.add_argument( "--load-pretrained-decoder-from", type=str, metavar="STR", help="model to take decoder weights from (for initialization)" ) parser.add_argument("--finetune-decoder-params", type=str, metavar="STR", help="comma-separated param strings to finetune.") parser.add_argument("--checkpoint-activations", action="store_true")
docstring: null

id: 182,563
prompt:
import logging import copy from typing import Dict, List, Optional, Tuple from fairseq import utils, checkpoint_utils from fairseq.models import (FairseqEncoderDecoderModel, FairseqEncoder, register_model, register_model_architecture) from fairseq.models.transformer import Embedding, TransformerDecoder from fairseq.models.wav2vec import Wav2VecEncoder from fairseq.modules.layer_norm import LayerNorm from fairseq.data.data_utils import lengths_to_padding_mask from fairseq.utils import safe_hasattr from torch import Tensor import torch.nn as nn def set_default_w2v_encoder_args(args): args.no_pretrained_weights = getattr(args, "no_pretrained_weights", False) args.dropout_input = getattr(args, "dropout_input", 0) args.final_dropout = getattr(args, "final_dropout", 0) args.apply_mask = getattr(args, "apply_mask", False) args.dropout = getattr(args, "dropout", 0) args.attention_dropout = getattr(args, "attention_dropout", 0) args.activation_dropout = getattr(args, "activation_dropout", 0) args.mask_length = getattr(args, "mask_length", 10) args.mask_prob = getattr(args, "mask_prob", 0.5) args.mask_selection = getattr(args, "mask_selection", "static") args.mask_other = getattr(args, "mask_other", 0) args.no_mask_overlap = getattr(args, "no_mask_overlap", False) args.mask_channel_length = getattr(args, "mask_channel_length", 10) args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5) args.mask_channel_before = getattr(args, "mask_channel_before", False) args.mask_channel_selection = getattr(args, "mask_channel_selection", "static") args.mask_channel_other = getattr(args, "mask_channel_other", 0) args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False) args.freeze_finetune_updates = getattr(args, "freeze_finetune_updates", 0) args.feature_grad_mult = 0.1 args.layerdrop = getattr(args, "layerdrop", 0.0) args.normalize = getattr(args, "normalize", False) def set_default_adaptor_args(args): args.adaptor_n_layers = getattr(args, "adaptor_n_layers", 3) args.adaptor_kernel_size = getattr(args, "adaptor_kernel_size", 3) args.adaptor_stride = getattr(args, "adaptor_stride", 2) args.adaptor_layernorm = getattr(args, "adaptor_layernorm", False) def set_default_mbart_decoder_args(args): args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1024) args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 4 * 1024) args.decoder_layers = getattr(args, 'decoder_layers', 12) args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 16) args.decoder_normalize_before = getattr(args, 'decoder_normalize_before', True) args.decoder_learned_pos = getattr(args, 'decoder_learned_pos', True) args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0) args.adaptive_input = getattr(args, "adaptive_input", False) args.decoder_attention_dropout = getattr(args, 'decoder_attention_dropout', 0.) args.decoder_activation_dropout = getattr(args, 'decoder_activation_dropout', 0.) 
args.decoder_dropout = getattr(args, 'decoder_dropout', 0.1) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', None) args.adaptive_softmax_dropout = getattr(args, 'adaptive_softmax_dropout', 0) args.share_decoder_input_output_embed = getattr( args, 'share_decoder_input_output_embed', True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.decoder_output_dim = getattr(args, 'decoder_output_dim', args.decoder_embed_dim) args.decoder_input_dim = getattr(args, 'decoder_input_dim', args.decoder_embed_dim) args.no_scale_embedding = getattr(args, 'no_scale_embedding', False) args.quant_noise_pq = getattr(args, "quant_noise_pq", 0) args.layernorm_embedding = getattr(args, 'layernorm_embedding', True) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.pooler_dropout = getattr(args, 'pooler_dropout', 0.0) args.checkpoint_activations = getattr(args, "checkpoint_activations", False) @register_model_architecture(model_name="xm_transformer", arch_name="xm_transformer") def base_architecture(args): set_default_w2v_encoder_args(args) set_default_adaptor_args(args) set_default_mbart_decoder_args(args)
docstring: null

id: 182,580
prompt:
import logging import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture) from fairseq.modules import ( LayerNorm, PositionalEmbedding, FairseqDropout, MultiheadAttention ) from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask def model_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
docstring: null

id: 182,581
prompt:
import logging import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture) from fairseq.modules import ( LayerNorm, PositionalEmbedding, FairseqDropout, MultiheadAttention ) from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask def Embedding(num_embeddings, embedding_dim, padding_idx=None): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) return m
docstring: null

id: 182,582
prompt:
import logging import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture) from fairseq.modules import ( LayerNorm, PositionalEmbedding, FairseqDropout, MultiheadAttention ) from fairseq import utils from fairseq.data.data_utils import lengths_to_padding_mask def base_architecture(args): args.dropout = getattr(args, "dropout", 0.2) args.output_frame_dim = getattr(args, "output_frame_dim", 80) args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) # FFT blocks args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024) args.fft_kernel_size = getattr(args, "fft_kernel_size", 9) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.encoder_layers = getattr(args, "encoder_layers", 4) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2) args.decoder_layers = getattr(args, "decoder_layers", 4) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2) # variance predictor args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256) args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256) args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3) args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
docstring: null

id: 182,583
prompt:
import logging import json from typing import Dict import numpy as np import torch from torch import nn import torch.nn.functional as F from fairseq.data.audio.audio_utils import ( get_window, get_fourier_basis, get_mel_filters, TTSSpectrogram ) from fairseq.data.audio.speech_to_text_dataset import S2TDataConfig from fairseq.models.text_to_speech.hifigan import Generator as HiFiGANModel class GriffinLimVocoder(nn.Module): def __init__(self, sample_rate, win_size, hop_size, n_fft, n_mels, f_min, f_max, window_fn, spec_bwd_max_iter=32, fp16=False): super().__init__() self.inv_mel_transform = PseudoInverseMelScale( n_stft=n_fft // 2 + 1, n_mels=n_mels, sample_rate=sample_rate, f_min=f_min, f_max=f_max ) self.gl_transform = GriffinLim( n_fft=n_fft, win_length=win_size, hop_length=hop_size, window_fn=window_fn, n_iter=spec_bwd_max_iter ) if fp16: self.half() self.inv_mel_transform.half() self.gl_transform.half() else: self.float() self.inv_mel_transform.float() self.gl_transform.float() def forward(self, x): # x: (B x) T x D -> (B x) 1 x T # NOTE: batched forward produces noisier waveform. recommend running # one utterance at a time self.eval() x = x.exp().transpose(-1, -2) x = self.inv_mel_transform(x) x = self.gl_transform(x) return x def from_data_cfg(cls, args, data_cfg: S2TDataConfig): feat_cfg = data_cfg.config["features"] window_fn = getattr(torch, feat_cfg["window_fn"] + "_window") return cls( sample_rate=feat_cfg["sample_rate"], win_size=int(feat_cfg["win_len_t"] * feat_cfg["sample_rate"]), hop_size=int(feat_cfg["hop_len_t"] * feat_cfg["sample_rate"]), n_fft=feat_cfg["n_fft"], n_mels=feat_cfg["n_mels"], f_min=feat_cfg["f_min"], f_max=feat_cfg["f_max"], window_fn=window_fn, spec_bwd_max_iter=args.spec_bwd_max_iter, fp16=args.fp16 ) class HiFiGANVocoder(nn.Module): def __init__( self, checkpoint_path: str, model_cfg: Dict[str, str], fp16: bool = False ) -> None: super().__init__() self.model = HiFiGANModel(model_cfg) state_dict = torch.load(checkpoint_path) self.model.load_state_dict(state_dict["generator"]) if fp16: self.model.half() logger.info(f"loaded HiFiGAN checkpoint from {checkpoint_path}") def forward(self, x: torch.Tensor) -> torch.Tensor: # (B x) T x D -> (B x) 1 x T model = self.model.eval() if len(x.shape) == 2: return model(x.unsqueeze(0).transpose(1, 2)).detach().squeeze(0) else: return model(x.transpose(-1, -2)).detach() def from_data_cfg(cls, args, data_cfg: S2TDataConfig): vocoder_cfg = data_cfg.vocoder assert vocoder_cfg.get("type", "griffin_lim") == "hifigan" with open(vocoder_cfg["config"]) as f: model_cfg = json.load(f) return cls(vocoder_cfg["checkpoint"], model_cfg, fp16=args.fp16) class S2TDataConfig(object): """Wrapper class for data config YAML""" def __init__(self, yaml_path): try: import yaml except ImportError: print("Please install PyYAML to load YAML files for " "S2T data config") self.config = {} if op.isfile(yaml_path): try: with open(yaml_path) as f: self.config = yaml.load(f, Loader=yaml.FullLoader) except Exception as e: logger.info(f"Failed to load config from {yaml_path}: {e}") else: logger.info(f"Cannot find {yaml_path}") def vocab_filename(self): """fairseq vocabulary file under data root""" return self.config.get("vocab_filename", "dict.txt") def shuffle(self) -> bool: """Shuffle dataset samples before batching""" return self.config.get("shuffle", False) def pre_tokenizer(self) -> Dict: """Pre-tokenizer to apply before subword tokenization. 
Returning a dictionary with `tokenizer` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" return self.config.get("pre_tokenizer", {"tokenizer": None}) def bpe_tokenizer(self) -> Dict: """Subword tokenizer to apply after pre-tokenization. Returning a dictionary with `bpe` providing the tokenizer name and the other items providing the tokenizer-specific arguments. Tokenizers are defined in `fairseq.data.encoders.*`""" return self.config.get("bpe_tokenizer", {"bpe": None}) def prepend_tgt_lang_tag(self) -> bool: """Prepend target lang ID token as the target BOS (e.g. for to-many multilingual setting). During inference, this requires `--prefix-size 1` to force BOS to be lang ID token.""" return self.config.get("prepend_tgt_lang_tag", False) def input_feat_per_channel(self): """The dimension of input features (per audio channel)""" return self.config.get("input_feat_per_channel", 80) def input_channels(self): """The number of channels in the input audio""" return self.config.get("input_channels", 1) def sampling_alpha(self): """Hyper-parameter alpha = 1/T for temperature-based resampling. (alpha = 1 for no resampling)""" return self.config.get("sampling_alpha", 1.0) def use_audio_input(self): """Needed by the dataset loader to see if the model requires raw audio as inputs.""" return self.config.get("use_audio_input", False) def audio_root(self): """Audio paths in the manifest TSV can be relative and this provides the root path. Set this to empty string when using absolute paths.""" return self.config.get("audio_root", "") def get_feature_transforms(self, split, is_train): """Split-specific feature transforms. Allowing train set wildcard `_train`, evaluation set wildcard `_eval` and general wildcard `*` for matching.""" from copy import deepcopy cfg = deepcopy(self.config) _cur = cfg.get("transforms", {}) cur = _cur.get(split) cur = _cur.get("_train") if cur is None and is_train else cur cur = _cur.get("_eval") if cur is None and not is_train else cur cur = _cur.get("*") if cur is None else cur cfg["transforms"] = cur return cfg def get_vocoder(args, data_cfg: S2TDataConfig): if args.vocoder == "griffin_lim": return GriffinLimVocoder.from_data_cfg(args, data_cfg) elif args.vocoder == "hifigan": return HiFiGANVocoder.from_data_cfg(args, data_cfg) else: raise ValueError("Unknown vocoder")
docstring: null
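
get_feature_transforms in the row above resolves split-specific transforms through wildcards in a fixed order (exact split name, then _train or _eval, then *); a small stand-alone re-trace of that precedence with a hypothetical config dict:

transforms = {"_train": ["specaugment"], "*": ["utterance_cmvn"]}

def resolve(split, is_train):
    cur = transforms.get(split)
    cur = transforms.get("_train") if cur is None and is_train else cur
    cur = transforms.get("_eval") if cur is None and not is_train else cur
    cur = transforms.get("*") if cur is None else cur
    return cur

print(resolve("train", is_train=True))  # ['specaugment']
print(resolve("dev", is_train=False))   # ['utterance_cmvn'], falls through to "*"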

id: 182,586
prompt:
import logging from typing import List, Optional import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import ( TransformerEncoderLayer, TransformerDecoderLayer ) from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout from fairseq.data.data_utils import lengths_to_padding_mask from fairseq import utils def encoder_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
docstring: null

id: 182,587
prompt:
import logging from typing import List, Optional import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import ( TransformerEncoderLayer, TransformerDecoderLayer ) from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout from fairseq.data.data_utils import lengths_to_padding_mask from fairseq import utils def Embedding(num_embeddings, embedding_dim): m = nn.Embedding(num_embeddings, embedding_dim) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) return m
docstring: null

id: 182,588
prompt:
import logging from typing import List, Optional import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import ( TransformerEncoderLayer, TransformerDecoderLayer ) from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout from fairseq.data.data_utils import lengths_to_padding_mask from fairseq import utils def decoder_init(m): if isinstance(m, torch.nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
docstring: null

id: 182,589
prompt:
import logging from typing import List, Optional import torch from torch import nn from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import ( TransformerEncoderLayer, TransformerDecoderLayer ) from fairseq.models.text_to_speech.tacotron2 import Prenet, Postnet from fairseq.modules import LayerNorm, PositionalEmbedding, FairseqDropout from fairseq.data.data_utils import lengths_to_padding_mask from fairseq import utils def base_architecture(args): args.dropout = getattr(args, "dropout", 0.1) args.output_frame_dim = getattr(args, "output_frame_dim", 80) args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 64) # encoder prenet args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) # encoder transformer layers args.encoder_transformer_layers = getattr(args, "encoder_transformer_layers", 6) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4 * args.encoder_embed_dim) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 4) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") # decoder prenet args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) # decoder postnet args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) # decoder transformer layers args.decoder_transformer_layers = getattr(args, "decoder_transformer_layers", 6) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 512) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4 * args.decoder_embed_dim) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 4)
docstring: null

id: 182,590
prompt:
import logging import torch from torch import nn from torch.nn import functional as F from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import LSTMCellWithZoneOut, LocationAttention def encoder_init(m): if isinstance(m, nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("relu"))
docstring: null

id: 182,591
prompt:
import logging import torch from torch import nn from torch.nn import functional as F from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import LSTMCellWithZoneOut, LocationAttention def decoder_init(m): if isinstance(m, torch.nn.Conv1d): nn.init.xavier_uniform_(m.weight, torch.nn.init.calculate_gain("tanh"))
docstring: null

id: 182,592
prompt:
import logging import torch from torch import nn from torch.nn import functional as F from fairseq.models import (FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, register_model_architecture) from fairseq.modules import LSTMCellWithZoneOut, LocationAttention def base_architecture(args): # encoder args.encoder_dropout = getattr(args, "encoder_dropout", 0.5) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_conv_layers = getattr(args, "encoder_conv_layers", 3) args.encoder_conv_kernel_size = getattr(args, "encoder_conv_kernel_size", 5) args.encoder_lstm_layers = getattr(args, "encoder_lstm_layers", 1) # decoder args.attention_dim = getattr(args, "attention_dim", 128) args.attention_conv_dim = getattr(args, "attention_conv_dim", 32) args.attention_conv_kernel_size = getattr(args, "attention_conv_kernel_size", 15) args.prenet_dropout = getattr(args, "prenet_dropout", 0.5) args.prenet_layers = getattr(args, "prenet_layers", 2) args.prenet_dim = getattr(args, "prenet_dim", 256) args.postnet_dropout = getattr(args, "postnet_dropout", 0.5) args.postnet_layers = getattr(args, "postnet_layers", 5) args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512) args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5) args.init_attn_c = getattr(args, "init_attn_c", "zero") args.attention_use_cumprob = getattr(args, "attention_use_cumprob", True) args.zoneout = getattr(args, "zoneout", 0.1) args.decoder_lstm_layers = getattr(args, "decoder_lstm_layers", 2) args.decoder_lstm_dim = getattr(args, "decoder_lstm_dim", 1024) args.output_frame_dim = getattr(args, "output_frame_dim", 80)
docstring: null

id: 182,593
prompt:
import logging from argparse import Namespace from typing import Dict, List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.data import Dictionary from fairseq.dataclass.utils import ( convert_namespace_to_omegaconf, gen_parser_from_dataclass, ) from fairseq.models import FairseqDecoder, FairseqEncoder from omegaconf import DictConfig from torch import Tensor def check_type(module, expected_type): if hasattr(module, "unwrapped_module"): assert isinstance(module.unwrapped_module, expected_type), \ f"{type(module.unwrapped_module)} != {expected_type}" else: assert isinstance(module, expected_type), f"{type(module)} != {expected_type}"
docstring: null
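
check_type above unwraps modules that expose unwrapped_module (e.g. activation-checkpoint or FSDP wrappers) before running the isinstance assertion; a minimal usage sketch:

import torch.nn as nn

check_type(nn.Linear(4, 4), nn.Linear)   # passes silently
try:
    check_type(nn.Linear(4, 4), nn.Conv1d)
except AssertionError as err:
    print(err)                           # "<class '...Linear'> != <class '...Conv1d'>"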

id: 182,595
prompt:
from argparse import Namespace import contextlib import copy import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from dataclasses import dataclass, field from omegaconf import MISSING, II, open_dict from typing import Any, Optional from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks import FairseqTask from fairseq.models import ( BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, ) from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES from fairseq.modules import ( LayerNorm, PositionalEmbedding, TransformerDecoderLayer, ) def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5) nn.init.constant_(m.weight[padding_idx], 0) return m
docstring: null
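
A small check of the Embedding helper above: weights are drawn from a normal with std embedding_dim ** -0.5 and the padding row is zeroed (illustrative only):

import torch

emb = Embedding(num_embeddings=8, embedding_dim=4, padding_idx=0)
print(torch.equal(emb.weight[0], torch.zeros(4)))  # True: the padding vector is all zeros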

id: 182,596
prompt:
from argparse import Namespace import contextlib import copy import math import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from dataclasses import dataclass, field from omegaconf import MISSING, II, open_dict from typing import Any, Optional from fairseq import checkpoint_utils, tasks, utils from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.tasks import FairseqTask from fairseq.models import ( BaseFairseqModel, FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, register_model, ) from fairseq.models.wav2vec.wav2vec2 import MASKING_DISTRIBUTION_CHOICES from fairseq.modules import ( LayerNorm, PositionalEmbedding, TransformerDecoderLayer, ) def Linear(in_features, out_features, bias=True): m = nn.Linear(in_features, out_features, bias) nn.init.xavier_uniform_(m.weight) if bias: nn.init.constant_(m.bias, 0.0) return m
docstring: null

id: 182,598
prompt:
import logging import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqEncoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import safe_hasattr def base_architecture(args): def xlm_architecture(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.share_encoder_input_output_embed = getattr( args, "share_encoder_input_output_embed", True ) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", True) args.num_segment = getattr(args, "num_segment", 1) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.sent_loss = getattr(args, "sent_loss", False) args.activation_fn = getattr(args, "activation_fn", "gelu") args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.pooler_activation_fn = getattr(args, "pooler_activation_fn", "tanh") args.apply_bert_init = getattr(args, "apply_bert_init", True) base_architecture(args)
docstring: null

id: 182,603
prompt:
import argparse import random import sys from sacrebleu import extract_ngrams def get_phrase(words, index, length): assert index < len(words) - length + 1 phr = " ".join(words[index : index + length]) for i in range(index, index + length): words.pop(index) return phr
docstring: null
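
get_phrase above both returns the joined phrase and removes those words from the list in place; a quick trace:

words = ["the", "quick", "brown", "fox"]
print(get_phrase(words, 1, 2))  # "quick brown"
print(words)                    # ['the', 'fox'], the extracted words were popped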

id: 182,606
prompt:
import argparse import logging import math import os import sys from typing import Dict, Optional, Any, List, Tuple, Callable logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from fairseq import ( checkpoint_utils, options, quantization_utils, tasks, utils, ) from fairseq.data import iterators, data_utils from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from omegaconf import DictConfig, OmegaConf def _flatten_config(cfg: DictConfig): config = OmegaConf.to_container(cfg) # remove any legacy Namespaces and replace with a single "args" namespace = None for k, v in list(config.items()): if isinstance(v, argparse.Namespace): namespace = v del config[k] if namespace is not None: config["args"] = vars(namespace) return config def validate_and_save( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool, ) -> Tuple[List[Optional[float]], bool]: num_updates = trainer.get_num_updates() max_update = cfg.optimization.max_update or math.inf # Stopping conditions (and an additional one based on validation loss later # on) should_stop = False if num_updates >= max_update: should_stop = True logger.info( f"Stopping training due to " f"num_updates: {num_updates} >= max_update: {max_update}" ) training_time_hours = trainer.cumulative_training_time() / (60 * 60) if ( cfg.optimization.stop_time_hours > 0 and training_time_hours > cfg.optimization.stop_time_hours ): should_stop = True logger.info( f"Stopping training due to " f"cumulative_training_time: {training_time_hours} > " f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)" ) do_save = ( (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0) or should_stop or ( cfg.checkpoint.save_interval_updates > 0 and num_updates > 0 and num_updates % cfg.checkpoint.save_interval_updates == 0 and num_updates >= cfg.dataset.validate_after_updates ) ) do_validate = ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0) or should_stop or ( cfg.dataset.validate_interval_updates > 0 and num_updates > 0 and num_updates % cfg.dataset.validate_interval_updates == 0 ) ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates # Validate valid_losses = [None] if do_validate: valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets) should_stop |= should_stop_early(cfg, valid_losses[0]) # Save checkpoint if do_save or should_stop: checkpoint_utils.save_checkpoint( cfg.checkpoint, trainer, epoch_itr, valid_losses[0] ) return valid_losses, should_stop def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]: stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, log_file: Optional[str] = None, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, 
azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = default_log_format if log_file is not None: handler = logging.FileHandler(filename=log_file) logger.addHandler(handler) if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") if self.is_fsdp: import fairscale if self.cfg.common.bf16: raise ValueError( "FullyShardedDataParallel is not compatible with --bf16 or " "--memory-efficient-bf16" ) if self.cfg.distributed_training.zero_sharding != "none": raise ValueError( "FullyShardedDataParallel is not compatible with --zero-sharding " "option (it's already built in)" ) if max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0": raise RuntimeError( "Please update to fairscale 0.4.0 or newer when combining " "--update-freq with FullyShardedDataParallel" ) else: if ( hasattr(self.cfg.distributed_training, "cpu_offload") and self.cfg.distributed_training.cpu_offload ): raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if not self.is_fsdp: if cfg.common.fp16: assert not cfg.common.amp, "Cannot use fp16 and AMP together" self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) elif cfg.common.amp: self._amp_retries = 0 if ( not cfg.distributed_training.pipeline_model_parallel # the DistributedFairseqModel wrapper will handle moving to device, # so only handle cases which don't use the wrapper and not self.use_distributed_wrapper ): self._criterion = self._criterion.to(device=self.device) self._model = 
self._model.to(device=self.device) self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel self.last_device = None if self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None self._ema = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 def use_distributed_wrapper(self) -> bool: return ( self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ) or ( self.is_fsdp and self.cfg.distributed_training.cpu_offload ) def should_save_checkpoint_on_current_rank(self) -> bool: """Indicates whether to save checkpoints on the current DDP rank.""" if ( self.is_fsdp and self.cfg.distributed_training.use_sharded_state ) or getattr(self.cfg.model, "base_layers", 0) > 0: return True else: return self.is_data_parallel_master def always_call_state_dict_during_save_checkpoint(self) -> bool: if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: # FSDP calls communication collective when consolidating checkpoints return True else: return False def checkpoint_suffix(self) -> str: """Suffix to add to the checkpoint file name.""" if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( self.data_parallel_rank ) else: return 
self.cfg.checkpoint.checkpoint_suffix or "" def criterion(self): if self._wrapped_criterion is None: if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: self._wrapped_criterion = models.DistributedFairseqModel( self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion def model(self): if self._wrapped_model is None: if self.use_distributed_wrapper: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_model = self._model return self._wrapped_model def ema(self): if self._ema is None: self._build_ema() return self._ema def _build_ema(self): if self.cfg.ema.store_ema: self._ema = build_ema(self._model, self.cfg.ema, self.device) logger.info( "Exponential Moving Average Shadow Model is initialized." ) def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.is_fsdp and self.cfg.common.fp16: # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, # mostly for the grad scaling. But if we don't have the # --memory-efficient-fp16 flag set, then we're effectively doing # regular --fp16 and can allow the use of optimizers that would # otherwise be unsupported by MemoryEfficientFP16Optimizer. allow_unsupported = not self.cfg.common.memory_efficient_fp16 self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params, allow_unsupported=allow_unsupported ) elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16 or --amp, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) elif self.cfg.common.amp: self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info("NOTE: your device may support faster training with --fp16 or --amp") self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.is_fsdp: assert ( not self.cfg.optimization.use_bmuf ), "--ddp-backend=fully_sharded is not compatible with BMUF" assert self._optimizer.supports_flat_params, ( "--ddp-backend=fully_sharded is only compatible with pointwise " "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). 
" "However, the sharding will result in slightly different results when " "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" ) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. " "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) def is_fsdp(self): return self.cfg.distributed_training.ddp_backend == "fully_sharded" def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if self.cfg.checkpoint.no_save_optimizer_state: return self._gathered_optim_state = None if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() elif self.is_fsdp and not self.model.use_sharded_state: st = self.model.gather_full_optim_state_dict( self.optimizer ) # only returns on rank 0 self._gathered_optim_state = st def state_dict(self): state_dict = { "args": None, # legacy "cfg": ( OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg ), "model": self.model.state_dict(), "criterion": ( self.criterion.state_dict() if utils.has_parameters(self.criterion) else None ), "optimizer_history": (self._optim_history or []) + [ { "criterion_name": self.get_criterion().__class__.__name__, "optimizer_name": self.optimizer.__class__.__name__, "lr_scheduler_state": self.lr_scheduler.state_dict(), "num_updates": self.get_num_updates(), } ], "task_state": self.task.state_dict() if self.task is not None else {}, "extra_state": { "metrics": metrics.state_dict(), "previous_training_time": self.cumulative_training_time(), }, } if self.cfg.ema.store_ema: # Save EMA model state as extra state state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() if self.cfg.ema.ema_fp32: # Save EMA params in fp32 state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params if not self.cfg.checkpoint.no_save_optimizer_state: if self._gathered_optim_state is not None: state_dict["last_optimizer_state"] = self._gathered_optim_state self._gathered_optim_state = None else: state_dict["last_optimizer_state"] = self.optimizer.state_dict() if self.is_fsdp: # save meta data for recombining checkpoint upon loading state_dict["fsdp_metadata"] = self.model.local_metadata_dict() return state_dict def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" logger.info(f"Saving checkpoint to {filename}") # call state_dict on all ranks in case it needs internal communication state_dict = utils.move_to_cpu(self.state_dict()) state_dict["extra_state"].update(extra_state) if self.should_save_checkpoint_on_current_rank: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, ) logger.info(f"Finished saving checkpoint to {filename}") def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, 
optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. """ extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu # FSDP requires loading checkpoint shards on all ranks or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or getattr(self.cfg.model, "base_layers", 0) > 0 ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: self.model.load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) # save memory for later steps del state["model"] if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) del state["criterion"] except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if self.is_fsdp and not self.model.use_sharded_state: # if use_sharded_state, the last_optim_state is already sharded, skip this last_optim_state = self.model.get_shard_from_optim_state_dict( last_optim_state ) elif not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: itr_state = extra_state["train_iterator"] if type(itr_state) == list: # assert len(itr_state) == self.data_parallel_world_size itr_state = itr_state[self.data_parallel_rank] extra_state["train_iterator"] = itr_state epoch = itr_state.get("epoch", 1) if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if ( itr_state.get("version", 1) >= 2 and itr_state.get("iterations_in_epoch", 0) == 0 ): # reset meters at start of epoch reset_meters = True if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() if self.cfg.ema.store_ema: if "ema" not in extra_state: logger.warn( "EMA not found in checkpoint. But store_ema is True. " "EMA is re-initialized from checkpoint." ) self.ema.restore(state["model"], build_fp32_params=self.cfg.ema.ema_fp32) else: logger.info( "Loading EMA from checkpoint" ) self.ema.restore(extra_state["ema"], build_fp32_params=False) if self.cfg.ema.ema_fp32: if "ema_fp32_params" in extra_state: logger.info( "Loading EMA fp32 params from checkpoint" ) self.ema.build_fp32_params(extra_state["ema_fp32_params"]) else: logger.info( "Building EMA fp32 params from EMA model in checkpoint" ) self.ema.build_fp32_params() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def 
get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, # always pass a fixed "epoch" to keep validation data consistent # across training epochs epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 # The no_sync context manager results in increased memory # usage with FSDP, since full-size gradients will be # accumulated on each GPU. It's typically a better tradeoff # to do the extra communication with FSDP. 
and not self.is_fsdp ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass self._xla_markstep_and_send_to_cpu() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() logging_outputs, ( sample_size, ooms, total_train_time, ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. # In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. 
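# Illustrative arithmetic (numbers are assumptions, not from the source): with
# data_parallel_world_size = 4 and sample_size = 800 target tokens, DDP's averaging
# leaves (sum_of_gradients / 4); multiplying by numer / sample_size = 4 / 800
# restores (sum_of_gradients / 800), i.e. the per-token gradient the optimizer expects.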
with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.ddp_backend != "slow_mo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # in case of AMP, if gradients are Nan/Inf then # optimizer step is still required if self.cfg.common.amp: overflow = True else: # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) if self.cfg.common.amp and overflow: if self._amp_retries == self.cfg.common.amp_batch_retries: logger.info("AMP: skipping this batch.") self._amp_retries = 0 else: self._amp_retries += 1 return self.train_step(samples, raise_oom) # recursion to feed in same batch except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs, ) raise except OverflowError as e: overflow = True logger.info( f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" ) grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer # after the step if hasattr(self.model, "perform_additional_optimizer_actions"): if hasattr(self.optimizer, "fp32_params"): self.model.perform_additional_optimizer_actions( self.optimizer.optimizer, self.optimizer.fp32_params ) else: self.model.perform_additional_optimizer_actions( self.optimizer.optimizer ) logging_output = None if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo": self.set_num_updates(self.get_num_updates() + 1) if self.cfg.ema.store_ema: # Step EMA forward with new model. 
self.ema.step( self.get_model(), self.get_num_updates(), ) metrics.log_scalar( "ema_decay", self.ema.get_decay(), priority=10000, round=5, weight=0, ) if self.tpu: import torch_xla.core.xla_model as xm # mark step on TPUs self._xla_markstep_and_send_to_cpu() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0 ) logging_outputs = self._xla_markstep_and_send_to_cpu( logging_outputs ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: if self.cuda and self.cuda_env is not None: # log minimum free memory over the iteration gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 torch.cuda.reset_peak_memory_stats() gb_free = self.cuda_env.total_memory_in_GB - gb_used metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16 or self.cfg.common.amp: metrics.log_scalar( "loss_scale", ( self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale() ), priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. 
extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion, **extra_kwargs ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats if self.tpu: logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) # logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_outputs def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): def agg_norm_fn(total_norm): total_norm = total_norm.cuda().float() ** 2 total_norm = distributed_utils.all_reduce( total_norm, group=self.data_parallel_process_group ) return total_norm ** 0.5 should_agg_norm = ( self.is_fsdp and ( self.data_parallel_process_group is not None or torch.distributed.is_initialized() ) ) return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None ) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _fp_convert_sample(self, sample): def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth # it makes sense to do the format conversion on the CPU and then transfer # a smaller buffer to the device. This also saves GPU memory capacity. 
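# Illustrative sizing (numbers are assumptions): a 2 GB fp32 sample converted to fp16
# on the CPU shrinks to 1 GB before the host-to-device copy below, so the slower
# interconnect moves half the bytes and the device never holds the fp32 copy.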
if self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self.cuda: if self.pipeline_model_parallel: if 'target' in sample: sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) if not self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( (torch.isfinite(tensor).all() and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()) or (self.cfg.common.amp and not torch.isfinite(tensor).all()) # in case of amp non-finite grads are fine ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=legacy_ddp. " "Or are you mixing up different generation of GPUs in training?" 
+ "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _xla_markstep_and_send_to_cpu(self, data=None): import torch_xla.core.xla_model as xm xm.mark_step() if data is not None: from fairseq.utils import xla_device_to_cpu return xla_device_to_cpu(data) The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]` to solve the following problem: Train the model for one epoch and return validation losses. 
Here is the function: def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]: """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum), ) update_freq = ( cfg.optimization.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(cfg.optimization.update_freq) else cfg.optimization.update_freq[-1] ) itr = iterators.GroupedIterator(itr, update_freq) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_file=cfg.common.log_file, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), azureml_logging=( cfg.common.azureml_logging if distributed_utils.is_master(cfg.distributed_training) else False ), ) progress.update_config(_flatten_config(cfg)) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = cfg.dataset.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() logger.info("Start iterating over samples") for i, samples in enumerate(progress): with metrics.aggregate("train_inner"), torch.autograd.profiler.record_function( "train_step-%d" % i ): log_output = trainer.train_step(samples) if log_output is not None: # not OOM, overflow, ... # log mid-epoch stats num_updates = trainer.get_num_updates() if num_updates % cfg.common.log_interval == 0: stats = get_training_stats(metrics.get_smoothed_values("train_inner")) progress.log(stats, tag="train_inner", step=num_updates) # reset mid-epoch stats after each log interval # the end-of-epoch stats will still be preserved metrics.reset_meters("train_inner") end_of_epoch = not itr.has_next() valid_losses, should_stop = validate_and_save( cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch ) if should_stop: break # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, should_stop
Train the model for one epoch and return validation losses.
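A minimal sketch of the update-frequency schedule used near the top of train() above; the values are illustrative, not from the source. The i-th entry of --update-freq applies to epoch i, and epochs past the end of the list reuse the last entry.

update_freq_cfg = [4, 2, 1]  # e.g. --update-freq 4 2 1 (illustrative values)
for epoch in (1, 2, 3, 5):
    uf = (
        update_freq_cfg[epoch - 1]
        if epoch <= len(update_freq_cfg)
        else update_freq_cfg[-1]
    )
    print(epoch, uf)  # -> (1, 4), (2, 2), (3, 1), (5, 1)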
182,607
import argparse import logging import math import os import sys from typing import Dict, Optional, Any, List, Tuple, Callable logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from fairseq import ( checkpoint_utils, options, quantization_utils, tasks, utils, ) from fairseq.data import iterators, data_utils from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap, utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer from omegaconf import DictConfig, OmegaConf def main(cfg: FairseqConfig) -> None: if isinstance(cfg, argparse.Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) if distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg: # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() if cfg.common.log_file is not None: handler = logging.FileHandler(filename=cfg.common.log_file) logger.addHandler(handler) np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) if distributed_utils.is_master(cfg.distributed_training): checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) # Print args logger.info(cfg) if cfg.checkpoint.write_checkpoints_asynchronously: try: import iopath # noqa: F401 except ImportError: logging.exception( "Asynchronous checkpoint writing is specified but iopath is " "not installed: `pip install iopath`" ) return # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(cfg.task) assert cfg.criterion, "Please specify criterion to train a model" # Build model and criterion if cfg.distributed_training.ddp_backend == "fully_sharded": with fsdp_enable_wrap(cfg.distributed_training): model = fsdp_wrap(task.build_model(cfg.model)) else: model = task.build_model(cfg.model) criterion = task.build_criterion(cfg.criterion) logger.info(model) logger.info("task: {}".format(task.__class__.__name__)) logger.info("model: {}".format(model.__class__.__name__)) logger.info("criterion: {}".format(criterion.__class__.__name__)) logger.info( "num. shared model params: {:,} (num. trained: {:,})".format( sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False)), sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad) ) ) logger.info( "num. expert model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)), sum(p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad), ) ) # Load valid dataset (we load training data below, based on the latest checkpoint) # We load the valid dataset AFTER building the model data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg) if cfg.dataset.combine_valid_subsets: task.load_dataset("valid", combine=True, epoch=1) else: for valid_sub_split in cfg.dataset.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # (optionally) Configure quantization if cfg.common.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=cfg.common.quantization_config_path, max_epoch=cfg.optimization.max_epoch, max_update=cfg.optimization.max_update, ) else: quantizer = None # Build trainer if cfg.common.model_parallel_size == 1: trainer = Trainer(cfg, task, model, criterion, quantizer) else: trainer = MegatronTrainer(cfg, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format( cfg.distributed_training.distributed_world_size ) ) logger.info( "max tokens per device = {} and max sentences per device = {}".format( cfg.dataset.max_tokens, cfg.dataset.batch_size, ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( cfg.checkpoint, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) if cfg.common.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("load_checkpoint") # wait for all workers max_epoch = cfg.optimization.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while epoch_itr.next_epoch_idx <= max_epoch: if lr <= cfg.optimization.stop_min_lr: logger.info( f"stopping training because current learning rate ({lr}) is smaller " "than or equal to minimum learning rate " f"(--stop-min-lr={cfg.optimization.stop_min_lr})" ) break # train for one epoch valid_losses, should_stop = train(cfg, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) if getattr(epoch_itr, "should_close_after_finished", False): epoch_itr.close() # ioPath implementation to wait for all asynchronous file writes to complete. if cfg.checkpoint.write_checkpoints_asynchronously: logger.info( "ioPath PathManager waiting for all asynchronous checkpoint " "writes to finish." ) PathManager.async_close() logger.info("ioPath PathManager finished waiting.") class PlasmaStore: def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100): self.server = self.start(path, nbytes) def __del__(self): self.server.kill() def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen: if not PYARROW_AVAILABLE: raise ImportError("please run pip install pyarrow to use --use_plasma_view") # best practice is to allocate more space than we need. 
The limitation seems to be the size of /dev/shm _server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path]) plasma.connect(path, num_retries=200) # If we can't connect we fail immediately return _server def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main( modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None ) -> None: parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) cfg = convert_namespace_to_omegaconf(args) if cfg.common.use_plasma_view: server = PlasmaStore(path=cfg.common.plasma_path) logger.info(f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}") if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) # if cfg.common.use_plasma_view: # server.server.kill()
null
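A hedged sketch of invoking the training entry point above programmatically instead of through the fairseq-train console script; the dataset path and hyperparameters are placeholders, and a real run needs a prepared data-bin directory.

import sys
from fairseq_cli.train import cli_main

# Placeholder arguments; cli_main() parses sys.argv via options.get_training_parser().
sys.argv = [
    "fairseq-train", "data-bin/example",
    "--arch", "transformer",
    "--optimizer", "adam",
    "--criterion", "label_smoothed_cross_entropy",
    "--max-tokens", "4096",
    "--lr", "5e-4",
]
cli_main()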
182,609
import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output Batch = namedtuple("Batch", "ids src_tokens src_lengths constraints") def pack_constraints(batch_constraints: List[List[torch.Tensor]]) -> torch.Tensor: """Takes a list of list of constraints in tensor form (a list of tensor constraints for each sentence) and transforms it into a packed Tensor. For example, here is a batch of size 3 with 3, 0, and 1 constraints: [ [ [3 1 2], [3], [4 5 6 7], ] [], [ [1 8 9 10 1 4 11 12], ] ] Its corresponding packed structure is: [ [ 3 3 1 2 0 3 0 4 5 6 7 0], [ 0 0 0 0 0 0 0 0 0 0 0 0], [ 1 1 8 9 10 1 4 11 12 0 0 0] ] The packed tensor has shape (batch size, maxlen), where maxlen is defined below. Each row contains concatenated constraint tokens for that sentence, with 0 appended after each constraint. The first item in each row is the number of constraints for that sentence. So maxlen is the maximum of (number of constraints) + (sum length of constraints) + 1. across all sentences in the batch. """ # The maximum word length of concatenated constraints for any sentence max_constraints_len = 1 for sentence_constraints in batch_constraints: if len(sentence_constraints): # number of constraints, plus sum of constrain lens, plus a zero after each constraints_len = ( 1 + sum([c.size(0) for c in sentence_constraints]) + len(sentence_constraints) ) max_constraints_len = max(max_constraints_len, constraints_len) batch_size = len(batch_constraints) constraints_tensor = torch.zeros((batch_size, max_constraints_len)).long() for i, sentence_constraints in enumerate(batch_constraints): constraints_tensor[i, 0] = len(sentence_constraints) offset = 1 for j, constraint in enumerate(sentence_constraints): this_len = constraint.size(0) constraints_tensor[i, offset : offset + this_len] = constraint offset += this_len + 1 return constraints_tensor.long() def make_batches(lines, cfg, task, max_positions, encode_fn): def encode_fn_target(x): return encode_fn(x) if cfg.generation.constraints: # Strip (tab-delimited) contraints, if present, from input lines, # store them in batch_constraints batch_constraints = [list() for _ in lines] for i, line in enumerate(lines): if "\t" in line: lines[i], *batch_constraints[i] = line.split("\t") # Convert each List[str] to List[Tensor] for i, constraint_list in enumerate(batch_constraints): batch_constraints[i] = [ task.target_dictionary.encode_line( encode_fn_target(constraint), append_eos=False, add_if_not_exist=False, ) for constraint in constraint_list ] if cfg.generation.constraints: constraints_tensor = pack_constraints(batch_constraints) else: constraints_tensor = None tokens, lengths = task.get_interactive_tokens_and_lengths(lines, encode_fn, truncate_length=cfg.generation.truncate_length) itr = task.get_batch_iterator( dataset=task.build_dataset_for_inference( tokens, lengths, constraints=constraints_tensor ), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=max_positions, ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, 
).next_epoch_itr(shuffle=False) for batch in itr: ids = batch["id"] src_tokens = batch["net_input"]["src_tokens"] src_lengths = batch["net_input"]["src_lengths"] constraints = batch.get("constraints", None) yield Batch( ids=ids, src_tokens=src_tokens, src_lengths=src_lengths, constraints=constraints, )
null
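A minimal sketch that reproduces the packed layout described in the pack_constraints docstring above; it assumes fairseq is installed and uses only the constraint helpers already imported in this snippet.

import torch
from fairseq.token_generation_constraints import pack_constraints, unpack_constraints

batch = [
    [torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7])],
    [],
    [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])],
]
packed = pack_constraints(batch)  # shape (3, 12); each row starts with its constraint count
assert packed.shape == (3, 12)
assert [c.tolist() for c in unpack_constraints(packed[0])] == [[3, 1, 2], [3], [4, 5, 6, 7]]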
182,610
import ast import fileinput import logging import math import os import sys import time from argparse import Namespace from collections import namedtuple import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.token_generation_constraints import pack_constraints, unpack_constraints from fairseq_cli.generate import get_symbols_to_strip_from_output def main(cfg: FairseqConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) start_time = time.time() total_translate_time = 0 utils.import_user_module(cfg.common) if cfg.interactive.buffer_size < 1: cfg.interactive.buffer_size = 1 if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.batch_size = 1 assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( not cfg.dataset.batch_size or cfg.dataset.batch_size <= cfg.interactive.buffer_size ), "--batch-size cannot be larger than --buffer-size" logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Setup task, e.g., translation task = tasks.setup_task(cfg.task) # Load ensemble overrides = ast.literal_eval(cfg.common_eval.model_overrides) logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, _model_args = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Initialize generator generator = task.build_generator(models, cfg.generation) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def encode_fn(x): if tokenizer is not None: x = tokenizer.encode(x) if bpe is not None: x = bpe.encode(x) return x def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if cfg.generation.constraints: logger.warning( "NOTE: Constrained decoding currently assumes a shared subword vocabulary." 
) if cfg.interactive.buffer_size > 1: logger.info("Sentence buffer size: %s", cfg.interactive.buffer_size) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info("Type the input sentence and press return:") start_id = 0 for inputs in buffered_read(cfg.interactive.input, cfg.interactive.buffer_size): results = [] for batch in make_batches(inputs, cfg, task, max_positions, encode_fn): bsz = batch.src_tokens.size(0) src_tokens = batch.src_tokens src_lengths = batch.src_lengths constraints = batch.constraints if use_cuda: src_tokens = src_tokens.cuda() src_lengths = src_lengths.cuda() if constraints is not None: constraints = constraints.cuda() sample = { "net_input": { "src_tokens": src_tokens, "src_lengths": src_lengths, }, } translate_start_time = time.time() translations = task.inference_step( generator, models, sample, constraints=constraints ) translate_time = time.time() - translate_start_time total_translate_time += translate_time list_constraints = [[] for _ in range(bsz)] if cfg.generation.constraints: list_constraints = [unpack_constraints(c) for c in constraints] for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)): src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad()) constraints = list_constraints[i] results.append( ( start_id + id, src_tokens_i, hypos, { "constraints": constraints, "time": translate_time / len(translations), }, ) ) # sort output to match input order for id_, src_tokens, hypos, info in sorted(results, key=lambda x: x[0]): src_str = '' if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) print("S-{}\t{}".format(id_, src_str)) print("W-{}\t{:.3f}\tseconds".format(id_, info["time"])) for constraint in info["constraints"]: print( "C-{}\t{}".format( id_, tgt_dict.string(constraint, cfg.common_eval.post_process) ) ) # Process top predictions for hypo in hypos[: min(len(hypos), cfg.generation.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print("H-{}\t{}\t{}".format(id_, score, hypo_str)) # detokenized hypothesis print("D-{}\t{}\t{}".format(id_, score, detok_hypo_str)) print( "P-{}\t{}".format( id_, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"].div_(math.log(2)).tolist(), ) ), ) ) if cfg.generation.print_alignment: alignment_str = " ".join( ["{}-{}".format(src, tgt) for src, tgt in alignment] ) print("A-{}\t{}".format(id_, alignment_str)) # update running id_ counter start_id += len(inputs) logger.info( "Total time: {:.3f} seconds; translation time: {:.3f}".format( time.time() - start_time, total_translate_time ) ) def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, 
strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main(): parser = options.get_interactive_generation_parser() args = options.parse_args_and_arch(parser) distributed_utils.call_main(convert_namespace_to_omegaconf(args), main)
null
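A small sketch of the tab-delimited constraint format consumed by the interactive loop above when --constraints is enabled; the sentences and constraint strings are invented for illustration, and the splitting mirrors what make_batches does before encoding and packing.

lines = [
    "ein Haus am See",                      # plain source sentence
    "ein Haus am See\thouse\tby the lake",  # source TAB constraint TAB constraint
]
for i, line in enumerate(lines):
    if "\t" in line:
        src, *constraints = line.split("\t")
    else:
        src, constraints = line, []
    print(i, src, constraints)  # each constraint is later encoded and packed per sentence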
182,611
import logging import os import sys from argparse import Namespace from itertools import chain import torch from fairseq import checkpoint_utils, distributed_utils, options, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import metrics, progress_bar from fairseq.utils import reset_logging from omegaconf import DictConfig def main(cfg: DictConfig, override_args=None): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) reset_logging() assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" use_fp16 = cfg.common.fp16 use_cuda = torch.cuda.is_available() and not cfg.common.cpu if use_cuda: torch.cuda.set_device(cfg.distributed_training.device_id) if cfg.distributed_training.distributed_world_size > 1: data_parallel_world_size = distributed_utils.get_data_parallel_world_size() data_parallel_rank = distributed_utils.get_data_parallel_rank() else: data_parallel_world_size = 1 data_parallel_rank = 0 if override_args is not None: overrides = vars(override_args) overrides.update(eval(getattr(override_args, "model_overrides", "{}"))) else: overrides = None # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task( [cfg.common_eval.path], arg_overrides=overrides, suffix=cfg.checkpoint.checkpoint_suffix, ) model = models[0] # Move models to GPU for model in models: model.eval() if use_fp16: model.half() if use_cuda: model.cuda() # Print args logger.info(saved_cfg) # Build criterion criterion = task.build_criterion(saved_cfg.criterion) criterion.eval() for subset in cfg.dataset.valid_subset.split(","): try: task.load_dataset(subset, combine=False, epoch=1, task_cfg=saved_cfg.task) dataset = task.dataset(subset) except KeyError: raise Exception("Cannot find dataset: " + subset) # Initialize data iterator itr = task.get_batch_iterator( dataset=dataset, max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models], ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=data_parallel_world_size, shard_id=data_parallel_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, prefix=f"valid on '{subset}' subset", default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) log_outputs = [] for i, sample in enumerate(progress): sample = utils.move_to_cuda(sample) if use_cuda else sample _loss, _sample_size, log_output = task.valid_step(sample, model, criterion) progress.log(log_output, step=i) log_outputs.append(log_output) if data_parallel_world_size > 1: log_outputs = distributed_utils.all_gather_list( log_outputs, max_size=cfg.common.all_gather_list_size, group=distributed_utils.get_data_parallel_group(), ) log_outputs = list(chain.from_iterable(log_outputs)) with metrics.aggregate() as agg: task.reduce_metrics(log_outputs, criterion) log_output = agg.get_smoothed_values() progress.print(log_output, tag=subset, step=i) def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: 
"""Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main(): parser = options.get_validation_parser() args = options.parse_args_and_arch(parser) # only override args that are explicitly given on the command line override_parser = options.get_validation_parser() override_args = options.parse_args_and_arch( override_parser, suppress_defaults=True ) distributed_utils.call_main( convert_namespace_to_omegaconf(args), main, override_args=override_args )
null
182,612
import logging import os import shutil import sys from collections import Counter from itertools import zip_longest from multiprocessing import Pool from fairseq import options, tasks, utils from fairseq.binarizer import Binarizer from fairseq.data import indexed_dataset from fairseq.file_chunker_utils import find_offsets def dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return "{}.{}".format(base, extension) class Binarizer: def binarize( filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=-1, already_numberized=False, ) -> Dict[str, int]: nseq, ntok = 0, 0 replaced = Counter() def replaced_consumer(word, idx): if idx == dict.unk_index and word != dict.unk_word: replaced.update([word]) with Chunker( PathManager.get_local_path(filename), offset, end ) as line_iterator: for line in line_iterator: if already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if reverse_order: id_list.reverse() if append_eos: id_list.append(dict.eos()) ids = torch.IntTensor(id_list) else: ids = dict.encode_line( line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order, ) nseq += 1 ntok += len(ids) consumer(ids) return { "nseq": nseq, "nunk": sum(replaced.values()), "ntok": ntok, "replaced": replaced, } def binarize_alignments( filename, alignment_parser, consumer, offset=0, end=-1 ) -> Dict[str, int]: nseq = 0 with Chunker( PathManager.get_local_path(filename), offset, end ) as line_iterator: for line in line_iterator: ids = alignment_parser(line) nseq += 1 consumer(ids) return {"nseq": nseq} def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True): ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, lang, "bin"), impl=args.dataset_impl, vocab_size=len(vocab), ) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize( filename, vocab, consumer, append_eos=append_eos, offset=offset, end=end ) ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx")) return res
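As a rough illustration (not the full preprocess.py flow), the `binarize` wrapper above is typically fanned out over file chunks. The sketch below assumes a plain-text input file and an already-loaded fairseq Dictionary; the shard-merging step of the real script is omitted.

# Illustrative sketch only: split an input file into chunks and binarize each
# chunk with the wrapper above. `args` and `vocab` are assumed to come from
# the surrounding preprocessing script.
from multiprocessing import Pool

from fairseq.file_chunker_utils import find_offsets


def binarize_in_parallel(args, input_file, vocab, output_prefix, lang, num_workers=4):
    # find_offsets returns chunk boundaries so each worker reads a disjoint slice
    offsets = find_offsets(input_file, num_workers)
    chunk_ranges = list(zip(offsets[:-1], offsets[1:]))

    results = []
    with Pool(processes=num_workers) as pool:
        for worker_id, (start, end) in enumerate(chunk_ranges):
            prefix = "{}{}".format(output_prefix, worker_id)
            results.append(
                pool.apply_async(
                    binarize,
                    (args, input_file, vocab, prefix, lang, start, end),
                )
            )
        pool.close()
        pool.join()
    # each result carries the nseq/ntok/replaced statistics for its chunk
    return [r.get() for r in results]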
null
182,613
import logging import os import shutil import sys from collections import Counter from itertools import zip_longest from multiprocessing import Pool from fairseq import options, tasks, utils from fairseq.binarizer import Binarizer from fairseq.data import indexed_dataset from fairseq.file_chunker_utils import find_offsets def dataset_dest_file(args, output_prefix, lang, extension): base = dataset_dest_prefix(args, output_prefix, lang) return "{}.{}".format(base, extension) class Binarizer: def binarize( filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=-1, already_numberized=False, ) -> Dict[str, int]: nseq, ntok = 0, 0 replaced = Counter() def replaced_consumer(word, idx): if idx == dict.unk_index and word != dict.unk_word: replaced.update([word]) with Chunker( PathManager.get_local_path(filename), offset, end ) as line_iterator: for line in line_iterator: if already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if reverse_order: id_list.reverse() if append_eos: id_list.append(dict.eos()) ids = torch.IntTensor(id_list) else: ids = dict.encode_line( line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order, ) nseq += 1 ntok += len(ids) consumer(ids) return { "nseq": nseq, "nunk": sum(replaced.values()), "ntok": ntok, "replaced": replaced, } def binarize_alignments( filename, alignment_parser, consumer, offset=0, end=-1 ) -> Dict[str, int]: nseq = 0 with Chunker( PathManager.get_local_path(filename), offset, end ) as line_iterator: for line in line_iterator: ids = alignment_parser(line) nseq += 1 consumer(ids) return {"nseq": nseq} def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder( dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl, vocab_size=None, ) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments( filename, parse_alignment, consumer, offset=offset, end=end ) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) return res
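A correspondingly small sketch for the alignment path: the parser below is a hypothetical stand-in for the alignment parsing used by the real script, turning a line of "src-tgt" index pairs into a tensor before handing it to the wrapper above.

# Hypothetical alignment parser plus a single-process call to the wrapper above.
import torch


def parse_alignment_example(line):
    # "0-0 1-2 2-1" -> IntTensor of shape (num_pairs, 2); purely illustrative
    pairs = [pair.split("-") for pair in line.strip().split()]
    return torch.IntTensor([[int(s), int(t)] for s, t in pairs])


def binarize_alignment_file(args, align_file, output_prefix):
    # offset=0, end=-1 processes the whole file in one worker
    return binarize_alignments(args, align_file, parse_alignment_example, output_prefix, 0, -1)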
null
182,614
import logging import os import shutil import sys from collections import Counter from itertools import zip_longest from multiprocessing import Pool from fairseq import options, tasks, utils from fairseq.binarizer import Binarizer from fairseq.data import indexed_dataset from fairseq.file_chunker_utils import find_offsets def main(args): def cli_main(): parser = options.get_preprocessing_parser() args = parser.parse_args() main(args)
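As a hedged usage note (the flag names below are the standard fairseq-preprocess ones, but the exact set depends on the installed version), the entry point can also be driven from Python by constructing the argument list before calling `main`.

# Sketch: call the preprocessing entry point with an explicit argument list.
# All paths are placeholders.
from fairseq import options


def run_preprocess_example():
    parser = options.get_preprocessing_parser()
    args = parser.parse_args([
        "--source-lang", "de",
        "--target-lang", "en",
        "--trainpref", "data/train",      # expects data/train.de and data/train.en
        "--validpref", "data/valid",
        "--destdir", "data-bin/example",
        "--workers", "4",
    ])
    main(args)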
null
182,616
import logging import os from fairseq.dataclass.initialize import add_defaults, hydra_init from fairseq_cli.train import main as pre_main from fairseq import distributed_utils, metrics from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import omegaconf_no_object_check from fairseq.utils import reset_logging import hydra from hydra.core.hydra_config import HydraConfig import torch from omegaconf import OmegaConf, open_dict logger = logging.getLogger("fairseq_cli.hydra_train") def hydra_main(cfg: FairseqConfig) -> float: def hydra_init(cfg_name="config") -> None: def cli_main(): try: from hydra._internal.utils import get_args cfg_name = get_args().config_name or "config" except: logger.warning("Failed to get config name from hydra args") cfg_name = "config" hydra_init(cfg_name) hydra_main()
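A small, hedged sketch of launching this Hydra entry point from Python with command-line style overrides; the data path and override keys below are illustrative, and valid keys depend on the config groups registered by `hydra_init`.

# Sketch: invoke the Hydra-based training entry point with override strings.
import sys


def launch_hydra_train_example():
    sys.argv = [
        "fairseq-hydra-train",
        "task.data=/path/to/data-bin",        # hypothetical data path
        "optimization.max_update=1000",
        "common.log_interval=50",
    ]
    cli_main()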
null
182,617
import logging import math import os import sys from argparse import Namespace from typing import Iterable, List, Optional import torch import fairseq from fairseq import checkpoint_utils, distributed_utils, options, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter from fairseq.sequence_scorer import SequenceScorer from omegaconf import DictConfig logger = logging.getLogger("fairseq_cli.eval_lm") class WordStat(object): def __init__(self, word, is_bpe): self.word = word self.is_bpe = is_bpe self.log_prob = 0 self.next_word_prob = 0 self.count = 0 self.missing_next_words = 0 def add(self, log_prob, next_word_prob): """increments counters for the sum of log probs of current word and next word (given context ending at current word). Since the next word might be at the end of the example, or it might be not counted because it is not an ending subword unit, also keeps track of how many of those we have seen""" if next_word_prob is not None: self.next_word_prob += next_word_prob else: self.missing_next_words += 1 self.log_prob += log_prob self.count += 1 def __str__(self): return "{}\t{}\t{}\t{}\t{}\t{}".format( self.word, self.count, self.log_prob, self.is_bpe, self.next_word_prob, self.count - self.missing_next_words, ) class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) def avg(self): return self.sum / self.n if self.n > 0 else self.sum def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val class SequenceScorer(object): """Scores the target for a given source sentence.""" def __init__( self, tgt_dict, softmax_batch=None, compute_alignment=False, eos=None, symbols_to_strip_from_output=None, ): self.pad = tgt_dict.pad() self.eos = tgt_dict.eos() if eos is None else eos self.softmax_batch = softmax_batch or sys.maxsize assert self.softmax_batch > 0 self.compute_alignment = compute_alignment self.symbols_to_strip_from_output = ( symbols_to_strip_from_output.union({self.eos}) if symbols_to_strip_from_output is not None else {self.eos} ) def generate(self, models, sample, **kwargs): """Score a batch of translations.""" net_input = sample["net_input"] def batch_for_softmax(dec_out, target): # assumes decoder_out[0] is the only thing needed (may not be correct for future models!) 
first, rest = dec_out[0], dec_out[1:] bsz, tsz, dim = first.shape if bsz * tsz < self.softmax_batch: yield dec_out, target, True else: flat = first.contiguous().view(1, -1, dim) flat_tgt = target.contiguous().view(flat.shape[:-1]) s = 0 while s < flat.size(1): e = s + self.softmax_batch yield (flat[:, s:e],) + rest, flat_tgt[:, s:e], False s = e def gather_target_probs(probs, target): probs = probs.gather( dim=2, index=target.unsqueeze(-1), ) return probs orig_target = sample["target"] # compute scores for each model in the ensemble avg_probs = None avg_attn = None for model in models: model.eval() decoder_out = model(**net_input) attn = decoder_out[1] if len(decoder_out) > 1 else None if type(attn) is dict: attn = attn.get("attn", None) batched = batch_for_softmax(decoder_out, orig_target) probs, idx = None, 0 for bd, tgt, is_single in batched: sample["target"] = tgt curr_prob = model.get_normalized_probs( bd, log_probs=len(models) == 1, sample=sample ).data if is_single: probs = gather_target_probs(curr_prob, orig_target) else: if probs is None: probs = curr_prob.new(orig_target.numel()) step = curr_prob.size(0) * curr_prob.size(1) end = step + idx tgt_probs = gather_target_probs( curr_prob.view(tgt.shape + (curr_prob.size(-1),)), tgt ) probs[idx:end] = tgt_probs.view(-1) idx = end sample["target"] = orig_target probs = probs.view(sample["target"].shape) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if torch.is_tensor(attn): attn = attn.data else: attn = attn[0] if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) if len(models) > 1: avg_probs.div_(len(models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(models)) bsz = avg_probs.size(0) hypos = [] start_idxs = sample["start_indices"] if "start_indices" in sample else [0] * bsz for i in range(bsz): # remove padding from ref ref = ( utils.strip_pad(sample["target"][i, start_idxs[i] :], self.pad) if sample["target"] is not None else None ) tgt_len = ref.numel() avg_probs_i = avg_probs[i][start_idxs[i] : start_idxs[i] + tgt_len] score_i = avg_probs_i.sum() / tgt_len if avg_attn is not None: avg_attn_i = avg_attn[i] if self.compute_alignment: alignment = utils.extract_hard_alignment( avg_attn_i, sample["net_input"]["src_tokens"][i], sample["target"][i], self.pad, self.eos, ) else: alignment = None else: avg_attn_i = alignment = None hypos.append( [ { "tokens": ref, "score": score_i, "attention": avg_attn_i, "alignment": alignment, "positional_scores": avg_probs_i, } ] ) return hypos The provided code snippet includes necessary dependencies for implementing the `eval_lm` function. Write a Python function `def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = 0, remove_bos_token: bool = False, device: Optional[torch.device] = None, )` to solve the following problem: Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. 
Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter) Here is the function: def eval_lm( models: List[fairseq.models.FairseqModel], source_dictionary: fairseq.data.Dictionary, batch_iterator: Iterable, post_process: Optional[str] = None, output_word_probs: bool = False, output_word_stats: bool = False, target_dictionary: Optional[fairseq.data.Dictionary] = None, softmax_batch: int = 0, remove_bos_token: bool = False, device: Optional[torch.device] = None, ): """ Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputing word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. 
output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter) """ if target_dictionary is None: target_dictionary = source_dictionary if device is None: device = next(models[0].parameters()).device gen_timer = StopwatchMeter() scorer = SequenceScorer(target_dictionary, softmax_batch) score_sum = 0.0 count = 0 if post_process is not None: if post_process in {"subword_nmt", "@@ "}: bpe_cont = post_process.rstrip() bpe_toks = { i for i in range(len(source_dictionary)) if source_dictionary[i].endswith(bpe_cont) } else: raise NotImplementedError( "--post-process={post_process} is not implemented" ) bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() for sample in batch_iterator: if "net_input" not in sample: continue sample = utils.move_to_cuda(sample, device=device) gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample["ntokens"]) for i, hypos_i in enumerate(hypos): hypo = hypos_i[0] sample_id = sample["id"][i] tokens = hypo["tokens"] tgt_len = tokens.numel() pos_scores = hypo["positional_scores"].float() if remove_bos_token: assert hypo["tokens"][0].item() == target_dictionary.bos() tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if bpe_toks is not None: for i in range(tgt_len - 1): if tokens[i].item() in bpe_toks: skipped_toks += 1 pos_scores[i + 1] += pos_scores[i] pos_scores[i] = 0 inf_scores = pos_scores.eq(float("inf")) | pos_scores.eq(float("-inf")) if inf_scores.any(): logger.info( "skipping tokens with inf scores:", target_dictionary.string(tokens[inf_scores.nonzero()]), ) pos_scores = pos_scores[(~inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += pos_scores.numel() - skipped_toks if output_word_probs or output_word_stats: w = "" word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += source_dictionary[w_ind] if bpe_toks is not None and w_ind in bpe_toks: w = w[:-bpe_len] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = i + 1 while ind < len(tokens): if pos_scores[ind].item() != 0: next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add( pos_scores[i].item(), next_prob ) is_bpe = False w = "" if output_word_probs: logger.info( str(int(sample_id)) + " " + ( "\t".join( "{} [{:2f}]".format(x[0], x[1]) for x in word_prob ) ) ) avg_nll_loss = ( -score_sum / count / math.log(2) if count > 0 else 0 ) # convert to base 2 logger.info( "Evaluated {:,} tokens in {:.1f}s ({:.2f} tokens/s)".format( gen_timer.n, gen_timer.sum, 1.0 / gen_timer.avg if gen_timer.avg > 0 else 0 ) ) if output_word_stats: for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True): logger.info(ws) return { "loss": avg_nll_loss, "perplexity": 2 ** avg_nll_loss, }
Args: models (List[~fairseq.models.FairseqModel]): list of models to evaluate. Models are essentially `nn.Module` instances, but must be compatible with fairseq's `SequenceScorer`. source_dictionary (~fairseq.data.Dictionary): dictionary for applying any relevant post processing or outputting word probs/stats. batch_iterator (Iterable): yield batches of data post_process (Optional[str]): post-process text by removing BPE, letter segmentation, etc. Valid options can be found in fairseq.data.utils.post_process, although not all options are implemented here. output_word_probs (Optional[bool]): output words and their predicted log probabilities output_word_stats (Optional[bool]): output word statistics such as word count and average probability target_dictionary (Optional[~fairseq.data.Dictionary]): output dictionary (defaults to *source_dictionary*) softmax_batch (Optional[bool]): if BxT is more than this, will batch the softmax over vocab to this amount of tokens, in order to fit into GPU memory remove_bos_token (Optional[bool]): if True, confirm that the first token is the beginning-of-sentence symbol (according to the relevant dictionary) and remove it from the output device (Optional[torch.device]): device to use for evaluation (defaults to device of first model parameter)
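For orientation, a condensed sketch of wiring `eval_lm` together; the checkpoint path and subset name are placeholders, and the batch iterator comes from the task just as in fairseq_cli/eval_lm.py.

# Sketch: score a validation set with eval_lm. Paths are placeholders and
# error handling is omitted.
import torch

from fairseq import checkpoint_utils, utils


def run_eval_lm_example(checkpoint_path="checkpoints/checkpoint_best.pt",
                        subset="valid", max_tokens=4096):
    models, _cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path]
    )
    for model in models:
        model.eval()
        if torch.cuda.is_available():
            model.cuda()

    task.load_dataset(subset)
    itr = task.get_batch_iterator(
        dataset=task.dataset(subset),
        max_tokens=max_tokens,
        max_positions=utils.resolve_max_positions(
            task.max_positions(), *[m.max_positions() for m in models]
        ),
    ).next_epoch_itr(shuffle=False)

    results = eval_lm(
        models=models,
        source_dictionary=task.source_dictionary,
        batch_iterator=itr,
        softmax_batch=1024,
    )
    print("loss: {:.4f}, perplexity: {:.2f}".format(
        results["loss"], results["perplexity"]))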
182,619
import ast import logging import math import os import sys from argparse import Namespace from itertools import chain import numpy as np import torch from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter from omegaconf import DictConfig def get_symbols_to_strip_from_output(generator): if hasattr(generator, "symbols_to_strip_from_output"): return generator.symbols_to_strip_from_output else: return {generator.eos} def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, log_file: Optional[str] = None, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = default_log_format if log_file is not None: handler = logging.FileHandler(filename=log_file) logger.addHandler(handler) if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar class TimeMeter(Meter): """Computes the average occurrence of some event per second""" def __init__( self, init: int = 0, n: int = 0, round: Optional[int] = None, ): self.round = round self.reset(init, n) def reset(self, init=0, n=0): self.init = init self.start = time.perf_counter() self.n = n self.i = 0 def update(self, val=1): self.n = type_as(self.n, val) + val self.i += 1 def state_dict(self): return { "init": self.elapsed_time, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): if "start" in state_dict: # backwards compatibility for old state_dicts self.reset(init=state_dict["init"]) else: self.reset(init=state_dict["init"], n=state_dict["n"]) self.round = state_dict.get("round", None) def avg(self): return self.n / self.elapsed_time def elapsed_time(self): return self.init + (time.perf_counter() - self.start) def smoothed_value(self) -> float: val = self.avg if self.round is not None and val is not None: val = safe_round(val, self.round) return val class StopwatchMeter(Meter): """Computes the sum/avg duration of some event in seconds""" def __init__(self, round: Optional[int] = None): self.round = round self.sum = 0 self.n = 0 self.start_time = None def start(self): self.start_time = time.perf_counter() def stop(self, n=1, prehook=None): if self.start_time is not None: if prehook is not None: prehook() delta = time.perf_counter() - self.start_time self.sum = self.sum + delta self.n = type_as(self.n, n) + n def reset(self): self.sum = 0 # cumulative time during which stopwatch was 
active self.n = 0 # total n across all start/stop self.start() def state_dict(self): return { "sum": self.sum, "n": self.n, "round": self.round, } def load_state_dict(self, state_dict): self.sum = state_dict["sum"] self.n = state_dict["n"] self.start_time = None self.round = state_dict.get("round", None) def avg(self): return self.sum / self.n if self.n > 0 else self.sum def elapsed_time(self): if self.start_time is None: return 0.0 return time.perf_counter() - self.start_time def smoothed_value(self) -> float: val = self.avg if self.sum > 0 else self.elapsed_time if self.round is not None and val is not None: val = safe_round(val, self.round) return val def _main(cfg: DictConfig, output_file): logging.basicConfig( format="%(asctime)s | %(levelname)s | %(name)s | %(message)s", datefmt="%Y-%m-%d %H:%M:%S", level=os.environ.get("LOGLEVEL", "INFO").upper(), stream=output_file, ) logger = logging.getLogger("fairseq_cli.generate") utils.import_user_module(cfg.common) if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None: cfg.dataset.max_tokens = 12000 logger.info(cfg) # Fix seed for stochastic decoding if cfg.common.seed is not None and not cfg.generation.no_seed_provided: np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) use_cuda = torch.cuda.is_available() and not cfg.common.cpu # Load dataset splits task = tasks.setup_task(cfg.task) # Set dictionaries try: src_dict = getattr(task, "source_dictionary", None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary overrides = ast.literal_eval(cfg.common_eval.model_overrides) # Load ensemble logger.info("loading model(s) from {}".format(cfg.common_eval.path)) models, saved_cfg = checkpoint_utils.load_model_ensemble( utils.split_paths(cfg.common_eval.path), arg_overrides=overrides, task=task, suffix=cfg.checkpoint.checkpoint_suffix, strict=(cfg.checkpoint.checkpoint_shard_count == 1), num_shards=cfg.checkpoint.checkpoint_shard_count, ) # loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task) if cfg.generation.lm_path is not None: overrides["data"] = cfg.task.data try: lms, _ = checkpoint_utils.load_model_ensemble( [cfg.generation.lm_path], arg_overrides=overrides, task=None ) except: logger.warning( f"Failed to load language model! 
Please make sure that the language model dict is the same " f"as target dict and is located in the data dir ({cfg.task.data})" ) raise assert len(lms) == 1 else: lms = [None] # Optimize ensemble for generation for model in chain(models, lms): if model is None: continue if cfg.common.fp16: model.half() if use_cuda and not cfg.distributed_training.pipeline_model_parallel: model.cuda() model.prepare_for_inference_(cfg) # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(cfg.generation.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(cfg.dataset.gen_subset), max_tokens=cfg.dataset.max_tokens, max_sentences=cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( task.max_positions(), *[m.max_positions() for m in models] ), ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=cfg.dataset.required_batch_size_multiple, seed=cfg.common.seed, num_shards=cfg.distributed_training.distributed_world_size, shard_id=cfg.distributed_training.distributed_rank, num_workers=cfg.dataset.num_workers, data_buffer_size=cfg.dataset.data_buffer_size, ).next_epoch_itr(shuffle=False) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_interval=cfg.common.log_interval, default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), ) # Initialize generator gen_timer = StopwatchMeter() extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight} generator = task.build_generator( models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs ) # Handle tokenization and BPE tokenizer = task.build_tokenizer(cfg.tokenizer) bpe = task.build_bpe(cfg.bpe) def decode_fn(x): if bpe is not None: x = bpe.decode(x) if tokenizer is not None: x = tokenizer.decode(x) return x scorer = scoring.build_scorer(cfg.scoring, tgt_dict) num_sentences = 0 has_target = True wps_meter = TimeMeter() for sample in progress: sample = utils.move_to_cuda(sample) if use_cuda else sample if "net_input" not in sample: continue prefix_tokens = None if cfg.generation.prefix_size > 0: prefix_tokens = sample["target"][:, : cfg.generation.prefix_size] constraints = None if "constraints" in sample: constraints = sample["constraints"] gen_timer.start() hypos = task.inference_step( generator, models, sample, prefix_tokens=prefix_tokens, constraints=constraints, ) num_generated_tokens = sum(len(h[0]["tokens"]) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample["id"].tolist()): has_target = sample["target"] is not None # Remove padding if "src_tokens" in sample["net_input"]: src_tokens = utils.strip_pad( sample["net_input"]["src_tokens"][i, :], tgt_dict.pad() ) else: src_tokens = None target_tokens = None if has_target: target_tokens = ( utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).int().cpu() ) # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text( sample_id ) target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text( sample_id ) else: if src_dict is not None: src_str = src_dict.string(src_tokens, cfg.common_eval.post_process) else: src_str = "" if has_target: target_str = tgt_dict.string( target_tokens, cfg.common_eval.post_process, escape_unk=True, extra_symbols_to_ignore=get_symbols_to_strip_from_output( generator ), ) src_str = decode_fn(src_str) if has_target: target_str = decode_fn(target_str) if not cfg.common_eval.quiet: if src_dict is not None: print("S-{}\t{}".format(sample_id, src_str), file=output_file) if has_target: print("T-{}\t{}".format(sample_id, target_str), file=output_file) # Process top predictions for j, hypo in enumerate(hypos[i][: cfg.generation.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo["tokens"].int().cpu(), src_str=src_str, alignment=hypo["alignment"], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=cfg.common_eval.post_process, extra_symbols_to_ignore=get_symbols_to_strip_from_output(generator), ) detok_hypo_str = decode_fn(hypo_str) if not cfg.common_eval.quiet: score = hypo["score"] / math.log(2) # convert to base 2 # original hypothesis (after tokenization and BPE) print( "H-{}\t{}\t{}".format(sample_id, score, hypo_str), file=output_file, ) # detokenized hypothesis print( "D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str), file=output_file, ) print( "P-{}\t{}".format( sample_id, " ".join( map( lambda x: "{:.4f}".format(x), # convert from base e to base 2 hypo["positional_scores"] .div_(math.log(2)) .tolist(), ) ), ), file=output_file, ) if cfg.generation.print_alignment == "hard": print( "A-{}\t{}".format( sample_id, " ".join( [ "{}-{}".format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment ] ), ), file=output_file, ) if cfg.generation.print_alignment == "soft": print( "A-{}\t{}".format( sample_id, " ".join( [ ",".join(src_probs) for src_probs in alignment ] ), ), file=output_file, ) if cfg.generation.print_step: print( "I-{}\t{}".format(sample_id, hypo["steps"]), file=output_file, ) if cfg.generation.retain_iter_history: for step, h in enumerate(hypo["history"]): _, h_str, _ = utils.post_process_prediction( hypo_tokens=h["tokens"].int().cpu(), src_str=src_str, alignment=None, align_dict=None, tgt_dict=tgt_dict, remove_bpe=None, ) print( "E-{}_{}\t{}".format(sample_id, step, h_str), file=output_file, ) # Score only the top hypothesis if has_target and j == 0: if align_dict is not None or cfg.common_eval.post_process is not None: # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line( target_str, add_if_not_exist=True ) hypo_tokens = tgt_dict.encode_line( detok_hypo_str, add_if_not_exist=True ) if hasattr(scorer, "add_string"): scorer.add_string(target_str, detok_hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) progress.log({"wps": round(wps_meter.avg)}) num_sentences += ( sample["nsentences"] if "nsentences" in sample else sample["id"].numel() ) logger.info("NOTE: hypothesis and token scores are output in base 2") logger.info( "Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1.0 / gen_timer.avg, ) ) if has_target: if cfg.bpe and not cfg.generation.sacrebleu: if cfg.common_eval.post_process: 
logger.warning( "BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization" ) else: logger.warning( "If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization" ) # use print to be consistent with other main outputs: S-, H-, T-, D- and so on print( "Generate {} with beam={}: {}".format( cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string() ), file=output_file, ) return scorer
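Since `_main` writes hypotheses as tab-separated S-/T-/H-/D-/P- lines, a small helper for pulling the detokenized hypotheses back out of a generate-*.txt file might look like the sketch below; the field layout is taken from the print calls above.

# Sketch: collect detokenized hypotheses (D- lines) from a generate output file.
# The D- format is "D-<sample_id>\t<score>\t<detok_hypo_str>" as printed above.
def read_detokenized_hypotheses(path):
    hypos = {}
    with open(path, encoding="utf-8") as f:
        for line in f:
            if line.startswith("D-"):
                tag, score, text = line.rstrip("\n").split("\t", 2)
                sample_id = int(tag[len("D-"):])
                # nbest > 1 produces several D- lines per sample_id
                hypos.setdefault(sample_id, []).append((float(score), text))
    return hypos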
null
182,620
import ast import logging import math import os import sys from argparse import Namespace from itertools import chain import numpy as np import torch from fairseq import checkpoint_utils, options, scoring, tasks, utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.logging import progress_bar from fairseq.logging.meters import StopwatchMeter, TimeMeter from omegaconf import DictConfig def main(cfg: DictConfig): if isinstance(cfg, Namespace): cfg = convert_namespace_to_omegaconf(cfg) assert cfg.common_eval.path is not None, "--path required for generation!" assert ( not cfg.generation.sampling or cfg.generation.nbest == cfg.generation.beam ), "--sampling requires --nbest to be equal to --beam" assert ( cfg.generation.replace_unk is None or cfg.dataset.dataset_impl == "raw" ), "--replace-unk requires a raw text dataset (--dataset-impl=raw)" if cfg.common_eval.results_path is not None: os.makedirs(cfg.common_eval.results_path, exist_ok=True) output_path = os.path.join( cfg.common_eval.results_path, "generate-{}.txt".format(cfg.dataset.gen_subset), ) with open(output_path, "w", buffering=1, encoding="utf-8") as h: return _main(cfg, h) else: return _main(cfg, sys.stdout) def cli_main(): parser = options.get_generation_parser() # TODO: replace this workaround with refactoring of `AudioPretraining` parser.add_argument( '--arch', '-a', metavar='ARCH', default="wav2vec2", help='Model architecture. For constructing tasks that rely on ' 'model args (e.g. `AudioPretraining`)' ) args = options.parse_args_and_arch(parser) main(args)
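A hedged sketch of running generation programmatically with an explicit argument list; paths and flag values are placeholders.

# Sketch: run generation from Python instead of the console script.
from fairseq import options


def run_generation_example():
    parser = options.get_generation_parser()
    args = options.parse_args_and_arch(parser, input_args=[
        "data-bin/example",
        "--path", "checkpoints/checkpoint_best.pt",
        "--beam", "5",
        "--batch-size", "32",
        "--remove-bpe",
    ])
    scorer = main(args)          # main returns the scorer built by _main
    print(scorer.result_string())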
null
182,621
import math import warnings from typing import Dict, Optional, Tuple import torch from torch import Tensor, nn from torch.nn import Parameter import torch.nn.functional as F def gelu_accurate(x): if not hasattr(gelu_accurate, "_a"): gelu_accurate._a = math.sqrt(2 / math.pi) return ( 0.5 * x * (1 + torch.tanh(gelu_accurate._a * (x + 0.044715 * torch.pow(x, 3)))) ) def gelu(x: torch.Tensor) -> torch.Tensor: return torch.nn.functional.gelu(x.float()).type_as(x) The provided code snippet includes necessary dependencies for implementing the `get_activation_fn` function. Write a Python function `def get_activation_fn(activation: str)` to solve the following problem: Returns the activation function corresponding to `activation` Here is the function: def get_activation_fn(activation: str): """Returns the activation function corresponding to `activation`""" if activation == "relu": return F.relu elif activation == "gelu": return gelu elif activation == "gelu_fast": warnings.warn( "--activation-fn=gelu_fast has been renamed to gelu_accurate" ) return gelu_accurate elif activation == "gelu_accurate": return gelu_accurate elif activation == "tanh": return torch.tanh elif activation == "linear": return lambda x: x elif activation == "glu": return lambda x: x else: raise RuntimeError("--activation-fn {} not supported".format(activation))
Returns the activation function corresponding to `activation`
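A quick, purely illustrative usage check of the lookup above:

# Sketch: resolve an activation by name and apply it to a tensor.
import torch

relu_fn = get_activation_fn("relu")
gelu_fn = get_activation_fn("gelu")

x = torch.randn(2, 3)
print(relu_fn(x).shape)   # torch.Size([2, 3])
print(gelu_fn(x).shape)   # torch.Size([2, 3])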
182,622
import math import warnings from typing import Dict, Optional, Tuple import torch from torch import Tensor, nn from torch.nn import Parameter import torch.nn.functional as F class MultiheadAttention(nn.Module): """Multi-headed attention. See "Attention Is All You Need" for more details. """ def __init__( self, embed_dim, num_heads, kdim=None, vdim=None, dropout=0.0, bias=True, add_bias_kv=False, add_zero_attn=False, self_attention=False, encoder_decoder_attention=False, q_noise=0.0, qn_block_size=8, has_relative_attention_bias=False, num_buckets=32, max_distance=128, gru_rel_pos=False, rescale_init=False, ): super().__init__() self.embed_dim = embed_dim self.kdim = kdim if kdim is not None else embed_dim self.vdim = vdim if vdim is not None else embed_dim self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim self.num_heads = num_heads self.dropout_module = nn.Dropout(dropout) self.has_relative_attention_bias = has_relative_attention_bias self.num_buckets = num_buckets self.max_distance = max_distance if self.has_relative_attention_bias: self.relative_attention_bias = nn.Embedding(num_buckets, num_heads) self.head_dim = embed_dim // num_heads self.q_head_dim = self.head_dim self.k_head_dim = self.head_dim assert ( self.head_dim * num_heads == self.embed_dim ), "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.self_attention = self_attention self.encoder_decoder_attention = encoder_decoder_attention assert not self.self_attention or self.qkv_same_dim, ( "Self-attention requires query, key and " "value to be of the same size" ) k_bias = True if rescale_init: k_bias = False k_embed_dim = embed_dim q_embed_dim = embed_dim self.k_proj = quant_noise( nn.Linear(self.kdim, k_embed_dim, bias=k_bias), q_noise, qn_block_size ) self.v_proj = quant_noise( nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size ) self.q_proj = quant_noise( nn.Linear(embed_dim, q_embed_dim, bias=bias), q_noise, qn_block_size ) self.out_proj = quant_noise( nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size ) if add_bias_kv: self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim)) self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim)) else: self.bias_k = self.bias_v = None self.add_zero_attn = add_zero_attn self.gru_rel_pos = gru_rel_pos if self.gru_rel_pos: self.grep_linear = nn.Linear(self.q_head_dim, 8) self.grep_a = nn.Parameter(torch.ones(1, num_heads, 1, 1)) self.reset_parameters() def reset_parameters(self): if self.qkv_same_dim: # Empirically observed the convergence to be much better with # the scaled initialization nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2)) nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2)) else: nn.init.xavier_uniform_(self.k_proj.weight) nn.init.xavier_uniform_(self.v_proj.weight) nn.init.xavier_uniform_(self.q_proj.weight) nn.init.xavier_uniform_(self.out_proj.weight) if self.out_proj.bias is not None: nn.init.constant_(self.out_proj.bias, 0.0) if self.bias_k is not None: nn.init.xavier_normal_(self.bias_k) if self.bias_v is not None: nn.init.xavier_normal_(self.bias_v) if self.has_relative_attention_bias: nn.init.xavier_normal_(self.relative_attention_bias.weight) def _relative_positions_bucket(self, relative_positions, bidirectional=True): num_buckets = self.num_buckets max_distance = self.max_distance relative_buckets = 0 if bidirectional: num_buckets = num_buckets // 2 relative_buckets += (relative_positions > 
0).to(torch.long) * num_buckets relative_positions = torch.abs(relative_positions) else: relative_positions = -torch.min(relative_positions, torch.zeros_like(relative_positions)) max_exact = num_buckets // 2 is_small = relative_positions < max_exact relative_postion_if_large = max_exact + ( torch.log(relative_positions.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact) ).to(torch.long) relative_postion_if_large = torch.min( relative_postion_if_large, torch.full_like(relative_postion_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_positions, relative_postion_if_large) return relative_buckets def compute_bias(self, query_length, key_length): context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_positions_bucket( relative_position, bidirectional=True ) relative_position_bucket = relative_position_bucket.to(self.relative_attention_bias.weight.device) values = self.relative_attention_bias(relative_position_bucket) values = values.permute([2, 0, 1]) return values def forward( self, query, key: Optional[Tensor], value: Optional[Tensor], key_padding_mask: Optional[Tensor] = None, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None, need_weights: bool = True, static_kv: bool = False, attn_mask: Optional[Tensor] = None, before_softmax: bool = False, need_head_weights: bool = False, position_bias: Optional[Tensor] = None ) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: """Input shape: Time x Batch x Channel Args: key_padding_mask (ByteTensor, optional): mask to exclude keys that are pads, of shape `(batch, src_len)`, where padding elements are indicated by 1s. need_weights (bool, optional): return the attention weights, averaged over heads (default: False). attn_mask (ByteTensor, optional): typically used to implement causal attention, where the mask prevents the attention from looking forward in time (default: None). before_softmax (bool, optional): return the raw attention weights and values before the attention softmax. need_head_weights (bool, optional): return the attention weights for each head. Implies *need_weights*. Default: return the average attention weights over all heads. """ if need_head_weights: need_weights = True is_tpu = query.device.type == "xla" tgt_len, bsz, embed_dim = query.size() src_len = tgt_len assert embed_dim == self.embed_dim assert list(query.size()) == [tgt_len, bsz, embed_dim] if key is not None: src_len, key_bsz, _ = key.size() if not torch.jit.is_scripting(): assert key_bsz == bsz assert value is not None assert src_len, bsz == value.shape[:2] if self.has_relative_attention_bias and position_bias is None: position_bias = self.compute_bias(tgt_len, src_len) position_bias = position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, src_len) if ( not is_tpu # don't use PyTorch version on TPUs and incremental_state is None and not static_kv # A workaround for quantization to work. Otherwise JIT compilation # treats bias in linear module as method. 
and not torch.jit.is_scripting() and self.q_head_dim == self.head_dim ): assert key is not None and value is not None assert attn_mask is None attn_mask_rel_pos = None if position_bias is not None: attn_mask_rel_pos = position_bias if self.gru_rel_pos: query_layer = query.transpose(0, 1) new_x_shape = query_layer.size()[:-1] + (self.num_heads, -1) query_layer = query_layer.view(*new_x_shape) query_layer = query_layer.permute(0, 2, 1, 3) _B, _H, _L, __ = query_layer.size() gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view( _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1) gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0 attn_mask_rel_pos = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias attn_mask_rel_pos = attn_mask_rel_pos.view((-1, tgt_len, tgt_len)) k_proj_bias = self.k_proj.bias if k_proj_bias is None: k_proj_bias = torch.zeros_like(self.q_proj.bias) x, attn = F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), self.bias_k, self.bias_v, self.add_zero_attn, self.dropout_module.p, self.out_proj.weight, self.out_proj.bias, self.training, # self.training or self.dropout_module.apply_during_inference, key_padding_mask, need_weights, attn_mask_rel_pos, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) return x, attn, position_bias if incremental_state is not None: saved_state = self._get_input_buffer(incremental_state) if saved_state is not None and "prev_key" in saved_state: # previous time steps are cached - no need to recompute # key and value if they are static if static_kv: assert self.encoder_decoder_attention and not self.self_attention key = value = None else: saved_state = None if self.self_attention: q = self.q_proj(query) k = self.k_proj(query) v = self.v_proj(query) elif self.encoder_decoder_attention: # encoder-decoder attention q = self.q_proj(query) if key is None: assert value is None k = v = None else: k = self.k_proj(key) v = self.v_proj(key) else: assert key is not None and value is not None q = self.q_proj(query) k = self.k_proj(key) v = self.v_proj(value) q *= self.scaling if self.bias_k is not None: assert self.bias_v is not None k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)]) v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)]) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1), ], dim=1, ) q = ( q.contiguous() .view(tgt_len, bsz * self.num_heads, self.q_head_dim) .transpose(0, 1) ) if k is not None: k = ( k.contiguous() .view(-1, bsz * self.num_heads, self.k_head_dim) .transpose(0, 1) ) if v is not None: v = ( v.contiguous() .view(-1, bsz * self.num_heads, self.head_dim) .transpose(0, 1) ) if saved_state is not None: # saved states are stored with shape (bsz, num_heads, seq_len, head_dim) if "prev_key" in saved_state: _prev_key = saved_state["prev_key"] assert _prev_key is not None prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim) if static_kv: k = prev_key else: assert k is not None k = torch.cat([prev_key, k], dim=1) src_len = k.size(1) if "prev_value" in saved_state: _prev_value = saved_state["prev_value"] assert _prev_value is not None prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim) if 
static_kv: v = prev_value else: assert v is not None v = torch.cat([prev_value, v], dim=1) prev_key_padding_mask: Optional[Tensor] = None if "prev_key_padding_mask" in saved_state: prev_key_padding_mask = saved_state["prev_key_padding_mask"] assert k is not None and v is not None key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( key_padding_mask=key_padding_mask, prev_key_padding_mask=prev_key_padding_mask, batch_size=bsz, src_len=k.size(1), static_kv=static_kv, ) saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim) saved_state["prev_key_padding_mask"] = key_padding_mask # In this branch incremental_state is never None assert incremental_state is not None incremental_state = self._set_input_buffer(incremental_state, saved_state) assert k is not None assert k.size(1) == src_len # This is part of a workaround to get around fork/join parallelism # not supporting Optional types. if key_padding_mask is not None and key_padding_mask.dim() == 0: key_padding_mask = None if key_padding_mask is not None: assert key_padding_mask.size(0) == bsz assert key_padding_mask.size(1) == src_len if self.add_zero_attn: assert v is not None src_len += 1 k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1) v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1) if attn_mask is not None: attn_mask = torch.cat( [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1 ) if key_padding_mask is not None: key_padding_mask = torch.cat( [ key_padding_mask, torch.zeros(key_padding_mask.size(0), 1).type_as( key_padding_mask ), ], dim=1, ) attn_weights = torch.bmm(q, k.transpose(1, 2)) attn_weights = self.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz) assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len] if attn_mask is not None: attn_mask = attn_mask.unsqueeze(0) attn_weights += attn_mask if key_padding_mask is not None: # don't attend to padding symbols attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) if not is_tpu: attn_weights = attn_weights.masked_fill( key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf"), ) else: attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.masked_fill(key_padding_mask, float("-inf")) attn_weights = attn_weights.transpose(0, 2) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if before_softmax: return attn_weights, v, position_bias if position_bias is not None: if self.gru_rel_pos == 1: query_layer = q.view(bsz, self.num_heads, tgt_len, self.q_head_dim) _B, _H, _L, __ = query_layer.size() gate_a, gate_b = torch.sigmoid(self.grep_linear(query_layer).view( _B, _H, _L, 2, 4).sum(-1, keepdim=False)).chunk(2, dim=-1) gate_a_1 = gate_a * (gate_b * self.grep_a - 1.0) + 2.0 position_bias = gate_a_1.view(bsz * self.num_heads, -1, 1) * position_bias position_bias = position_bias.view(attn_weights.size()) attn_weights = attn_weights + position_bias attn_weights_float = F.softmax( attn_weights, dim=-1 ) attn_weights = attn_weights_float.type_as(attn_weights) attn_probs = self.dropout_module(attn_weights) assert v is not None attn = torch.bmm(attn_probs, v) assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim] attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim) attn = self.out_proj(attn) attn_weights: Optional[Tensor] = None if need_weights: attn_weights = attn_weights_float.view( bsz, self.num_heads, 
tgt_len, src_len ).transpose(1, 0) if not need_head_weights: # average attention weights over heads attn_weights = attn_weights.mean(dim=0) return attn, attn_weights, position_bias def _append_prev_key_padding_mask( key_padding_mask: Optional[Tensor], prev_key_padding_mask: Optional[Tensor], batch_size: int, src_len: int, static_kv: bool, ) -> Optional[Tensor]: # saved key padding masks have shape (bsz, seq_len) if prev_key_padding_mask is not None and static_kv: new_key_padding_mask = prev_key_padding_mask elif prev_key_padding_mask is not None and key_padding_mask is not None: new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1 ) # During incremental decoding, as the padding token enters and # leaves the frame, there will be a time when prev or current # is None elif prev_key_padding_mask is not None: if src_len > prev_key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - prev_key_padding_mask.size(1)), device=prev_key_padding_mask.device, ) new_key_padding_mask = torch.cat( [prev_key_padding_mask.float(), filler.float()], dim=1 ) else: new_key_padding_mask = prev_key_padding_mask.float() elif key_padding_mask is not None: if src_len > key_padding_mask.size(1): filler = torch.zeros( (batch_size, src_len - key_padding_mask.size(1)), device=key_padding_mask.device, ) new_key_padding_mask = torch.cat( [filler.float(), key_padding_mask.float()], dim=1 ) else: new_key_padding_mask = key_padding_mask.float() else: new_key_padding_mask = prev_key_padding_mask return new_key_padding_mask def _get_input_buffer( self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] ) -> Dict[str, Optional[Tensor]]: result = self.get_incremental_state(incremental_state, "attn_state") if result is not None: return result else: empty_result: Dict[str, Optional[Tensor]] = {} return empty_result def _set_input_buffer( self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], buffer: Dict[str, Optional[Tensor]], ): return self.set_incremental_state(incremental_state, "attn_state", buffer) def apply_sparse_mask(self, attn_weights, tgt_len: int, src_len: int, bsz: int): return attn_weights The provided code snippet includes necessary dependencies for implementing the `init_bert_params` function. Write a Python function `def init_bert_params(module)` to solve the following problem: Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). Here is the function: def init_bert_params(module): """ Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bais will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. 
If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention initialized using the normal distribution (to be validated). """ def normal_(data): # with FSDP, module params will be on CUDA, so we cast them back to CPU # so that the RNG is consistent with and without FSDP data.copy_( data.cpu().normal_(mean=0.0, std=0.02).to(data.device) ) if isinstance(module, nn.Linear): normal_(module.weight.data) if module.bias is not None: module.bias.data.zero_() if isinstance(module, nn.Embedding): normal_(module.weight.data) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if isinstance(module, MultiheadAttention): normal_(module.q_proj.weight.data) normal_(module.k_proj.weight.data) normal_(module.v_proj.weight.data)
Initialize the weights specific to the BERT Model. This overrides the default initializations depending on the specified arguments. 1. If normal_init_linear_weights is set then weights of linear layer will be initialized using the normal distribution and bias will be set to the specified value. 2. If normal_init_embed_weights is set then weights of embedding layer will be initialized using the normal distribution. 3. If normal_init_proj_weights is set then weights of in_project_weight for MultiHeadAttention are initialized using the normal distribution (to be validated).
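Typical usage (a sketch; the toy module below is illustrative) is to apply the initializer recursively over a model with nn.Module.apply:

# Sketch: apply the BERT-style initializer to every submodule of a model.
import torch.nn as nn

model = nn.Sequential(
    nn.Embedding(1000, 64, padding_idx=0),
    nn.Linear(64, 64),
)
# Linear/Embedding (and MultiheadAttention) weights are re-initialized in place
model.apply(init_bert_params)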
182,623
import math import warnings from typing import Dict, Optional, Tuple import torch from torch import Tensor, nn from torch.nn import Parameter import torch.nn.functional as F The provided code snippet includes necessary dependencies for implementing the `quant_noise` function. Write a Python function `def quant_noise(module, p, block_size)` to solve the following problem: Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks Here is the function: def quant_noise(module, p, block_size): """ Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks """ # if no quantization noise, don't register hook if p <= 0: return module # supported modules assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d)) # test whether module.weight has the right sizes wrt block_size is_conv = module.weight.ndim == 4 # 2D matrix if not is_conv: assert ( module.weight.size(1) % block_size == 0 ), "Input features must be a multiple of block sizes" # 4D matrix else: # 1x1 convolutions if module.kernel_size == (1, 1): assert ( module.in_channels % block_size == 0 ), "Input channels must be a multiple of block sizes" # regular convolutions else: k = module.kernel_size[0] * module.kernel_size[1] assert k % block_size == 0, "Kernel size must be a multiple of block size" def _forward_pre_hook(mod, input): # no noise for evaluation if mod.training: if not is_conv: # gather weight and sizes weight = mod.weight in_features = weight.size(1) out_features = weight.size(0) # split weight matrix into blocks and randomly drop selected blocks mask = torch.zeros( in_features // block_size * out_features, device=weight.device ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_features) else: # gather weight and sizes weight = mod.weight in_channels = mod.in_channels out_channels = mod.out_channels # split weight matrix into blocks and randomly drop selected blocks if mod.kernel_size == (1, 1): mask = torch.zeros( int(in_channels // block_size * out_channels), device=weight.device, ) mask.bernoulli_(p) mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels) else: mask = torch.zeros( weight.size(0), weight.size(1), device=weight.device ) 
mask.bernoulli_(p) mask = ( mask.unsqueeze(2) .unsqueeze(3) .repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1]) ) # scale weights and apply mask mask = mask.to( torch.bool ) # x.bool() is not currently supported in TorchScript s = 1 / (1 - p) mod.weight.data = s * weight.masked_fill(mask, 0) module.register_forward_pre_hook(_forward_pre_hook) return module
Wraps modules and applies quantization noise to the weights for subsequent quantization with Iterative Product Quantization as described in "Training with Quantization Noise for Extreme Model Compression" Args: - module: nn.Module - p: amount of Quantization Noise - block_size: size of the blocks for subsequent quantization with iPQ Remarks: - Module weights must have the right sizes wrt the block size - Only Linear, Embedding and Conv2d modules are supported for the moment - For more detail on how to quantize by blocks with convolutional weights, see "And the Bit Goes Down: Revisiting the Quantization of Neural Networks" - We implement the simplest form of noise here as stated in the paper which consists in randomly dropping blocks
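A minimal usage sketch for the function above; it assumes quant_noise is in scope (in fairseq it lives under fairseq.modules.quant_noise) and uses a toy layer whose input feature count is divisible by the block size.

import torch
import torch.nn as nn

# toy layer: 512 input features are divisible by block_size=8
layer = quant_noise(nn.Linear(512, 256), p=0.1, block_size=8)

layer.train()
out = layer(torch.randn(4, 512))   # forward pre-hook drops ~10% of weight blocks and rescales the rest
layer.eval()
out = layer(torch.randn(4, 512))   # no noise is applied at evaluation time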
182,624
import math import logging from typing import List, Optional, Tuple import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from torch.nn import LayerNorm from modules import ( Fp32GroupNorm, Fp32LayerNorm, GradMultiply, MultiheadAttention, SamePad, init_bert_params, get_activation_fn, TransposeLast, GLU_Linear, ) The provided code snippet includes necessary dependencies for implementing the `compute_mask_indices` function. Write a Python function `def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray` to solve the following problem: Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans Here is the function: def compute_mask_indices( shape: Tuple[int, int], padding_mask: Optional[torch.Tensor], mask_prob: float, mask_length: int, mask_type: str = "static", mask_other: float = 0.0, min_masks: int = 0, no_overlap: bool = False, min_space: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape Args: shape: the the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. 
mask is min 1 element poisson = sample from possion distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if false, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans """ bsz, all_sz = shape mask = np.full((bsz, all_sz), False) all_num_mask = int( # add a random number for probabilistic rounding mask_prob * all_sz / float(mask_length) + np.random.rand() ) all_num_mask = max(min_masks, all_num_mask) mask_idcs = [] for i in range(bsz): if padding_mask is not None: sz = all_sz - padding_mask[i].long().sum().item() num_mask = int( # add a random number for probabilistic rounding mask_prob * sz / float(mask_length) + np.random.rand() ) num_mask = max(min_masks, num_mask) else: sz = all_sz num_mask = all_num_mask if mask_type == "static": lengths = np.full(num_mask, mask_length) elif mask_type == "uniform": lengths = np.random.randint(mask_other, mask_length * 2 + 1, size=num_mask) elif mask_type == "normal": lengths = np.random.normal(mask_length, mask_other, size=num_mask) lengths = [max(1, int(round(x))) for x in lengths] elif mask_type == "poisson": lengths = np.random.poisson(mask_length, size=num_mask) lengths = [int(round(x)) for x in lengths] else: raise Exception("unknown mask selection " + mask_type) if sum(lengths) == 0: lengths[0] = min(mask_length, sz - 1) if no_overlap: mask_idc = [] def arrange(s, e, length, keep_length): span_start = np.random.randint(s, e - length) mask_idc.extend(span_start + i for i in range(length)) new_parts = [] if span_start - s - min_space >= keep_length: new_parts.append((s, span_start - min_space + 1)) if e - span_start - keep_length - min_space > keep_length: new_parts.append((span_start + length + min_space, e)) return new_parts parts = [(0, sz)] min_length = min(lengths) for length in sorted(lengths, reverse=True): lens = np.fromiter( (e - s if e - s >= length + min_space else 0 for s, e in parts), np.int, ) l_sum = np.sum(lens) if l_sum == 0: break probs = lens / np.sum(lens) c = np.random.choice(len(parts), p=probs) s, e = parts.pop(c) parts.extend(arrange(s, e, length, min_length)) mask_idc = np.asarray(mask_idc) else: min_len = min(lengths) if sz - min_len <= num_mask: min_len = sz - num_mask - 1 mask_idc = np.random.choice(sz - min_len, num_mask, replace=False) mask_idc = np.asarray( [ mask_idc[j] + offset for j in range(len(mask_idc)) for offset in range(lengths[j]) ] ) mask_idcs.append(np.unique(mask_idc[mask_idc < sz])) min_len = min([len(m) for m in mask_idcs]) for i, mask_idc in enumerate(mask_idcs): if len(mask_idc) > min_len: mask_idc = np.random.choice(mask_idc, min_len, replace=False) mask[i, mask_idc] = True return mask
Computes random mask spans for a given shape Args: shape: the shape for which to compute masks. should be of size 2 where first element is batch size and 2nd is timesteps padding_mask: optional padding mask of the same size as shape, which will prevent masking padded elements mask_prob: probability for each token to be chosen as start of the span to be masked. this will be multiplied by number of timesteps divided by length of mask span to mask approximately this percentage of all elements. however due to overlaps, the actual number will be smaller (unless no_overlap is True) mask_type: how to compute mask lengths static = fixed size uniform = sample from uniform distribution [mask_other, mask_length*2] normal = sample from normal distribution with mean mask_length and stdev mask_other. mask is min 1 element poisson = sample from Poisson distribution with lambda = mask length min_masks: minimum number of masked spans no_overlap: if true, will switch to an alternative recursive algorithm that prevents spans from overlapping min_space: only used if no_overlap is True, this is how many elements to keep unmasked between spans
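A minimal usage sketch, assuming compute_mask_indices is in scope as defined above; the values below are illustrative. (Note that the np.fromiter(..., np.int) call in the no_overlap branch relies on the old np.int alias, so an older NumPy is assumed for that branch.)

mask = compute_mask_indices(
    shape=(2, 100),          # batch of 2, 100 timesteps
    padding_mask=None,
    mask_prob=0.65,
    mask_length=10,
    mask_type="static",
    min_masks=2,
)
print(mask.shape)            # (2, 100), boolean numpy array
print(mask.sum(axis=1))      # roughly mask_prob * 100 positions per row (overlaps reduce this)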
182,625
import os from PIL import Image import xml.etree.ElementTree as ET import numpy as np import json from PIL import Image from shutil import copyfile def convert(ROOT, TRACK, SPLIT): coco_data = { "images": [], "annotations": [], "categories": [{"id": 1, "name": "table"}, ], } DATA_DIR = f"{ROOT}/{TRACK}/{SPLIT}" prefix = "cTDaR_t0" if TRACK == "trackA_archival" else "cTDaR_t1" print(TRACK, SPLIT, prefix) table_count = 0 for file in sorted(os.listdir(DATA_DIR)): if file.startswith(prefix) and file.endswith(".jpg"): img = Image.open(os.path.join(DATA_DIR, file)) coco_data["images"].append( { "file_name": file, "height": img.height, "width": img.width, "id": int(file[7:-4]), } ) elif file.startswith(prefix) and file.endswith(".xml"): # print(file) tree = ET.parse(os.path.join(DATA_DIR, file)) root = tree.getroot() assert len(root.findall("./table/Coords")) > 0 for table_id in range(len(root.findall("./table/Coords"))): four_points = root.findall("./table/Coords")[table_id].attrib["points"] four_points = list(map(lambda x: x.split(","), four_points.split())) four_points = [[int(j) for j in i] for i in four_points] segmentation = [j for i in four_points for j in i] bbox = [ four_points[0][0], four_points[0][1], four_points[2][0] - four_points[0][0], four_points[2][1] - four_points[0][1], ] coco_data["annotations"].append( { "segmentation": [segmentation], "area": bbox[2] * bbox[3], "iscrowd": 0, "image_id": int(file[7:-4]), "bbox": bbox, "category_id": 1, "id": table_count, } ) table_count += 1 with open(f"{ROOT}/{TRACK}/{SPLIT}.json", "w") as f: json.dump(coco_data, f)
null
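A hypothetical driver loop for the converter above; the cTDaR directory layout (ROOT/TRACK/SPLIT) and the track/split folder names are assumptions, not verified against the dataset.

ROOT = "data/cTDaR"                                  # placeholder root directory
for TRACK in ["trackA_modern", "trackA_archival"]:   # assumed track folder names
    for SPLIT in ["train", "test"]:
        convert(ROOT, TRACK, SPLIT)                  # writes {ROOT}/{TRACK}/{SPLIT}.json in COCO format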
182,626
import os from PIL import Image import xml.etree.ElementTree as ET import numpy as np import json from PIL import Image from shutil import copyfile def clean_img(DATA_DIR): for file in sorted(os.listdir(DATA_DIR)): if file.endswith(".JPG"): os.rename(os.path.join(DATA_DIR, file), os.path.join(DATA_DIR, file.replace(".JPG", ".jpg"))) elif file.endswith(".TIFF"): img = Image.open(os.path.join(DATA_DIR, file)) img.save(os.path.join(DATA_DIR, file.replace(".TIFF", ".jpg"))) os.remove(os.path.join(DATA_DIR, file)) elif file.endswith(".png"): img = Image.open(os.path.join(DATA_DIR, file)) img.save(os.path.join(DATA_DIR, file.replace(".png", ".jpg"))) os.remove(os.path.join(DATA_DIR, file))
null
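A short sketch of how the helper above might be run before conversion, normalizing every image in a split directory to .jpg; the paths are placeholders.

for split in ["train", "test"]:
    clean_img(f"data/cTDaR/trackA_modern/{split}")   # renames .JPG and re-encodes .TIFF/.png to .jpg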
182,627
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class BEiT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): super(BEiT, self).__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.use_checkpoint = use_checkpoint if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) self.use_shared_rel_pos_bias = use_shared_rel_pos_bias if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) # trunc_normal_(self.mask_token, std=.02) if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), # nn.SyncBatchNorm(embed_dim), nn.BatchNorm2d(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, 
layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ logger = get_root_logger() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger, beit_spec_expand_rel_pos = self.use_rel_pos_bias, ) ''' def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def forward_features(self, x): B, C, H, W = x.shape x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) # Hp, Wp are HW for patches batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.pos_embed is not None: cls_tokens = cls_tokens + self.pos_embed[:, :1, :] x = torch.cat((cls_tokens, x), dim=1) x = self.pos_drop(x) features = [] training_window_size = torch.tensor([Hp, Wp]) rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size) else: x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size) if i in self.out_indices: xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def beit_base_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, **kwargs) model.default_cfg = _cfg() return model
null
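A hedged construction sketch for the factory above. The snippet omits PatchEmbed, Block, HybridEmbed and RelativePositionBias, so it assumes the full backbone module is importable; the out_features names follow the "layerN" convention parsed by int(name[5:]).

import torch

model = beit_base_patch16(
    img_size=[224, 224],
    drop_path_rate=0.1,
    out_features=["layer3", "layer5", "layer7", "layer11"],
)
feats = model(torch.randn(1, 3, 224, 224))   # dict of multi-scale features produced by fpn1..fpn4
for name, value in feats.items():
    print(name, value.shape)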
182,628
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class BEiT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): super(BEiT, self).__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.use_checkpoint = use_checkpoint if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) self.use_shared_rel_pos_bias = use_shared_rel_pos_bias if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) # trunc_normal_(self.mask_token, std=.02) if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), # nn.SyncBatchNorm(embed_dim), nn.BatchNorm2d(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, 
layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ logger = get_root_logger() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger, beit_spec_expand_rel_pos = self.use_rel_pos_bias, ) ''' def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def forward_features(self, x): B, C, H, W = x.shape x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) # Hp, Wp are HW for patches batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.pos_embed is not None: cls_tokens = cls_tokens + self.pos_embed[:, :1, :] x = torch.cat((cls_tokens, x), dim=1) x = self.pos_drop(x) features = [] training_window_size = torch.tensor([Hp, Wp]) rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size) else: x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size) if i in self.out_indices: xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def beit_large_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, **kwargs) model.default_cfg = _cfg() return model
null
182,629
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class BEiT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): super(BEiT, self).__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.use_checkpoint = use_checkpoint if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) self.use_shared_rel_pos_bias = use_shared_rel_pos_bias if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) # trunc_normal_(self.mask_token, std=.02) if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), # nn.SyncBatchNorm(embed_dim), nn.BatchNorm2d(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, 
layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ logger = get_root_logger() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger, beit_spec_expand_rel_pos = self.use_rel_pos_bias, ) ''' def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def forward_features(self, x): B, C, H, W = x.shape x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) # Hp, Wp are HW for patches batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.pos_embed is not None: cls_tokens = cls_tokens + self.pos_embed[:, :1, :] x = torch.cat((cls_tokens, x), dim=1) x = self.pos_drop(x) features = [] training_window_size = torch.tensor([Hp, Wp]) rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size) else: x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size) if i in self.out_indices: xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def dit_base_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0.1, **kwargs) model.default_cfg = _cfg() return model
null
182,630
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class BEiT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): super(BEiT, self).__init__() norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6) self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.use_checkpoint = use_checkpoint if hybrid_backbone is not None: self.patch_embed = HybridEmbed( hybrid_backbone, img_size=img_size, in_chans=in_chans, embed_dim=embed_dim) else: self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) self.use_shared_rel_pos_bias = use_shared_rel_pos_bias if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) # trunc_normal_(self.mask_token, std=.02) if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), # nn.SyncBatchNorm(embed_dim), nn.BatchNorm2d(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() def fix_init_weight(self): def rescale(param, 
layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): """Initialize the weights in backbone. Args: pretrained (str, optional): Path to pre-trained weights. Defaults to None. """ logger = get_root_logger() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) self.fix_init_weight() if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger, beit_spec_expand_rel_pos = self.use_rel_pos_bias, ) ''' def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def forward_features(self, x): B, C, H, W = x.shape x, (Hp, Wp) = self.patch_embed(x, self.pos_embed[:, 1:, :] if self.pos_embed is not None else None) # Hp, Wp are HW for patches batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks if self.pos_embed is not None: cls_tokens = cls_tokens + self.pos_embed[:, :1, :] x = torch.cat((cls_tokens, x), dim=1) x = self.pos_drop(x) features = [] training_window_size = torch.tensor([Hp, Wp]) rel_pos_bias = self.rel_pos_bias(training_window_size) if self.rel_pos_bias is not None else None for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x, rel_pos_bias, training_window_size) else: x = blk(x, rel_pos_bias=rel_pos_bias, training_window_size=training_window_size) if i in self.out_indices: xp = x[:, 1:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def dit_large_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=1e-5, **kwargs) model.default_cfg = _cfg() return model
null
182,631
import torch from detectron2.layers import ( ShapeSpec, ) from detectron2.modeling import Backbone, BACKBONE_REGISTRY, FPN from detectron2.modeling.backbone.fpn import LastLevelP6P7, LastLevelMaxPool from .beit import beit_base_patch16, dit_base_patch16, dit_large_patch16, beit_large_patch16 from .deit import deit_base_patch16, mae_base_patch16 def build_VIT_backbone(cfg): """ Create a VIT instance from config. Args: cfg: a detectron2 CfgNode Returns: A VIT backbone instance. """ # fmt: off name = cfg.MODEL.VIT.NAME out_features = cfg.MODEL.VIT.OUT_FEATURES drop_path = cfg.MODEL.VIT.DROP_PATH img_size = cfg.MODEL.VIT.IMG_SIZE pos_type = cfg.MODEL.VIT.POS_TYPE model_kwargs = eval(str(cfg.MODEL.VIT.MODEL_KWARGS).replace("`", "")) return VIT_Backbone(name, out_features, drop_path, img_size, pos_type, model_kwargs) The provided code snippet includes necessary dependencies for implementing the `build_vit_fpn_backbone` function. Write a Python function `def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec)` to solve the following problem: Create a VIT w/ FPN backbone. Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. Here is the function: def build_vit_fpn_backbone(cfg, input_shape: ShapeSpec): """ Create a VIT w/ FPN backbone. Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`. """ bottom_up = build_VIT_backbone(cfg) in_features = cfg.MODEL.FPN.IN_FEATURES out_channels = cfg.MODEL.FPN.OUT_CHANNELS backbone = FPN( bottom_up=bottom_up, in_features=in_features, out_channels=out_channels, norm=cfg.MODEL.FPN.NORM, top_block=LastLevelMaxPool(), fuse_type=cfg.MODEL.FPN.FUSE_TYPE, ) return backbone
Create a VIT w/ FPN backbone. Args: cfg: a detectron2 CfgNode Returns: backbone (Backbone): backbone module, must be a subclass of :class:`Backbone`.
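A hedged wiring sketch for the builder above: the MODEL.VIT keys must already exist on the config (e.g. via the add_vit_config helper that appears later in this file set), and the backbone name and FPN settings below are placeholders.

from detectron2.config import get_cfg
from detectron2.layers import ShapeSpec

cfg = get_cfg()
add_vit_config(cfg)                               # assumed available; registers MODEL.VIT.*
cfg.MODEL.VIT.NAME = "dit_base_patch16"           # placeholder backbone name
cfg.MODEL.FPN.IN_FEATURES = ["layer3", "layer5", "layer7", "layer11"]
cfg.MODEL.FPN.OUT_CHANNELS = 256
backbone = build_vit_fpn_backbone(cfg, ShapeSpec(channels=3))
print(backbone.output_shape())                    # per-level output specs (p2..p6)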
182,632
import copy import logging import numpy as np import torch from detectron2.data import detection_utils as utils from detectron2.data import transforms as T The provided code snippet includes necessary dependencies for implementing the `build_transform_gen` function. Write a Python function `def build_transform_gen(cfg, is_train)` to solve the following problem: Create a list of :class:`TransformGen` from config. Returns: list[TransformGen] Here is the function: def build_transform_gen(cfg, is_train): """ Create a list of :class:`TransformGen` from config. Returns: list[TransformGen] """ if is_train: min_size = cfg.INPUT.MIN_SIZE_TRAIN max_size = cfg.INPUT.MAX_SIZE_TRAIN sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING else: min_size = cfg.INPUT.MIN_SIZE_TEST max_size = cfg.INPUT.MAX_SIZE_TEST sample_style = "choice" if sample_style == "range": assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) logger = logging.getLogger(__name__) tfm_gens = [] if is_train: tfm_gens.append(T.RandomFlip()) tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) if is_train: logger.info("TransformGens used in training: " + str(tfm_gens)) return tfm_gens
Create a list of :class:`TransformGen` from config. Returns: list[TransformGen]
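A minimal sketch using detectron2's default config values; it only shows what the helper returns for training.

from detectron2.config import get_cfg

cfg = get_cfg()
tfm_gens = build_transform_gen(cfg, is_train=True)
# -> [RandomFlip(), ResizeShortestEdge(...)] built from cfg.INPUT.MIN_SIZE_TRAIN / MAX_SIZE_TRAIN
print(tfm_gens)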
182,633
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs }
Add config for VIT.
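A minimal sketch: extend a default detectron2 config with the ViT keys before merging a project config file (the YAML path is a placeholder).

from detectron2.config import get_cfg

cfg = get_cfg()
add_vit_config(cfg)
cfg.merge_from_file("configs/cascade/cascade_dit_base.yaml")   # hypothetical config path
print(cfg.MODEL.VIT.NAME, cfg.MODEL.VIT.OUT_FEATURES)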
182,634
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .icdar_evaluation import ICDAREvaluator from detectron2.evaluation import COCOEvaluator The provided code snippet includes necessary dependencies for implementing the `create_ddp_model` function. Write a Python function `def create_ddp_model(model, *, fp16_compression=False, **kwargs)` to solve the following problem: Create a DistributedDataParallel model if there are >1 processes. Args: model: a torch.nn.Module fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. Here is the function: def create_ddp_model(model, *, fp16_compression=False, **kwargs): """ Create a DistributedDataParallel model if there are >1 processes. Args: model: a torch.nn.Module fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. """ # noqa if comm.get_world_size() == 1: return model if "device_ids" not in kwargs: kwargs["device_ids"] = [comm.get_local_rank()] ddp = DistributedDataParallel(model, **kwargs) if fp16_compression: from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook) return ddp
Create a DistributedDataParallel model if there are >1 processes. Args: model: a torch.nn.Module fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
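A minimal sketch; with a single process comm.get_world_size() is 1 and the model is returned unchanged, so this also runs outside a distributed launch.

import torch.nn as nn

model = nn.Linear(16, 16)
model = create_ddp_model(model, broadcast_buffers=False, fp16_compression=False)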
182,635
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .icdar_evaluation import ICDAREvaluator from detectron2.evaluation import COCOEvaluator The provided code snippet includes necessary dependencies for implementing the `default_argument_parser` function. Write a Python function `def default_argument_parser(epilog=None)` to solve the following problem: Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser: Here is the function: def default_argument_parser(epilog=None): """ Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser: """ parser = argparse.ArgumentParser( epilog=epilog or f""" Examples: Run on single machine: $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml Change some config options: $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 Run on multiple machines: (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags] (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags] """, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") parser.add_argument( "--resume", action="store_true", help="Whether to attempt to resume from the checkpoint directory. " "See documentation of `MyTrainer.resume_or_load()` for what it means.", ) parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") parser.add_argument( "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" ) # PyTorch still may leave orphan processes in multi-gpu training. # Therefore we use a deterministic way to obtain port, # so that users are aware of orphan processes by seeing the port occupied. 
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14 parser.add_argument( "--dist-url", default="tcp://127.0.0.1:{}".format(port), help="initialization URL for pytorch distributed backend. See " "https://pytorch.org/docs/stable/distributed.html for details.", ) parser.add_argument( "opts", help=""" Modify config options at the end of the command. For Yacs configs, use space-separated "PATH.KEY VALUE" pairs. For python-based LazyConfig, use "path.key=value". """.strip(), default=None, nargs=argparse.REMAINDER, ) return parser
Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser:
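A minimal sketch of parsing the common flags, including trailing config overrides collected into args.opts; the file names are placeholders.

args = default_argument_parser().parse_args(
    ["--config-file", "cfg.yaml", "--num-gpus", "1", "MODEL.WEIGHTS", "model.pth"]
)
print(args.config_file, args.num_gpus, args.opts)   # 'cfg.yaml' 1 ['MODEL.WEIGHTS', 'model.pth']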
182,636
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .icdar_evaluation import ICDAREvaluator from detectron2.evaluation import COCOEvaluator def _try_get_key(cfg, *keys, default=None): """ Try select keys from cfg until the first key that exists. Otherwise return default. """ if isinstance(cfg, CfgNode): cfg = OmegaConf.create(cfg.dump()) for k in keys: none = object() p = OmegaConf.select(cfg, k, default=none) if p is not none: return p return default def _highlight(code, filename): try: import pygments except ImportError: return code from pygments.lexers import Python3Lexer, YamlLexer from pygments.formatters import Terminal256Formatter lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) return code The provided code snippet includes necessary dependencies for implementing the `default_setup` function. Write a Python function `def default_setup(cfg, args)` to solve the following problem: Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged Here is the function: def default_setup(cfg, args): """ Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged """ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") if comm.is_main_process() and output_dir: PathManager.mkdirs(output_dir) rank = comm.get_rank() setup_logger(output_dir, distributed_rank=rank, name="fvcore") logger = setup_logger(output_dir, distributed_rank=rank) logger.info("Rank of current process: {}. 
World size: {}".format(rank, comm.get_world_size())) logger.info("Environment info:\n" + collect_env_info()) logger.info("Command line arguments: " + str(args)) if hasattr(args, "config_file") and args.config_file != "": logger.info( "Contents of args.config_file={}:\n{}".format( args.config_file, _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), ) ) if comm.is_main_process() and output_dir: # Note: some of our scripts may expect the existence of # config.yaml in output directory path = os.path.join(output_dir, "config.yaml") if isinstance(cfg, CfgNode): logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) with PathManager.open(path, "w") as f: f.write(cfg.dump()) else: LazyConfig.save(cfg, path) logger.info("Full config saved to {}".format(path)) # make sure each worker has a different, yet deterministic seed if specified seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) seed_all_rng(None if seed < 0 else seed + rank) # cudnn benchmark has large overhead. It shouldn't be used considering the small size of # typical validation set. if not (hasattr(args, "eval_only") and args.eval_only): torch.backends.cudnn.benchmark = _try_get_key( cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False )
Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged
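A hedged sketch of the usual setup() helper that calls this function; the add_vit_config call mirrors this project's pattern and is an assumption, as is taking the config path and overrides from args.

from detectron2.config import get_cfg

def setup(args):
    cfg = get_cfg()
    add_vit_config(cfg)              # assumed available in this project
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)         # logger setup, config dump, seeding, cudnn flag
    return cfg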
182,637
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .icdar_evaluation import ICDAREvaluator from detectron2.evaluation import COCOEvaluator The provided code snippet includes necessary dependencies for implementing the `default_writers` function. Write a Python function `def default_writers(output_dir: str, max_iter: Optional[int] = None)` to solve the following problem: Build a list of :class:`EventWriter` to be used. It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`. Args: output_dir: directory to store JSON metrics and tensorboard events max_iter: the total number of iterations Returns: list[EventWriter]: a list of :class:`EventWriter` objects. Here is the function: def default_writers(output_dir: str, max_iter: Optional[int] = None): """ Build a list of :class:`EventWriter` to be used. It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`. Args: output_dir: directory to store JSON metrics and tensorboard events max_iter: the total number of iterations Returns: list[EventWriter]: a list of :class:`EventWriter` objects. """ PathManager.mkdirs(output_dir) return [ # It may not always print what you want to see, since it prints "common" metrics only. CommonMetricPrinter(max_iter), JSONWriter(os.path.join(output_dir, "metrics.json")), TensorboardXWriter(output_dir), ]
Build a list of :class:`EventWriter` to be used. It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`. Args: output_dir: directory to store JSON metrics and tensorboard events max_iter: the total number of iterations Returns: list[EventWriter]: a list of :class:`EventWriter` objects.
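A minimal sketch: build the writers once and flush them periodically from the training loop (the flush cadence is illustrative).

writers = default_writers("./output", max_iter=60000)

# inside the training loop (sketch):
# if iteration % 20 == 0 or iteration == max_iter - 1:
#     for writer in writers:
#         writer.write()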
182,638
from detectron2.checkpoint import DetectionCheckpointer from typing import Any import torch import torch.nn as nn from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION, quantization, \ ObserverBase, FakeQuantizeBase from torch import distributed as dist from scipy import interpolate import numpy as np import torch.nn.functional as F def append_prefix(k): prefix = 'backbone.bottom_up.backbone.' return prefix + k if not k.startswith(prefix) else k def modify_ckpt_state(model, state_dict, logger=None): # reshape absolute position embedding for Swin if state_dict.get(append_prefix('absolute_pos_embed')) is not None: absolute_pos_embed = state_dict[append_prefix('absolute_pos_embed')] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = model.backbone.bottom_up.backbone.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H * W: logger.warning("Error in loading absolute_pos_embed, pass") else: state_dict[append_prefix('absolute_pos_embed')] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) def get_dist_info(): if dist.is_available() and dist.is_initialized(): rank = dist.get_rank() world_size = dist.get_world_size() else: rank = 0 world_size = 1 return rank, world_size rank, _ = get_dist_info() all_keys = list(state_dict.keys()) for key in all_keys: if "relative_position_index" in key: state_dict.pop(key) if "relative_position_bias_table" in key: rel_pos_bias = state_dict[key] src_num_pos, num_attn_heads = rel_pos_bias.size() if key not in model.state_dict(): continue dst_num_pos, _ = model.state_dict()[key].size() dst_patch_shape = model.backbone.bottom_up.backbone.patch_embed.patch_shape if dst_patch_shape[0] != dst_patch_shape[1]: raise NotImplementedError() num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5) if src_size != dst_size: if rank == 0: print("Position interpolate for %s from %dx%d to %dx%d" % ( key, src_size, src_size, dst_size, dst_size)) extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] def geometric_progression(a, r, n): return a * (1.0 - r ** n) / (1.0 - r) left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src_size // 2) if gp > dst_size // 2: right = q else: left = q # if q > 1.13492: # q = 1.13492 dis = [] cur = 1 for i in range(src_size // 2): dis.append(cur) cur += q ** (i + 1) r_ids = [-_ for _ in reversed(dis)] x = r_ids + [0] + dis y = r_ids + [0] + dis t = dst_size // 2.0 dx = np.arange(-t, t + 0.1, 1.0) dy = np.arange(-t, t + 0.1, 1.0) if rank == 0: print("x = {}".format(x)) print("dx = {}".format(dx)) all_rel_pos_bias = [] for i in range(num_attn_heads): z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() f = interpolate.interp2d(x, y, z, kind='cubic') all_rel_pos_bias.append( torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device)) rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) state_dict[key] = new_rel_pos_bias if append_prefix('pos_embed') in state_dict: pos_embed_checkpoint = state_dict[append_prefix('pos_embed')] embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.backbone.bottom_up.backbone.patch_embed.num_patches num_extra_tokens = model.backbone.bottom_up.backbone.pos_embed.shape[-2] - num_patches # height (== width) for the 
checkpoint position embedding orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding # new_size = int(num_patches ** 0.5) new_size_w = model.backbone.bottom_up.backbone.patch_embed.num_patches_w new_size_h = model.backbone.bottom_up.backbone.patch_embed.num_patches_h # class_token and dist_token are kept unchanged if orig_size != new_size_h or orig_size != new_size_w: if rank == 0: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size_w, new_size_h)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size_w, new_size_h), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) state_dict[append_prefix('pos_embed')] = new_pos_embed # interpolate position bias table if needed relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] if table_key not in model.state_dict(): continue table_current = model.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f"Error in loading {table_key}, pass") else: if L1 != L2: S1 = int(L1 ** 0.5) S2 = int(L2 ** 0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) if append_prefix('rel_pos_bias.relative_position_bias_table') in state_dict and \ model.backbone.bottom_up.backbone.use_rel_pos_bias and \ not model.backbone.bottom_up.backbone.use_shared_rel_pos_bias and \ append_prefix('blocks.0.attn.relative_position_bias_table') not in state_dict: logger.info("[BEIT] Expand the shared relative position embedding to each transformer block. ") num_layers = model.backbone.bottom_up.backbone.get_num_layers() rel_pos_bias = state_dict[append_prefix("rel_pos_bias.relative_position_bias_table")] for i in range(num_layers): state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone() state_dict.pop(append_prefix("rel_pos_bias.relative_position_bias_table")) return state_dict
null
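The record above resizes BEiT/ViT position embeddings and relative-position-bias tables when the checkpoint and model resolutions differ. A minimal, self-contained sketch of the absolute position-embedding part of that idea, assuming the usual ViT layout where the extra class/dist tokens sit ahead of the patch tokens; the helper name resize_pos_embed and the concrete shapes are illustrative, not taken from the original code:

import torch
import torch.nn.functional as F

def resize_pos_embed(pos_embed, num_extra_tokens, old_hw, new_hw):
    # pos_embed: (1, num_extra_tokens + old_h*old_w, C)
    extra = pos_embed[:, :num_extra_tokens]      # class/dist tokens are kept unchanged
    patches = pos_embed[:, num_extra_tokens:]    # only patch-position tokens are interpolated
    old_h, old_w = old_hw
    new_h, new_w = new_hw
    c = patches.shape[-1]
    patches = patches.reshape(1, old_h, old_w, c).permute(0, 3, 1, 2)
    patches = F.interpolate(patches, size=(new_h, new_w), mode='bicubic', align_corners=False)
    patches = patches.permute(0, 2, 3, 1).reshape(1, new_h * new_w, c)
    return torch.cat((extra, patches), dim=1)

# e.g. a 14x14 grid pre-trained at 224px mapped to a 32x20 grid for a detection input
pe = torch.zeros(1, 1 + 14 * 14, 768)
print(resize_pos_embed(pe, 1, (14, 14), (32, 20)).shape)  # torch.Size([1, 641, 768])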
182,639
import warnings import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import trunc_normal_, drop_path, to_2tuple from functools import partial def _cfg(url='', **kwargs): class ViT(nn.Module): def __init__(self, model_name='vit_base_patch16_224', img_size=384, patch_size=16, in_chans=3, embed_dim=1024, depth=24, num_heads=16, num_classes=19, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.1, attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_cfg=None, pos_embed_interp=False, random_init=False, align_corners=False, use_checkpoint=False, num_extra_tokens=1, out_features=None, **kwargs, ): def fix_init_weight(self): def rescale(param, layer_id): def _init_weights(self, m): def get_num_layers(self): def no_weight_decay(self): def _conv_filter(self, state_dict, patch_size=16): def to_2D(self, x): def to_1D(self, x): def interpolate_pos_encoding(self, x, w, h): def prepare_tokens(self, x, mask=None): def forward_features(self, x): def forward(self, x): def deit_base_patch16(pretrained=False, **kwargs): model = ViT( patch_size=16, drop_rate=0., embed_dim=768, depth=12, num_heads=12, num_classes=1000, mlp_ratio=4., qkv_bias=True, use_checkpoint=True, num_extra_tokens=2, **kwargs) model.default_cfg = _cfg() return model
null
182,640
import warnings import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import trunc_normal_, drop_path, to_2tuple from functools import partial def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class ViT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, model_name='vit_base_patch16_224', img_size=384, patch_size=16, in_chans=3, embed_dim=1024, depth=24, num_heads=16, num_classes=19, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.1, attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_cfg=None, pos_embed_interp=False, random_init=False, align_corners=False, use_checkpoint=False, num_extra_tokens=1, out_features=None, **kwargs, ): super(ViT, self).__init__() self.model_name = model_name self.img_size = img_size self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.depth = depth self.num_heads = num_heads self.num_classes = num_classes self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.drop_rate = drop_rate self.attn_drop_rate = attn_drop_rate self.drop_path_rate = drop_path_rate self.hybrid_backbone = hybrid_backbone self.norm_layer = norm_layer self.norm_cfg = norm_cfg self.pos_embed_interp = pos_embed_interp self.random_init = random_init self.align_corners = align_corners self.use_checkpoint = use_checkpoint self.num_extra_tokens = num_extra_tokens self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] # self.num_stages = self.depth # self.out_indices = tuple(range(self.num_stages)) if self.hybrid_backbone is not None: self.patch_embed = HybridEmbed( self.hybrid_backbone, img_size=self.img_size, in_chans=self.in_chans, embed_dim=self.embed_dim) else: self.patch_embed = PatchEmbed( img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim) self.num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) if self.num_extra_tokens == 2: self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.pos_embed = nn.Parameter(torch.zeros( 1, self.num_patches + self.num_extra_tokens, self.embed_dim)) self.pos_drop = nn.Dropout(p=self.drop_rate) # self.num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=self.embed_dim, num_heads=self.num_heads, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, qk_scale=self.qk_scale, drop=self.drop_rate, attn_drop=self.attn_drop_rate, drop_path=dpr[i], norm_layer=self.norm_layer) for i in range(self.depth)]) # NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here # self.repr = nn.Linear(embed_dim, representation_size) # self.repr_act = nn.Tanh() if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), nn.SyncBatchNorm(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, 
stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) if self.num_extra_tokens==2: trunc_normal_(self.dist_token, std=0.2) self.apply(self._init_weights) # self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): logger = get_root_logger() trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger) ''' def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def _conv_filter(self, state_dict, patch_size=16): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: v = v.reshape((v.shape[0], 3, patch_size, patch_size)) out_dict[k] = v return out_dict def to_2D(self, x): n, hw, c = x.shape h = w = int(math.sqrt(hw)) x = x.transpose(1, 2).reshape(n, c, h, w) return x def to_1D(self, x): n, c, h, w = x.shape x = x.reshape(n, c, -1).transpose(1, 2) return x def interpolate_pos_encoding(self, x, w, h): npatch = x.shape[1] - self.num_extra_tokens N = self.pos_embed.shape[1] - self.num_extra_tokens if npatch == N and w == h: return self.pos_embed class_ORdist_pos_embed = self.pos_embed[:, 0:self.num_extra_tokens] patch_pos_embed = self.pos_embed[:, self.num_extra_tokens:] dim = x.shape[-1] w0 = w // self.patch_embed.patch_size[0] h0 = h // self.patch_embed.patch_size[1] # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 w0, h0 = w0 + 0.1, h0 + 0.1 patch_pos_embed = nn.functional.interpolate( patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), mode='bicubic', ) assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_ORdist_pos_embed, patch_pos_embed), dim=1) def prepare_tokens(self, x, mask=None): B, nc, w, h = x.shape # patch linear embedding x = self.patch_embed(x) # mask image modeling if mask is not None: x = self.mask_model(x, mask) x = x.flatten(2).transpose(1, 2) # add the [CLS] token to the embed patch tokens all_tokens = [self.cls_token.expand(B, -1, -1)] if 
self.num_extra_tokens == 2: dist_tokens = self.dist_token.expand(B, -1, -1) all_tokens.append(dist_tokens) all_tokens.append(x) x = torch.cat(all_tokens, dim=1) # add positional encoding to each token x = x + self.interpolate_pos_encoding(x, w, h) return self.pos_drop(x) def forward_features(self, x): # print(f"==========shape of x is {x.shape}==========") B, _, H, W = x.shape Hp, Wp = H // self.patch_size, W // self.patch_size x = self.prepare_tokens(x) features = [] for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if i in self.out_indices: xp = x[:, self.num_extra_tokens:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def mae_base_patch16(pretrained=False, **kwargs): model = ViT( patch_size=16, drop_rate=0., embed_dim=768, depth=12, num_heads=12, num_classes=1000, mlp_ratio=4., qkv_bias=True, use_checkpoint=True, num_extra_tokens=1, **kwargs) model.default_cfg = _cfg() return model
null
182,641
# Iterable lives in collections.abc (it was removed from collections in Python 3.10)
from collections.abc import Iterable

import numpy as np
from shapely.geometry import Polygon


def flatten(lis):
    for item in lis:
        if isinstance(item, Iterable) and not isinstance(item, str):
            for x in flatten(item):
                yield x
        else:
            yield item
null
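A small usage example for the recursive flatten generator above; the nested list is made up, and strings are treated as atoms rather than iterables:

nested = [1, [2, [3, 4]], "abc", [[5], 6]]
print(list(flatten(nested)))  # [1, 2, 3, 4, 'abc', 5, 6]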
182,642
# Iterable lives in collections.abc (it was removed from collections in Python 3.10)
from collections.abc import Iterable

import numpy as np
from shapely.geometry import Polygon


def compute_poly_iou(list1, list2):
    a1 = np.array(list1, dtype=int).reshape(-1, 2)
    poly1 = Polygon(a1)
    poly1_clean = poly1.buffer(0)
    a2 = np.array(list2, dtype=int).reshape(-1, 2)
    poly2 = Polygon(a2)
    poly2_clean = poly2.buffer(0)
    try:
        # iou = poly1.intersection(poly2).area / poly1.union(poly2).area
        iou = poly1_clean.intersection(poly2_clean).area / poly1_clean.union(poly2_clean).area
    except ZeroDivisionError:
        iou = 0
    return iou
null
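A quick sanity check of compute_poly_iou with two axis-aligned squares whose overlap is known in closed form (intersection 1, union 4 + 4 - 1 = 7, so IoU = 1/7 ≈ 0.143); the flat x, y point ordering matches what the function expects:

sq1 = [0, 0, 2, 0, 2, 2, 0, 2]   # 2x2 square at the origin
sq2 = [1, 1, 3, 1, 3, 3, 1, 3]   # 2x2 square shifted by (1, 1)
print(compute_poly_iou(sq1, sq2))  # ~0.142857 (1 / 7)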
182,643
import os import xml.dom.minidom reg_gt_path_archival = os.path.abspath("data/test") import xml.dom.minidom from os.path import join as osj from .data_structure import * class eval: STR = "-str" REG = "-reg" DEFAULT_ENCODING = "UTF-8" # reg_gt_path = "./annotations/trackA/" # str_gt_path = "./annotations/trackB/" # reg_gt_path = os.path.abspath("data/test") # reg_gt_path_archival = os.path.abspath("data/test") # reg_gt_path_modern = os.path.abspath("data/test") # str_gt_path_1 = os.path.abspath("data/test") # str_gt_path_2 = os.path.abspath("data/test") # str_gt_path_archival = os.path.abspath("data/test") # str_gt_path_modern = os.path.abspath("data/test") # dummyDom = xml.dom.minidom.parse("./dummyXML.xml") def __init__(self, track, res_path): self.return_result = None self.reg = True self.str = False self.resultFile = res_path self.inPrefix = os.path.split(res_path)[-1].split(".")[0][:-7] if track == "-trackA": self.reg = True self.GTFile = osj(reg_gt_path, self.inPrefix + ".xml") # self.GTFile = osj(self.reg_gt_path, self.inPrefix) elif track == "-trackA1": # archival documents self.reg = True self.GTFile = osj(reg_gt_path_archival, self.inPrefix + ".xml") elif track == "-trackA2": # modern documents self.reg = True self.GTFile = osj(reg_gt_path_modern, self.inPrefix + ".xml") elif track == "-trackB1": self.str = True self.GTFile = osj(str_gt_path_1, self.inPrefix + ".xml") # self.GTFile = osj(self.str_gt_path_1, self.inPrefix) elif track == "-trackB2": self.str = True self.GTFile = osj(str_gt_path_2, self.inPrefix + ".xml") # print(self.GTFile) # self.GTFile = osj(self.str_gt_path_2, self.inPrefix) elif track == "-trackB2_a": self.str = True self.GTFile = osj(str_gt_path_archival, self.inPrefix + ".xml") elif track == "-trackB2_m": self.str = True self.GTFile = osj(str_gt_path_modern, self.inPrefix + ".xml") else: print(track) print("Not a valid track, please check your spelling.") # self.resultFile = res_path # self.inPrefix = os.path.split(res_path)[-1].split("-")[0] # if self.str: # # self.GTFile = osj(self.str_gt_path, self.inPrefix + "-str.xml") # self.GTFile = osj(self.str_gt_path, self.inPrefix + ".xml") # elif self.reg: # # self.GTFile = osj(self.reg_gt_path, self.inPrefix + "-reg.xml") # self.GTFile = osj(self.reg_gt_path, self.inPrefix + ".xml") # else: # print("Not a valid track, please check your spelling.") self.gene_ret_lst() def result(self): return self.return_result def gene_ret_lst(self): ret_lst = [] for iou in [0.6, 0.7, 0.8, 0.9]: temp = self.compute_retVal(iou) ret_lst.append(temp) # ret_lst.append(self.compute_retVal(iou)) ret_lst.append(self.inPrefix + ".xml") # ret_lst.append(self.inPrefix) # print("Done processing {}\n".format(self.resultFile)) self.return_result = ret_lst def compute_retVal(self, iou): gt_dom = xml.dom.minidom.parse(self.GTFile) # incorrect submission format handling try: result_dom = xml.dom.minidom.parse(self.resultFile) except Exception as e: # result_dom = xml.dom.minidom.parse(dummyDom) gt_tables = eval.get_table_list(gt_dom) retVal = ResultStructure(truePos=0, gtTotal=len(gt_tables), resTotal=0) return retVal # result_dom = xml.dom.minidom.parse(self.resultFile) if self.reg: ret = self.evaluate_result_reg(gt_dom, result_dom, iou) return ret if self.str: ret = self.evaluate_result_str(gt_dom, result_dom, iou) return ret def get_table_list(dom): """ return a list of Table objects corresponding to the table element of the DOM. 
""" return [Table(_nd) for _nd in dom.documentElement.getElementsByTagName("table")] def evaluate_result_reg(gt_dom, result_dom, iou_value): # parse the tables in input elements gt_tables = eval.get_table_list(gt_dom) result_tables = eval.get_table_list(result_dom) # duplicate result table list remaining_tables = result_tables.copy() # map the tables in gt and result file table_matches = [] # @param: table_matches - list of mapping of tables in gt and res file, in order (gt, res) for gtt in gt_tables: for rest in remaining_tables: if gtt.compute_table_iou(rest) >= iou_value: remaining_tables.remove(rest) table_matches.append((gtt, rest)) break assert len(table_matches) <= len(gt_tables) assert len(table_matches) <= len(result_tables) retVal = ResultStructure(truePos=len(table_matches), gtTotal=len(gt_tables), resTotal=len(result_tables)) return retVal def evaluate_result_str(gt_dom, result_dom, iou_value, table_iou_value=0.8): # parse the tables in input elements gt_tables = eval.get_table_list(gt_dom) result_tables = eval.get_table_list(result_dom) # duplicate result table list remaining_tables = result_tables.copy() gt_remaining = gt_tables.copy() # map the tables in gt and result file table_matches = [] # @param: table_matches - list of mapping of tables in gt and res file, in order (gt, res) for gtt in gt_remaining: for rest in remaining_tables: # note: for structural analysis, use 0.8 for table mapping if gtt.compute_table_iou(rest) >= table_iou_value: table_matches.append((gtt, rest)) remaining_tables.remove(rest) # unsafe... should be ok with the break below gt_remaining.remove(gtt) break total_gt_relation, total_res_relation, total_correct_relation = 0, 0, 0 for gt_table, ress_table in table_matches: # set up the cell mapping for matching tables cell_mapping = gt_table.find_cell_mapping(ress_table, iou_value) # set up the adj relations, convert the one for result table to a dictionary for faster searching gt_AR = gt_table.find_adj_relations() total_gt_relation += len(gt_AR) res_AR = ress_table.find_adj_relations() total_res_relation += len(res_AR) if False: # for DEBUG Table.printCellMapping(cell_mapping) Table.printAdjacencyRelationList(gt_AR, "GT") Table.printAdjacencyRelationList(res_AR, "run") # Now map GT adjacency relations to result lMappedAR = [] for ar in gt_AR: try: resFromCell = cell_mapping[ar.fromText] resToCell = cell_mapping[ar.toText] # make a mapped adjacency relation lMappedAR.append(AdjRelation(resFromCell, resToCell, ar.direction)) except: # no mapping is possible pass # compare two list of adjacency relation correct_dect = 0 for ar1 in res_AR: for ar2 in lMappedAR: if ar1.isEqual(ar2): correct_dect += 1 break total_correct_relation += correct_dect # handle gt_relations in unmatched gt table for gtt_remain in gt_remaining: total_gt_relation += len(gtt_remain.find_adj_relations()) # handle gt_relation in unmatched res table for res_remain in remaining_tables: total_res_relation += len(res_remain.find_adj_relations()) retVal = ResultStructure(truePos=total_correct_relation, gtTotal=total_gt_relation, resTotal=total_res_relation) return retVal def process_missing_files(track, gt_file_lst, cur_gt_num): if track in ["-trackA", "-trackA1", "-trackA2"]: gt_file_lst_full = [osj(reg_gt_path, filename) for filename in gt_file_lst] for file in gt_file_lst_full: if os.path.split(file)[-1].split(".")[-1] == "xml": gt_dom = xml.dom.minidom.parse(file) gt_root = gt_dom.documentElement # tables = [] table_elements = gt_root.getElementsByTagName("table") for res_table in 
table_elements: # t = Table(res_table) # tables.append(t) cur_gt_num += 1 return cur_gt_num elif track == "-trackB1": gt_file_lst_full = [osj(str_gt_path_1, filename) for filename in gt_file_lst] for file in gt_file_lst_full: if os.path.split(file)[-1].split(".")[-1] == "xml": gt_dom = xml.dom.minidom.parse(file) gt_root = gt_dom.documentElement tables = [] table_elements = gt_root.getElementsByTagName("table") for res_table in table_elements: t = Table(res_table) tables.append(t) for table in tables: cur_gt_num += len(table.find_adj_relations()) return cur_gt_num elif track == "-trackB2": gt_file_lst_full = [osj(str_gt_path_2, filename) for filename in gt_file_lst] for file in gt_file_lst_full: if os.path.split(file)[-1].split(".")[-1] == "xml": gt_dom = xml.dom.minidom.parse(file) gt_root = gt_dom.documentElement tables = [] table_elements = gt_root.getElementsByTagName("table") for res_table in table_elements: t = Table(res_table) tables.append(t) for table in tables: cur_gt_num += len(table.find_adj_relations()) return cur_gt_num def calc(F1): sum_a = 0.6 * F1[0] + 0.7 * F1[1] + 0.8 * F1[2] + 0.9 * F1[3] sum_b = 0.6 + 0.7 + 0.8 + 0.9 return sum_a / sum_b def calc_table_score(result_path): # measure = eval(*sys.argv[1:]) gt_file_lst = os.listdir(reg_gt_path_archival) track = "-trackA1" untar_path = result_path res_lst = [] for root, files, dirs in os.walk(untar_path): for name in dirs: if name.split(".")[-1] == "xml": cur_filepath = osj(os.path.abspath(root), name) res_lst.append(eval(track, cur_filepath)) # printing for debug # print("Processing... {}".format(name)) # print("DONE WITH FILE PROCESSING\n") # note: results are stored as list of each when iou at [0.6, 0.7, 0.8, 0.9, gt_filename] # gt number should be the same for all files gt_num = 0 correct_six, res_six = 0, 0 correct_seven, res_seven = 0, 0 correct_eight, res_eight = 0, 0 correct_nine, res_nine = 0, 0 for each_file in res_lst: # print(each_file) try: gt_file_lst.remove(each_file.result[-1]) if each_file.result[-1].replace('.xml', '.jpg') in gt_file_lst: gt_file_lst.remove(each_file.result[-1].replace('.xml', '.jpg')) correct_six += each_file.result[0].truePos gt_num += each_file.result[0].gtTotal res_six += each_file.result[0].resTotal # print("{} {} {}".format(each_file.result[0].truePos, each_file.result[0].gtTotal, each_file.result[0].resTotal)) correct_seven += each_file.result[1].truePos res_seven += each_file.result[1].resTotal correct_eight += each_file.result[2].truePos res_eight += each_file.result[2].resTotal correct_nine += each_file.result[3].truePos res_nine += each_file.result[3].resTotal except: print("Error occur in processing result list.") print(each_file.result[-1]) break # print(each_file.result[-1]) # print(each_file) # for file in gt_file_lst: # if file.split(".") != "xml": # gt_file_lst.remove(file) # # print(gt_file_lst) for i in range(len(gt_file_lst) - 1, -1, -1): if gt_file_lst[i].split(".")[-1] != "xml": del gt_file_lst[i] if len(gt_file_lst) > 0: print("\nWarning: missing result annotations for file: {}\n".format(gt_file_lst)) gt_total = process_missing_files(track, gt_file_lst, gt_num) else: gt_total = gt_num try: # print("Evaluation of {}".format(track.replace("-", ""))) # iou @ 0.6 p_six = correct_six / res_six r_six = correct_six / gt_total f1_six = 2 * p_six * r_six / (p_six + r_six) print("IOU @ 0.6 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_six, r_six, f1_six)) print("correct: {}, gt: {}, res: {}\n".format(correct_six, gt_total, res_six)) # iou @ 0.7 p_seven = correct_seven / 
res_seven r_seven = correct_seven / gt_total f1_seven = 2 * p_seven * r_seven / (p_seven + r_seven) print("IOU @ 0.7 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_seven, r_seven, f1_seven)) print("correct: {}, gt: {}, res: {}\n".format(correct_seven, gt_total, res_seven)) # iou @ 0.8 p_eight = correct_eight / res_eight r_eight = correct_eight / gt_total f1_eight = 2 * p_eight * r_eight / (p_eight + r_eight) print("IOU @ 0.8 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_eight, r_eight, f1_eight)) print("correct: {}, gt: {}, res: {}\n".format(correct_eight, gt_total, res_eight)) # iou @ 0.9 p_nine = correct_nine / res_nine r_nine = correct_nine / gt_total f1_nine = 2 * p_nine * r_nine / (p_nine + r_nine) print("IOU @ 0.9 -\nprecision: {}\nrecall: {}\nf1: {}".format(p_nine, r_nine, f1_nine)) print("correct: {}, gt: {}, res: {}".format(correct_nine, gt_total, res_nine)) F1 = [f1_six, f1_seven, f1_eight, f1_nine] wF1 = calc(F1) print("Average weight F1: {}".format(wF1)) return { 'p_six':p_six * 100, "r_six":r_six * 100, "f1_six":f1_six * 100, "p_seven":p_seven * 100, "r_seven":r_seven * 100, "f1_seven":f1_seven * 100, "p_eight":p_eight * 100, "r_eight":r_eight * 100, "f1_eight":f1_eight * 100, "p_nine":p_nine * 100, "r_nine":r_nine * 100, "f1_nine":f1_nine * 100, "wF1":wF1 * 100 } except ZeroDivisionError: print( "Error: zero devision error found, (possible that no adjacency relations are found), please check the file input.") return {"wF1": 0}
null
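The calc helper in the record above is a weighted mean of the four per-threshold F1 scores, with weights 0.6/0.7/0.8/0.9 so that higher-IoU thresholds count more. A small worked example with illustrative numbers (the explicit arithmetic matches what calc(F1) returns):

F1 = [0.90, 0.80, 0.70, 0.60]   # F1 at IoU 0.6, 0.7, 0.8, 0.9
wF1 = (0.6 * 0.90 + 0.7 * 0.80 + 0.8 * 0.70 + 0.9 * 0.60) / (0.6 + 0.7 + 0.8 + 0.9)
print(wF1)  # 0.7333..., identical to calc(F1)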
182,644
import os
import itertools
import torch
from typing import Any, Dict, List, Set
from detectron2.data import build_detection_train_loader
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from detectron2.evaluation import COCOEvaluator
from detectron2.solver.build import maybe_add_gradient_clipping
from ditod import add_vit_config
from ditod import DetrDatasetMapper
from detectron2.data.datasets import register_coco_instances
import logging
from detectron2.utils.logger import setup_logger
from detectron2.utils import comm
from detectron2.engine.defaults import create_ddp_model
import weakref
from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer
from ditod import MyDetectionCheckpointer, ICDAREvaluator
from ditod import MyTrainer

The provided code snippet includes necessary dependencies for implementing the `setup` function. Write a Python function `def setup(args)` to solve the following problem:
Create configs and perform basic setups.

Here is the function:
def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # add_coat_config(cfg)
    add_vit_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
Create configs and perform basic setups.
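A hedged sketch of how setup is typically driven in a detectron2-style entry point, using the default_argument_parser and launch utilities imported in the record; the main function and the exact trainer wiring are illustrative, not taken from the original script:

def main(args):
    cfg = setup(args)
    trainer = MyTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(main, args.num_gpus, num_machines=args.num_machines,
           machine_rank=args.machine_rank, dist_url=args.dist_url, args=(args,))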
182,645
import argparse
import os

import cv2
import tqdm


def convert(fn):
    # given a file name, convert it into binary and store at the same position
    img = cv2.imread(fn)
    gim = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gim = cv2.adaptiveThreshold(gim, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 45, 11)
    g3im = cv2.cvtColor(gim, cv2.COLOR_GRAY2BGR)
    cv2.imwrite(fn, g3im)
null
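A possible driver for convert, binarizing every image under a directory in place; the --root flag and the extension filter are assumptions for illustration, not part of the original script:

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--root", required=True, help="directory of images to binarize in place")
    args = parser.parse_args()
    files = [os.path.join(r, f) for r, _, fs in os.walk(args.root) for f in fs
             if f.lower().endswith((".jpg", ".jpeg", ".png"))]
    for fn in tqdm.tqdm(files):
        convert(fn)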
182,646
import os
import os.path
import random
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from PIL import Image
from torchvision.datasets.vision import VisionDataset

def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    return filename.lower().endswith(extensions)

IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp')

The provided code snippet includes necessary dependencies for implementing the `is_image_file` function. Write a Python function `def is_image_file(filename: str) -> bool` to solve the following problem:
Checks if a file is an allowed image extension.

Args:
    filename (string): path to a file

Returns:
    bool: True if the filename ends with a known image extension

Here is the function:
def is_image_file(filename: str) -> bool:
    """Checks if a file is an allowed image extension.

    Args:
        filename (string): path to a file

    Returns:
        bool: True if the filename ends with a known image extension
    """
    return has_file_allowed_extension(filename, IMG_EXTENSIONS)
Checks if a file is an allowed image extension. Args: filename (string): path to a file Returns: bool: True if the filename ends with a known image extension
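A two-line usage example for is_image_file; the filenames are made up, and the check is case-insensitive because the filename is lowercased before matching:

print(is_image_file("scan_001.PNG"))      # True
print(is_image_file("labels/train.txt"))  # False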
182,647
import os
import os.path
import random
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from PIL import Image
from torchvision.datasets.vision import VisionDataset

def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool:
    """Checks if a file is an allowed extension.

    Args:
        filename (string): path to a file
        extensions (tuple of strings): extensions to consider (lowercase)

    Returns:
        bool: True if the filename ends with one of given extensions
    """
    return filename.lower().endswith(extensions)

def make_dataset(
    directory: str,
    class_to_idx: Dict[str, int],
    extensions: Optional[Tuple[str, ...]] = None,
    is_valid_file: Optional[Callable[[str], bool]] = None,
) -> List[Tuple[str, int]]:
    instances = []
    directory = os.path.expanduser(directory)
    both_none = extensions is None and is_valid_file is None
    both_something = extensions is not None and is_valid_file is not None
    if both_none or both_something:
        raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time")
    if extensions is not None:
        def is_valid_file(x: str) -> bool:
            return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions))
    is_valid_file = cast(Callable[[str], bool], is_valid_file)
    for target_class in sorted(class_to_idx.keys()):
        class_index = class_to_idx[target_class]
        target_dir = os.path.join(directory, target_class)
        if not os.path.isdir(target_dir):
            continue
        for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)):
            for fname in sorted(fnames):
                path = os.path.join(root, fname)
                if is_valid_file(path):
                    item = path, class_index
                    instances.append(item)
    return instances
null
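A sketch of the usual ImageFolder-style call pattern for make_dataset, assuming a root/<class_name>/<image> layout on disk; the directory names and extension tuple are placeholders:

root = "data/train"
classes = sorted(d.name for d in os.scandir(root) if d.is_dir())
class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)}
samples = make_dataset(root, class_to_idx, extensions=('.jpg', '.jpeg', '.png'))
# samples is a list of (path, class_index) tuples, e.g. ('data/train/cat/0001.jpg', 0)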
182,648
import os
import os.path
import random
from typing import Any, Callable, cast, Dict, List, Optional, Tuple
from PIL import Image
from torchvision.datasets.vision import VisionDataset

def pil_loader(path: str) -> Image.Image:
def accimage_loader(path: str) -> Any:
def default_loader(path: str) -> Any:
    from torchvision import get_image_backend
    if get_image_backend() == 'accimage':
        return accimage_loader(path)
    else:
        return pil_loader(path)
null
182,649
import datetime import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from torch.utils.tensorboard import SummaryWriter def setup_for_distributed(is_master): def init_distributed_mode(args): if args.dist_on_itp: args.rank = int(os.environ['OMPI_COMM_WORLD_RANK']) args.world_size = int(os.environ['OMPI_COMM_WORLD_SIZE']) args.gpu = int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK']) args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']) os.environ['LOCAL_RANK'] = str(args.gpu) os.environ['RANK'] = str(args.rank) os.environ['WORLD_SIZE'] = str(args.world_size) # ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: args.rank = int(os.environ["RANK"]) args.world_size = int(os.environ['WORLD_SIZE']) args.gpu = int(os.environ['LOCAL_RANK']) elif 'SLURM_PROCID' in os.environ: args.rank = int(os.environ['SLURM_PROCID']) args.gpu = args.rank % torch.cuda.device_count() else: print('Not using distributed mode') args.distributed = False return args.distributed = True torch.cuda.set_device(args.gpu) args.dist_backend = 'nccl' print('| distributed init (rank {}): {}, gpu {}'.format( args.rank, args.dist_url, args.gpu), flush=True) torch.distributed.init_process_group( backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank, timeout=datetime.timedelta(0, 7200) ) torch.distributed.barrier() setup_for_distributed(args.rank == 0)
null
182,650
import datetime
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from torch.utils.tensorboard import SummaryWriter

def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
    if isinstance(parameters, torch.Tensor):
        parameters = [parameters]
    parameters = [p for p in parameters if p.grad is not None]
    norm_type = float(norm_type)
    if len(parameters) == 0:
        return torch.tensor(0.)
    device = parameters[0].grad.device
    if norm_type == inf:
        total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
    else:
        total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
    return total_norm
null
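A minimal sketch of where get_grad_norm_ usually sits in a training step, reading the overall L2 norm of the gradients right after backward; the toy model and loss are placeholders:

model = torch.nn.Linear(10, 2)
loss = model(torch.randn(4, 10)).sum()
loss.backward()
grad_norm = get_grad_norm_(model.parameters())  # scalar tensor: L2 norm over all grads
print(float(grad_norm))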
182,651
import datetime
import io
import os
import math
import time
import json
from collections import defaultdict, deque
import datetime
import numpy as np
from timm.utils import get_state_dict
from pathlib import Path
import torch
import torch.distributed as dist
from torch._six import inf
from torch.utils.tensorboard import SummaryWriter

def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
                     start_warmup_value=0, warmup_steps=-1):
    warmup_schedule = np.array([])
    warmup_iters = warmup_epochs * niter_per_ep
    if warmup_steps > 0:
        warmup_iters = warmup_steps
    print("Set warmup steps = %d" % warmup_iters)
    if warmup_epochs > 0:
        warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)

    iters = np.arange(epochs * niter_per_ep - warmup_iters)
    schedule = np.array(
        [final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])

    schedule = np.concatenate((warmup_schedule, schedule))
    # assert len(schedule) == epochs * niter_per_ep
    return schedule
null
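A short usage example of cosine_scheduler; the returned array has one value per optimizer step, the first warmup_epochs * niter_per_ep entries ramp linearly from start_warmup_value to base_value, and the rest follow the cosine decay toward final_value (the numbers below are illustrative):

epochs, niter_per_ep = 10, 100
lr_schedule = cosine_scheduler(base_value=1e-3, final_value=1e-5,
                               epochs=epochs, niter_per_ep=niter_per_ep,
                               warmup_epochs=1)
print(len(lr_schedule))                 # 1000 == epochs * niter_per_ep
print(lr_schedule[0], lr_schedule[-1])  # 0.0 ... ~1e-5
# inside the training loop one typically sets:
#   optimizer.param_groups[0]['lr'] = lr_schedule[global_step]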
182,652
import datetime import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from torch.utils.tensorboard import SummaryWriter def save_on_master(*args, **kwargs): if is_main_process(): torch.save(*args, **kwargs) def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): output_dir = Path(args.output_dir) epoch_name = str(epoch) if loss_scaler is not None: checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch_name)] for checkpoint_path in checkpoint_paths: to_save = { 'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'epoch': epoch, 'scaler': loss_scaler.state_dict(), 'args': args, } if model_ema is not None: to_save['model_ema'] = get_state_dict(model_ema) save_on_master(to_save, checkpoint_path) else: client_state = {'epoch': epoch} if model_ema is not None: client_state['model_ema'] = get_state_dict(model_ema) model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch_name, client_state=client_state)
null
182,653
import datetime import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from torch.utils.tensorboard import SummaryWriter def _load_checkpoint_for_ema(model_ema, checkpoint): """ Workaround for ModelEma._load_checkpoint to accept an already-loaded object """ mem_file = io.BytesIO() torch.save(checkpoint, mem_file) mem_file.seek(0) model_ema._load_checkpoint(mem_file) def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"): missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get( prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix=prefix) warn_missing_keys = [] ignore_missing_keys = [] for key in missing_keys: keep_flag = True for ignore_key in ignore_missing.split('|'): if ignore_key in key: keep_flag = False break if keep_flag: warn_missing_keys.append(key) else: ignore_missing_keys.append(key) missing_keys = warn_missing_keys if len(missing_keys) > 0: print("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: print("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(ignore_missing_keys) > 0: print("Ignored weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, ignore_missing_keys)) if len(error_msgs) > 0: print('\n'.join(error_msgs)) def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None): output_dir = Path(args.output_dir) if loss_scaler is not None: # torch.amp if args.auto_resume and len(args.resume) == 0: import glob all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth')) latest_ckpt = -1 for ckpt in all_checkpoints: t = ckpt.split('-')[-1].split('.')[0] if t.isdigit(): latest_ckpt = max(int(t), latest_ckpt) if latest_ckpt >= 0: args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt) print("Auto resume checkpoint: %s" % args.resume) if args.resume: if args.resume.startswith('https'): checkpoint = torch.hub.load_state_dict_from_url( args.resume, map_location='cpu', check_hash=True) else: checkpoint = torch.load(args.resume, map_location='cpu') model_without_ddp.load_state_dict(checkpoint['model']) print("Resume checkpoint %s" % args.resume) if 'optimizer' in checkpoint and 'epoch' in checkpoint: optimizer.load_state_dict(checkpoint['optimizer']) args.start_epoch = checkpoint['epoch'] + 1 if hasattr(args, 'model_ema') and args.model_ema: _load_checkpoint_for_ema(model_ema, checkpoint['model_ema']) if 'scaler' in checkpoint: loss_scaler.load_state_dict(checkpoint['scaler']) print("With optim & sched!") else: # deepspeed, only support '--auto_resume'. 
if args.auto_resume: import glob all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*')) latest_ckpt = -1 for ckpt in all_checkpoints: t = ckpt.split('-')[-1].split('.')[0] if t.isdigit(): latest_ckpt = max(int(t), latest_ckpt) if latest_ckpt >= 0: args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt) print("Auto resume checkpoint: %d" % latest_ckpt) _, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt) args.start_epoch = client_states['epoch'] + 1 if model_ema is not None: if args.model_ema: _load_checkpoint_for_ema(model_ema, client_states['model_ema'])
null
182,654
import datetime import io import os import math import time import json from collections import defaultdict, deque import datetime import numpy as np from timm.utils import get_state_dict from pathlib import Path import torch import torch.distributed as dist from torch._six import inf from torch.utils.tensorboard import SummaryWriter def get_world_size(): if not is_dist_avail_and_initialized(): return 1 return dist.get_world_size() def create_ds_config(args): args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json") with open(args.deepspeed_config, mode="w") as writer: ds_config = { "train_batch_size": args.batch_size * args.update_freq * get_world_size(), "train_micro_batch_size_per_gpu": args.batch_size, "steps_per_print": 1000, "optimizer": { "type": "Adam", "adam_w_mode": True, "params": { "lr": args.lr, "weight_decay": args.weight_decay, "bias_correction": True, "betas": [ 0.9, 0.999 ], "eps": 1e-8 } }, "fp16": { "enabled": True, "loss_scale": 0, "initial_scale_power": 16, "loss_scale_window": 1000, "hysteresis": 2, "min_loss_scale": 1 }, "zero_optimization": { "stage": args.zero_stage }, "amp": { "enabled": False, "opt_level": "O2" } } if args.clip_grad is not None: ds_config.update({'gradient_clipping': args.clip_grad}) writer.write(json.dumps(ds_config, indent=2))
null
182,656
import torch from torch import optim as optim from timm.optim.adafactor import Adafactor from timm.optim.adahessian import Adahessian from timm.optim.adamp import AdamP from timm.optim.lookahead import Lookahead from timm.optim.nadam import Nadam from timm.optim.nvnovograd import NvNovoGrad from timm.optim.radam import RAdam from timm.optim.rmsprop_tf import RMSpropTF from timm.optim.sgdp import SGDP import json def get_parameter_groups(model, weight_decay=1e-5, skip_list=(), get_num_layer=None, get_layer_scale=None): parameter_group_names = {} parameter_group_vars = {} for name, param in model.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith(".bias") or name in skip_list: group_name = "no_decay" this_weight_decay = 0. else: group_name = "decay" this_weight_decay = weight_decay if get_num_layer is not None: layer_id = get_num_layer(name) group_name = "layer_%d_%s" % (layer_id, group_name) else: layer_id = None if group_name not in parameter_group_names: if get_layer_scale is not None: scale = get_layer_scale(layer_id) else: scale = 1. parameter_group_names[group_name] = { "weight_decay": this_weight_decay, "params": [], "lr_scale": scale } parameter_group_vars[group_name] = { "weight_decay": this_weight_decay, "params": [], "lr_scale": scale } parameter_group_vars[group_name]["params"].append(param) parameter_group_names[group_name]["params"].append(name) print("Param groups = %s" % json.dumps(parameter_group_names, indent=2)) return list(parameter_group_vars.values()) def create_optimizer(args, model, get_num_layer=None, get_layer_scale=None, filter_bias_and_bn=True, skip_list=None): opt_lower = args.opt.lower() weight_decay = args.weight_decay if weight_decay and filter_bias_and_bn: skip = {} if skip_list is not None: skip = skip_list elif hasattr(model, 'no_weight_decay'): skip = model.no_weight_decay() parameters = get_parameter_groups(model, weight_decay, skip, get_num_layer, get_layer_scale) weight_decay = 0. 
else: parameters = model.parameters() if 'fused' in opt_lower: assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers' opt_args = dict(lr=args.lr, weight_decay=weight_decay) if hasattr(args, 'opt_eps') and args.opt_eps is not None: opt_args['eps'] = args.opt_eps if hasattr(args, 'opt_betas') and args.opt_betas is not None: opt_args['betas'] = args.opt_betas opt_split = opt_lower.split('_') opt_lower = opt_split[-1] if opt_lower == 'sgd' or opt_lower == 'nesterov': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'momentum': opt_args.pop('eps', None) optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'adam': optimizer = optim.Adam(parameters, **opt_args) elif opt_lower == 'adamw': optimizer = optim.AdamW(parameters, **opt_args) elif opt_lower == 'nadam': optimizer = Nadam(parameters, **opt_args) elif opt_lower == 'radam': optimizer = RAdam(parameters, **opt_args) elif opt_lower == 'adamp': optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args) elif opt_lower == 'sgdp': optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'adadelta': optimizer = optim.Adadelta(parameters, **opt_args) elif opt_lower == 'adafactor': if not args.lr: opt_args['lr'] = None optimizer = Adafactor(parameters, **opt_args) elif opt_lower == 'adahessian': optimizer = Adahessian(parameters, **opt_args) elif opt_lower == 'rmsprop': optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'rmsproptf': optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args) elif opt_lower == 'novograd' or opt_lower == 'nvnovograd': optimizer = NvNovoGrad(parameters, **opt_args) elif opt_lower == 'fusedsgd': opt_args.pop('eps', None) optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args) elif opt_lower == 'fusedmomentum': opt_args.pop('eps', None) optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args) elif opt_lower == 'fusedadam': optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args) elif opt_lower == 'fusedadamw': optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args) elif opt_lower == 'fusedlamb': optimizer = FusedLAMB(parameters, **opt_args) elif opt_lower == 'fusednovograd': opt_args.setdefault('betas', (0.95, 0.98)) optimizer = FusedNovoGrad(parameters, **opt_args) else: assert False and "Invalid optimizer" raise ValueError if len(opt_split) > 1: if opt_split[0] == 'lookahead': optimizer = Lookahead(optimizer) return optimizer
null
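A minimal sketch of calling create_optimizer with an argparse-style namespace; only the fields the AdamW branch of the function actually reads are filled in, and the values are illustrative:

import argparse
args = argparse.Namespace(opt='adamw', lr=5e-4, weight_decay=0.05,
                          opt_eps=1e-8, opt_betas=(0.9, 0.999), momentum=0.9)
model = torch.nn.Linear(10, 2)
optimizer = create_optimizer(args, model)
print(optimizer)  # AdamW with separate "decay" and "no_decay" parameter groups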
182,657
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class VisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) # trunc_normal_(self.mask_token, std=.02) trunc_normal_(self.head.weight, std=.02) self.apply(self._init_weights) self.fix_init_weight() self.head.weight.data.mul_(init_scale) self.head.bias.data.mul_(init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def 
forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) x = self.norm(x) if self.fc_norm is not None: t = x[:, 1:, :] return self.fc_norm(t.mean(1)) else: return x[:, 0] def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def beit_small_patch16_224(pretrained=False, **kwargs): model = VisionTransformer( patch_size=16, embed_dim=384, depth=12, num_heads=8, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
182,658
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class VisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) # trunc_normal_(self.mask_token, std=.02) trunc_normal_(self.head.weight, std=.02) self.apply(self._init_weights) self.fix_init_weight() self.head.weight.data.mul_(init_scale) self.head.bias.data.mul_(init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def 
forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) x = self.norm(x) if self.fc_norm is not None: t = x[:, 1:, :] return self.fc_norm(t.mean(1)) else: return x[:, 0] def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def beit_base_patch16_224(pretrained=False, **kwargs): model = VisionTransformer( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
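A hedged instantiation sketch for the beit_base_patch16_224 factory above; it assumes the full module (with PatchEmbed, Block, and RelativePositionBias defined) is available, and the num_classes/drop_path_rate overrides are passed through **kwargs:

model = beit_base_patch16_224(num_classes=10, drop_path_rate=0.1)
x = torch.randn(2, 3, 224, 224)
logits = model(x)
print(logits.shape)  # torch.Size([2, 10])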
182,659
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class VisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) # trunc_normal_(self.mask_token, std=.02) trunc_normal_(self.head.weight, std=.02) self.apply(self._init_weights) self.fix_init_weight() self.head.weight.data.mul_(init_scale) self.head.bias.data.mul_(init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def 
forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) x = self.norm(x) if self.fc_norm is not None: t = x[:, 1:, :] return self.fc_norm(t.mean(1)) else: return x[:, 0] def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def beit_base_patch16_384(pretrained=False, **kwargs): model = VisionTransformer( img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
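A minimal smoke test for the `beit_base_patch16_384` factory above might look like the sketch below; it assumes the `PatchEmbed`, `Block`, and `RelativePositionBias` modules referenced by `VisionTransformer` are defined in the same file, which the snippet itself does not show.

import torch

# Hedged smoke test: build the 384-pixel BEiT-Base classifier and run a dummy batch.
model = beit_base_patch16_384(num_classes=1000)
model.eval()
with torch.no_grad():
    logits = model(torch.randn(2, 3, 384, 384))  # (batch, channels, height, width)
print(logits.shape)  # expected: torch.Size([2, 1000])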
182,660
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): class VisionTransformer(nn.Module): def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001): def fix_init_weight(self): def rescale(param, layer_id): def _init_weights(self, m): def get_num_layers(self): def no_weight_decay(self): def get_classifier(self): def reset_classifier(self, num_classes, global_pool=''): def forward_features(self, x): def forward(self, x): def beit_large_patch16_224(pretrained=False, **kwargs): model = VisionTransformer( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
182,661
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class VisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) # trunc_normal_(self.mask_token, std=.02) trunc_normal_(self.head.weight, std=.02) self.apply(self._init_weights) self.fix_init_weight() self.head.weight.data.mul_(init_scale) self.head.bias.data.mul_(init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def 
forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) x = self.norm(x) if self.fc_norm is not None: t = x[:, 1:, :] return self.fc_norm(t.mean(1)) else: return x[:, 0] def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def beit_large_patch16_384(pretrained=False, **kwargs): model = VisionTransformer( img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
182,662
import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.models.layers import drop_path, to_2tuple, trunc_normal_ from timm.models.registry import register_model def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class VisionTransformer(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, img_size=224, patch_size=16, in_chans=3, num_classes=1000, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm, init_values=None, use_abs_pos_emb=True, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_mean_pooling=True, init_scale=0.001): super().__init__() self.num_classes = num_classes self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models self.patch_embed = PatchEmbed( img_size=img_size, patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim) num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) # self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if use_abs_pos_emb: self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) else: self.pos_embed = None self.pos_drop = nn.Dropout(p=drop_rate) if use_shared_rel_pos_bias: self.rel_pos_bias = RelativePositionBias(window_size=self.patch_embed.patch_shape, num_heads=num_heads) else: self.rel_pos_bias = None dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule self.use_rel_pos_bias = use_rel_pos_bias self.blocks = nn.ModuleList([ Block( dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer, init_values=init_values, window_size=self.patch_embed.patch_shape if use_rel_pos_bias else None) for i in range(depth)]) self.norm = nn.Identity() if use_mean_pooling else norm_layer(embed_dim) self.fc_norm = norm_layer(embed_dim) if use_mean_pooling else None self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity() if self.pos_embed is not None: trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) # trunc_normal_(self.mask_token, std=.02) trunc_normal_(self.head.weight, std=.02) self.apply(self._init_weights) self.fix_init_weight() self.head.weight.data.mul_(init_scale) self.head.bias.data.mul_(init_scale) def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def get_classifier(self): return self.head def reset_classifier(self, num_classes, global_pool=''): self.num_classes = num_classes self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity() def 
forward_features(self, x): x = self.patch_embed(x) batch_size, seq_len, _ = x.size() cls_tokens = self.cls_token.expand(batch_size, -1, -1) # stole cls_tokens impl from Phil Wang, thanks x = torch.cat((cls_tokens, x), dim=1) if self.pos_embed is not None: x = x + self.pos_embed x = self.pos_drop(x) rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None for blk in self.blocks: x = blk(x, rel_pos_bias=rel_pos_bias) x = self.norm(x) if self.fc_norm is not None: t = x[:, 1:, :] return self.fc_norm(t.mean(1)) else: return x[:, 0] def forward(self, x): x = self.forward_features(x) x = self.head(x) return x def beit_large_patch16_512(pretrained=False, **kwargs): model = VisionTransformer( img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), **kwargs) model.default_cfg = _cfg() return model
null
182,663
import argparse import datetime import numpy as np import time import torch import torch.backends.cudnn as cudnn import json import os from pathlib import Path from timm.data.mixup import Mixup from timm.models import create_model from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy from timm.utils import ModelEma from optim_factory import create_optimizer, get_parameter_groups, LayerDecayValueAssigner import webdataset as wds from datasets import build_dataset from engine_for_finetuning import train_one_epoch, evaluate from utils import NativeScalerWithGradNormCount as NativeScaler import utils from scipy import interpolate def get_args(): parser = argparse.ArgumentParser('BEiT fine-tuning and evaluation script for image classification', add_help=False) parser.add_argument('--batch_size', default=64, type=int) parser.add_argument('--epochs', default=30, type=int) parser.add_argument('--update_freq', default=1, type=int) parser.add_argument('--save_ckpt_freq', default=5, type=int) parser.add_argument('--eval_freq', default=5, type=int) # Model parameters parser.add_argument('--model', default='deit_base_patch16_224', type=str, metavar='MODEL', help='Name of model to train') parser.add_argument('--rel_pos_bias', action='store_true') parser.add_argument('--disable_rel_pos_bias', action='store_false', dest='rel_pos_bias') parser.set_defaults(rel_pos_bias=True) parser.add_argument('--abs_pos_emb', action='store_true') parser.add_argument('--qkv_bias', action='store_true') parser.add_argument('--layer_scale_init_value', default=0.1, type=float, help="0.1 for base, 1e-5 for large. set 0 to disable layer scale") parser.add_argument('--input_size', default=224, type=int, help='images input size') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--attn_drop_rate', type=float, default=0.0, metavar='PCT', help='Attention dropout rate (default: 0.)') parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT', help='Drop path rate (default: 0.1)') parser.add_argument('--disable_eval_during_finetuning', action='store_true', default=False) parser.add_argument('--model_ema', action='store_true', default=False) parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='') parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='') # Optimizer parameters parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER', help='Optimizer (default: "adamw"') parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: 1e-8)') parser.add_argument('--opt_betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='SGD momentum (default: 0.9)') parser.add_argument('--weight_decay', type=float, default=0.05, help='weight decay (default: 0.05)') parser.add_argument('--weight_decay_end', type=float, default=None, help="""Final value of the weight decay. 
We use a cosine schedule for WD and using a larger decay by the end of training improves performance for ViTs.""") parser.add_argument('--lr', type=float, default=5e-4, metavar='LR', help='learning rate (default: 5e-4)') parser.add_argument('--layer_decay', type=float, default=0.9) parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR', help='warmup learning rate (default: 1e-6)') parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N', help='num of steps to warmup LR, will overload warmup_epochs if set > 0') # Augmentation parameters parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME', help='Use AutoAugment policy. "v0" or "original". " + "(default: rand-m9-mstd0.5-inc1)'), parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--train_interpolation', type=str, default='bicubic', help='Training interpolation (random, bilinear, bicubic default: "bicubic")') # Evaluation parameters parser.add_argument('--crop_pct', type=float, default=None) # * Random Erase params parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT', help='Random erase prob (default: 0.25)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') # * Mixup params parser.add_argument('--mixup', type=float, default=0, help='mixup alpha, mixup enabled if > 0.') parser.add_argument('--cutmix', type=float, default=0, help='cutmix alpha, cutmix enabled if > 0.') parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup_prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup_switch_prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup_mode', type=str, default='batch', help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') # * Finetuning params parser.add_argument('--finetune', default='', help='finetune from checkpoint') parser.add_argument('--model_key', default='model|module', type=str) parser.add_argument('--model_prefix', default='', type=str) parser.add_argument('--init_scale', default=0.001, type=float) parser.add_argument('--use_mean_pooling', action='store_true') parser.set_defaults(use_mean_pooling=True) parser.add_argument('--use_cls', action='store_false', dest='use_mean_pooling') parser.add_argument('--disable_weight_decay_on_rel_pos_bias', action='store_true', default=False) # Dataset parameters parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str, help='dataset path') parser.add_argument('--eval_data_path', default=None, type=str, help='dataset path for evaluation') parser.add_argument('--nb_classes', default=0, type=int, help='number of the classification types') parser.add_argument('--imagenet_default_mean_and_std', default=False, action='store_true') parser.add_argument('--data_set', default='IMNET', choices=['CIFAR', 'IMNET', 'image_folder', "rvlcdip", "rvlcdip_wds"], type=str, help='ImageNet dataset path') parser.add_argument('--output_dir', default='', help='path where to save, empty for no saving') parser.add_argument('--log_dir', default=None, help='path where to tensorboard log') parser.add_argument('--device', default='cuda', help='device to use for training / testing') parser.add_argument('--seed', default=0, type=int) parser.add_argument('--resume', default='', help='resume from checkpoint') parser.add_argument('--auto_resume', action='store_true') parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume') parser.set_defaults(auto_resume=True) parser.add_argument('--save_ckpt', action='store_true') parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt') parser.set_defaults(save_ckpt=True) parser.add_argument('--start_epoch', default=0, type=int, metavar='N', help='start epoch') parser.add_argument('--eval', action='store_true', help='Perform evaluation only') parser.add_argument('--dist_eval', action='store_true', default=False, help='Enabling distributed evaluation') parser.add_argument('--num_workers', default=10, type=int) parser.add_argument('--pin_mem', action='store_true', help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem') parser.set_defaults(pin_mem=True) # distributed training parameters parser.add_argument('--world_size', default=1, type=int, help='number of distributed processes') parser.add_argument('--local_rank', default=-1, type=int) parser.add_argument('--dist_on_itp', action='store_true') parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') parser.add_argument('--enable_deepspeed', action='store_true', default=False) parser.add_argument('--zero_stage', default=0, type=int, help='ZeRO optimizer stage (default: 0)') known_args, _ = parser.parse_known_args() if known_args.enable_deepspeed: try: import deepspeed from deepspeed import DeepSpeedConfig parser = deepspeed.add_config_arguments(parser) ds_init = deepspeed.initialize except: print("Please 'pip install deepspeed==0.4.0'") exit(0) else: ds_init = None return parser.parse_args(), ds_init
null
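The parser above is meant to be consumed by a training driver; a hypothetical entry point (not shown in the record) would unpack both return values like this:

# Illustrative only: get_args() returns the parsed namespace plus an optional
# DeepSpeed initializer (None when --enable_deepspeed is not set).
opts, ds_init = get_args()
print(opts.model, opts.batch_size, opts.epochs, opts.lr)
if ds_init is None:
    print("DeepSpeed disabled; the script would fall back to the native AMP scaler")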
182,664
from timm.data import create_transform from timm.data.constants import \ IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.data.transforms import str_to_interp_mode from torchvision import transforms from dataset_folder import RvlcdipImageFolder def build_transform(is_train, args): resize_im = args.input_size > 32 imagenet_default_mean_and_std = args.imagenet_default_mean_and_std mean = IMAGENET_INCEPTION_MEAN if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_MEAN std = IMAGENET_INCEPTION_STD if not imagenet_default_mean_and_std else IMAGENET_DEFAULT_STD if is_train: # this should always dispatch to transforms_imagenet_train transform = create_transform( input_size=args.input_size, is_training=True, color_jitter=args.color_jitter, auto_augment=args.aa, interpolation=args.train_interpolation, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, mean=mean, std=std, ) if not resize_im: # replace RandomResizedCropAndInterpolation with # RandomCrop transform.transforms[0] = transforms.RandomCrop( args.input_size, padding=4) return transform t = [] if resize_im: if args.crop_pct is None: if args.input_size < 384: args.crop_pct = 224 / 256 else: args.crop_pct = 1.0 size = int(args.input_size / args.crop_pct) t.append( transforms.Resize(size, interpolation=str_to_interp_mode("bicubic")), # to maintain same ratio w.r.t. 224 images ) t.append(transforms.CenterCrop(args.input_size)) t.append(transforms.ToTensor()) t.append(transforms.Normalize(mean, std)) return transforms.Compose(t) class RvlcdipImageFolder(RvlcdipDatasetFolder): def __init__( self, root: str, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, loader: Callable[[str], Any] = default_loader, split: str = None, dataset_size: Optional[int] = None ): super().__init__(root, loader, IMG_EXTENSIONS if split is None else None, transform=transform, target_transform=target_transform, split=split, dataset_size=dataset_size) self.imgs = self.samples def build_dataset(is_train, args): transform = build_transform(is_train, args) print("Transform = ") if isinstance(transform, tuple): for trans in transform: print(" - - - - - - - - - - ") for t in trans.transforms: print(t) else: for t in transform.transforms: print(t) print("---------------------------") if args.data_set == 'rvlcdip': root = args.data_path if is_train else args.eval_data_path split = "train" if is_train else "test" dataset = RvlcdipImageFolder(root, split=split, transform=transform) nb_classes = args.nb_classes assert len(dataset.class_to_idx) == nb_classes else: raise NotImplementedError() assert nb_classes == args.nb_classes print("Number of the class = %d" % args.nb_classes) return dataset, nb_classes
null
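A worked example of the evaluation-resize arithmetic in `build_transform`: for inputs smaller than 384 the crop percentage defaults to 224/256, so the image is resized before the center crop.

# Numbers reproduced from the logic above (illustrative, not an extra API).
input_size = 224
crop_pct = 224 / 256
size = int(input_size / crop_pct)
print(size, input_size)  # 256 224: resize the short side to 256, then center-crop to 224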
182,665
import math import sys from typing import Iterable, Optional import torch from timm.data import Mixup from timm.utils import accuracy, ModelEma import utils def train_class_batch(model, samples, target, criterion): outputs = model(samples) if not isinstance(outputs, torch.Tensor): # assume that the model outputs a tuple of [outputs, outputs_kd] outputs, outputs_kd = outputs loss = criterion(outputs, target) return loss, outputs def get_loss_scale_for_deepspeed(model): optimizer = model.optimizer return optimizer.loss_scale if hasattr(optimizer, "loss_scale") else optimizer.cur_scale def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, data_loader: Iterable, optimizer: torch.optim.Optimizer, device: torch.device, epoch: int, loss_scaler, max_norm: float = 0, model_ema: Optional[ModelEma] = None, mixup_fn: Optional[Mixup] = None, log_writer=None, start_steps=None, lr_schedule_values=None, wd_schedule_values=None, num_training_steps_per_epoch=None, update_freq=None): model.train(True) metric_logger = utils.MetricLogger(delimiter=" ") metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) header = 'Epoch: [{}]'.format(epoch) print_freq = 10 if loss_scaler is None: model.zero_grad() model.micro_steps = 0 else: optimizer.zero_grad() for data_iter_step, (samples, targets) in enumerate(metric_logger.log_every(data_loader, print_freq, header)): step = data_iter_step // update_freq if step >= num_training_steps_per_epoch: continue it = start_steps + step # global training iteration # Update LR & WD for the first acc if lr_schedule_values is not None or wd_schedule_values is not None and data_iter_step % update_freq == 0: for i, param_group in enumerate(optimizer.param_groups): if lr_schedule_values is not None: param_group["lr"] = lr_schedule_values[it] * param_group["lr_scale"] if wd_schedule_values is not None and param_group["weight_decay"] > 0: param_group["weight_decay"] = wd_schedule_values[it] samples = samples.to(device, non_blocking=True) targets = targets.to(device, non_blocking=True) if mixup_fn is not None: samples, targets = mixup_fn(samples, targets) if loss_scaler is None: samples = samples.half() loss, output = train_class_batch( model, samples, targets, criterion) else: with torch.cuda.amp.autocast(): loss, output = train_class_batch( model, samples, targets, criterion) loss_value = loss.item() if not math.isfinite(loss_value): print("Loss is {}, stopping training".format(loss_value)) sys.exit(1) if loss_scaler is None: loss /= update_freq model.backward(loss) model.step() if (data_iter_step + 1) % update_freq == 0: # model.zero_grad() # Deepspeed will call step() & model.zero_grad() automatic if model_ema is not None: model_ema.update(model) grad_norm = None loss_scale_value = get_loss_scale_for_deepspeed(model) else: # this attribute is added by timm on one optimizer (adahessian) is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order loss /= update_freq grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm, parameters=model.parameters(), create_graph=is_second_order, update_grad=(data_iter_step + 1) % update_freq == 0) if (data_iter_step + 1) % update_freq == 0: optimizer.zero_grad() if model_ema is not None: model_ema.update(model) loss_scale_value = loss_scaler.state_dict()["scale"] torch.cuda.synchronize() if mixup_fn is None: class_acc = (output.max(-1)[-1] == targets).float().mean() else: class_acc = 
None metric_logger.update(loss=loss_value) metric_logger.update(class_acc=class_acc) metric_logger.update(loss_scale=loss_scale_value) min_lr = 10. max_lr = 0. for group in optimizer.param_groups: min_lr = min(min_lr, group["lr"]) max_lr = max(max_lr, group["lr"]) metric_logger.update(lr=max_lr) metric_logger.update(min_lr=min_lr) weight_decay_value = None for group in optimizer.param_groups: if group["weight_decay"] > 0: weight_decay_value = group["weight_decay"] metric_logger.update(weight_decay=weight_decay_value) metric_logger.update(grad_norm=grad_norm) if log_writer is not None: log_writer.update(loss=loss_value, head="loss") log_writer.update(class_acc=class_acc, head="loss") log_writer.update(loss_scale=loss_scale_value, head="opt") log_writer.update(lr=max_lr, head="opt") log_writer.update(min_lr=min_lr, head="opt") log_writer.update(weight_decay=weight_decay_value, head="opt") log_writer.update(grad_norm=grad_norm, head="opt") log_writer.set_step() # gather the stats from all processes metric_logger.synchronize_between_processes() print("Averaged stats:", metric_logger) return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
null
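The gradient-accumulation bookkeeping in `train_one_epoch` is easy to misread; the sketch below replays only the indexing, with made-up values for `update_freq` and `start_steps`.

# Micro-batches within one accumulation window share the same schedule index `it`,
# and the optimizer only steps on the last micro-batch of the window.
update_freq = 2
start_steps = 100
for data_iter_step in range(6):
    step = data_iter_step // update_freq
    it = start_steps + step
    update_grad = (data_iter_step + 1) % update_freq == 0
    print(data_iter_step, it, update_grad)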
182,667
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): class BEiT(nn.Module): def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): def fix_init_weight(self): def rescale(param, layer_id): def _init_weights(self, m): def get_num_layers(self): def no_weight_decay(self): def forward_features(self, x): def forward(self, x): def beit_base_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, **kwargs) model.default_cfg = _cfg() return model
null
182,668
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): class BEiT(nn.Module): def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): def fix_init_weight(self): def rescale(param, layer_id): def _init_weights(self, m): def get_num_layers(self): def no_weight_decay(self): def forward_features(self, x): def forward(self, x): def beit_large_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=1024, depth=24, num_heads=16, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=None, **kwargs) model.default_cfg = _cfg() return model
null
182,669
import warnings import math import torch from functools import partial import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as checkpoint from timm.models.layers import drop_path, to_2tuple, trunc_normal_ def _cfg(url='', **kwargs): class BEiT(nn.Module): def __init__(self, img_size=[224, 224], patch_size=16, in_chans=3, num_classes=80, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=None, init_values=None, use_abs_pos_emb=False, use_rel_pos_bias=False, use_shared_rel_pos_bias=False, use_checkpoint=True, pretrained=None, out_features=None, ): def fix_init_weight(self): def rescale(param, layer_id): def _init_weights(self, m): def get_num_layers(self): def no_weight_decay(self): def forward_features(self, x): def forward(self, x): def dit_base_patch16(pretrained=False, **kwargs): model = BEiT( patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4, qkv_bias=True, norm_layer=partial(nn.LayerNorm, eps=1e-6), init_values=0.1, **kwargs) model.default_cfg = _cfg() return model
null
182,681
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .funsd_evaluation import FUNSDEvaluator The provided code snippet includes necessary dependencies for implementing the `create_ddp_model` function. Write a Python function `def create_ddp_model(model, *, fp16_compression=False, **kwargs)` to solve the following problem: Create a DistributedDataParallel model if there are >1 processes. Args: model: a torch.nn.Module fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. Here is the function: def create_ddp_model(model, *, fp16_compression=False, **kwargs): """ Create a DistributedDataParallel model if there are >1 processes. Args: model: a torch.nn.Module fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`. """ # noqa if comm.get_world_size() == 1: return model if "device_ids" not in kwargs: kwargs["device_ids"] = [comm.get_local_rank()] ddp = DistributedDataParallel(model, **kwargs) if fp16_compression: from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook) return ddp
Create a DistributedDataParallel model if there are >1 processes.
Args:
    model: a torch.nn.Module
    fp16_compression: add fp16 compression hooks to the ddp object. See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
    kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
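A short usage sketch for `create_ddp_model`; the config object and the `build_model` call are assumed to follow the standard detectron2 flow.

# Extra keyword arguments such as broadcast_buffers are forwarded verbatim to
# torch.nn.parallel.DistributedDataParallel.
model = build_model(cfg)  # cfg: an already-populated detectron2 config (assumed)
model = create_ddp_model(model, fp16_compression=True, broadcast_buffers=False)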
182,682
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .funsd_evaluation import FUNSDEvaluator The provided code snippet includes necessary dependencies for implementing the `default_argument_parser` function. Write a Python function `def default_argument_parser(epilog=None)` to solve the following problem: Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser: Here is the function: def default_argument_parser(epilog=None): """ Create a parser with some common arguments used by detectron2 users. Args: epilog (str): epilog passed to ArgumentParser describing the usage. Returns: argparse.ArgumentParser: """ parser = argparse.ArgumentParser( epilog=epilog or f""" Examples: Run on single machine: $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml Change some config options: $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001 Run on multiple machines: (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url <URL> [--other-flags] (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url <URL> [--other-flags] """, formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file") parser.add_argument( "--resume", action="store_true", help="Whether to attempt to resume from the checkpoint directory. " "See documentation of `MyTrainer.resume_or_load()` for what it means.", ) parser.add_argument("--eval-only", action="store_true", help="perform evaluation only") parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*") parser.add_argument("--num-machines", type=int, default=1, help="total number of machines") parser.add_argument( "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)" ) # PyTorch still may leave orphan processes in multi-gpu training. # Therefore we use a deterministic way to obtain port, # so that users are aware of orphan processes by seeing the port occupied. 
port = 2 ** 15 + 2 ** 14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14 parser.add_argument( "--dist-url", default="tcp://127.0.0.1:{}".format(port), help="initialization URL for pytorch distributed backend. See " "https://pytorch.org/docs/stable/distributed.html for details.", ) parser.add_argument( "opts", help=""" Modify config options at the end of the command. For Yacs configs, use space-separated "PATH.KEY VALUE" pairs. For python-based LazyConfig, use "path.key=value". """.strip(), default=None, nargs=argparse.REMAINDER, ) return parser
Create a parser with some common arguments used by detectron2 users.
Args:
    epilog (str): epilog passed to ArgumentParser describing the usage.
Returns:
    argparse.ArgumentParser:
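A minimal driver for the parser (hypothetical; the real scripts pair it with a `launch` helper):

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    # Dashes in option names become underscores on the namespace.
    print(args.config_file, args.num_gpus, args.eval_only, args.dist_url)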
182,683
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .funsd_evaluation import FUNSDEvaluator def _try_get_key(cfg, *keys, default=None): """ Try select keys from cfg until the first key that exists. Otherwise return default. """ if isinstance(cfg, CfgNode): cfg = OmegaConf.create(cfg.dump()) for k in keys: none = object() p = OmegaConf.select(cfg, k, default=none) if p is not none: return p return default def _highlight(code, filename): try: import pygments except ImportError: return code from pygments.lexers import Python3Lexer, YamlLexer from pygments.formatters import Terminal256Formatter lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer() code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai")) return code The provided code snippet includes necessary dependencies for implementing the `default_setup` function. Write a Python function `def default_setup(cfg, args)` to solve the following problem: Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged Here is the function: def default_setup(cfg, args): """ Perform some basic common setups at the beginning of a job, including: 1. Set up the detectron2 logger 2. Log basic information about environment, cmdline arguments, and config 3. Backup the config to the output directory Args: cfg (CfgNode or omegaconf.DictConfig): the full config to be used args (argparse.NameSpace): the command line arguments to be logged """ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir") if comm.is_main_process() and output_dir: PathManager.mkdirs(output_dir) rank = comm.get_rank() setup_logger(output_dir, distributed_rank=rank, name="fvcore") logger = setup_logger(output_dir, distributed_rank=rank) logger.info("Rank of current process: {}. 
World size: {}".format(rank, comm.get_world_size())) logger.info("Environment info:\n" + collect_env_info()) logger.info("Command line arguments: " + str(args)) if hasattr(args, "config_file") and args.config_file != "": logger.info( "Contents of args.config_file={}:\n{}".format( args.config_file, _highlight(PathManager.open(args.config_file, "r").read(), args.config_file), ) ) if comm.is_main_process() and output_dir: # Note: some of our scripts may expect the existence of # config.yaml in output directory path = os.path.join(output_dir, "config.yaml") if isinstance(cfg, CfgNode): logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml"))) with PathManager.open(path, "w") as f: f.write(cfg.dump()) else: LazyConfig.save(cfg, path) logger.info("Full config saved to {}".format(path)) # make sure each worker has a different, yet deterministic seed if specified seed = _try_get_key(cfg, "SEED", "train.seed", default=-1) seed_all_rng(None if seed < 0 else seed + rank) # cudnn benchmark has large overhead. It shouldn't be used considering the small size of # typical validation set. if not (hasattr(args, "eval_only") and args.eval_only): torch.backends.cudnn.benchmark = _try_get_key( cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False )
Perform some basic common setups at the beginning of a job, including:
1. Set up the detectron2 logger
2. Log basic information about environment, cmdline arguments, and config
3. Backup the config to the output directory
Args:
    cfg (CfgNode or omegaconf.DictConfig): the full config to be used
    args (argparse.NameSpace): the command line arguments to be logged
182,684
import argparse import logging import os import sys import weakref from collections import OrderedDict from typing import Optional import torch from fvcore.nn.precise_bn import get_bn_modules from omegaconf import OmegaConf from torch.nn.parallel import DistributedDataParallel import detectron2.data.transforms as T from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import CfgNode, LazyConfig from detectron2.data import ( MetadataCatalog, build_detection_test_loader, build_detection_train_loader, ) from detectron2.evaluation import ( DatasetEvaluator, inference_on_dataset, print_csv_format, verify_results, ) from detectron2.modeling import build_model from detectron2.solver import build_lr_scheduler, build_optimizer from detectron2.utils import comm from detectron2.utils.collect_env import collect_env_info from detectron2.utils.env import seed_all_rng from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter from detectron2.utils.file_io import PathManager from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from detectron2.engine.train_loop import AMPTrainer, SimpleTrainer, TrainerBase from .mycheckpointer import MyDetectionCheckpointer from typing import Any, Dict, List, Set import itertools from detectron2.solver.build import maybe_add_gradient_clipping from .dataset_mapper import DetrDatasetMapper from .funsd_evaluation import FUNSDEvaluator The provided code snippet includes necessary dependencies for implementing the `default_writers` function. Write a Python function `def default_writers(output_dir: str, max_iter: Optional[int] = None)` to solve the following problem: Build a list of :class:`EventWriter` to be used. It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`. Args: output_dir: directory to store JSON metrics and tensorboard events max_iter: the total number of iterations Returns: list[EventWriter]: a list of :class:`EventWriter` objects. Here is the function: def default_writers(output_dir: str, max_iter: Optional[int] = None): """ Build a list of :class:`EventWriter` to be used. It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`. Args: output_dir: directory to store JSON metrics and tensorboard events max_iter: the total number of iterations Returns: list[EventWriter]: a list of :class:`EventWriter` objects. """ PathManager.mkdirs(output_dir) return [ # It may not always print what you want to see, since it prints "common" metrics only. CommonMetricPrinter(max_iter), JSONWriter(os.path.join(output_dir, "metrics.json")), TensorboardXWriter(output_dir), ]
Build a list of :class:`EventWriter` to be used.
It now consists of a :class:`CommonMetricPrinter`, :class:`TensorboardXWriter` and :class:`JSONWriter`.
Args:
    output_dir: directory to store JSON metrics and tensorboard events
    max_iter: the total number of iterations
Returns:
    list[EventWriter]: a list of :class:`EventWriter` objects.
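Typical call site for `default_writers`, under the assumption that the config uses the usual detectron2 keys (`OUTPUT_DIR`, `SOLVER.MAX_ITER`) and that only the main process should write:

writers = (
    default_writers(cfg.OUTPUT_DIR, max_iter=cfg.SOLVER.MAX_ITER)
    if comm.is_main_process()
    else []
)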
182,685
from detectron2.checkpoint import DetectionCheckpointer from typing import Any import torch import torch.nn as nn from fvcore.common.checkpoint import _IncompatibleKeys, _strip_prefix_if_present, TORCH_VERSION from torch import distributed as dist from scipy import interpolate import numpy as np import torch.nn.functional as F def append_prefix(k): prefix = 'backbone.bottom_up.backbone.' # return prefix + k if not k.startswith(prefix) else k return k def modify_ckpt_state(model, state_dict, logger=None): # reshape absolute position embedding for Swin if state_dict.get(append_prefix('absolute_pos_embed')) is not None: absolute_pos_embed = state_dict[append_prefix('absolute_pos_embed')] N1, L, C1 = absolute_pos_embed.size() N2, C2, H, W = model.backbone.bottom_up.backbone.absolute_pos_embed.size() if N1 != N2 or C1 != C2 or L != H * W: logger.warning("Error in loading absolute_pos_embed, pass") else: state_dict[append_prefix('absolute_pos_embed')] = absolute_pos_embed.view(N2, H, W, C2).permute(0, 3, 1, 2) def get_dist_info(): if dist.is_available() and dist.is_initialized(): rank = dist.get_rank() world_size = dist.get_world_size() else: rank = 0 world_size = 1 return rank, world_size rank, _ = get_dist_info() all_keys = list(state_dict.keys()) for key in all_keys: if "relative_position_index" in key: state_dict.pop(key) if "relative_position_bias_table" in key: rel_pos_bias = state_dict[key] src_num_pos, num_attn_heads = rel_pos_bias.size() if key not in model.state_dict(): continue dst_num_pos, _ = model.state_dict()[key].size() dst_patch_shape = model.backbone.bottom_up.backbone.patch_embed.patch_shape if dst_patch_shape[0] != dst_patch_shape[1]: raise NotImplementedError() num_extra_tokens = dst_num_pos - (dst_patch_shape[0] * 2 - 1) * (dst_patch_shape[1] * 2 - 1) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) dst_size = int((dst_num_pos - num_extra_tokens) ** 0.5) if src_size != dst_size: if rank == 0: print("Position interpolate for %s from %dx%d to %dx%d" % ( key, src_size, src_size, dst_size, dst_size)) extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] def geometric_progression(a, r, n): return a * (1.0 - r ** n) / (1.0 - r) left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src_size // 2) if gp > dst_size // 2: right = q else: left = q # if q > 1.13492: # q = 1.13492 dis = [] cur = 1 for i in range(src_size // 2): dis.append(cur) cur += q ** (i + 1) r_ids = [-_ for _ in reversed(dis)] x = r_ids + [0] + dis y = r_ids + [0] + dis t = dst_size // 2.0 dx = np.arange(-t, t + 0.1, 1.0) dy = np.arange(-t, t + 0.1, 1.0) if rank == 0: print("x = {}".format(x)) print("dx = {}".format(dx)) all_rel_pos_bias = [] for i in range(num_attn_heads): z = rel_pos_bias[:, i].view(src_size, src_size).float().numpy() f = interpolate.interp2d(x, y, z, kind='cubic') all_rel_pos_bias.append( torch.Tensor(f(dx, dy)).contiguous().view(-1, 1).to(rel_pos_bias.device)) rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) new_rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) state_dict[key] = new_rel_pos_bias if append_prefix('pos_embed') in state_dict: pos_embed_checkpoint = state_dict[append_prefix('pos_embed')] embedding_size = pos_embed_checkpoint.shape[-1] num_patches = model.backbone.bottom_up.backbone.patch_embed.num_patches num_extra_tokens = model.backbone.bottom_up.backbone.pos_embed.shape[-2] - num_patches # height (== width) for the checkpoint position embedding orig_size = 
int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5) # height (== width) for the new position embedding # new_size = int(num_patches ** 0.5) new_size_w = model.backbone.bottom_up.backbone.patch_embed.num_patches_w new_size_h = model.backbone.bottom_up.backbone.patch_embed.num_patches_h # class_token and dist_token are kept unchanged if orig_size != new_size_h or orig_size != new_size_w: if rank == 0: print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size_w, new_size_h)) extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens] # only the position tokens are interpolated pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:] pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2) pos_tokens = torch.nn.functional.interpolate( pos_tokens, size=(new_size_w, new_size_h), mode='bicubic', align_corners=False) pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2) new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1) state_dict[append_prefix('pos_embed')] = new_pos_embed # interpolate position bias table if needed relative_position_bias_table_keys = [k for k in state_dict.keys() if "relative_position_bias_table" in k] for table_key in relative_position_bias_table_keys: table_pretrained = state_dict[table_key] if table_key not in model.state_dict(): continue table_current = model.state_dict()[table_key] L1, nH1 = table_pretrained.size() L2, nH2 = table_current.size() if nH1 != nH2: logger.warning(f"Error in loading {table_key}, pass") else: if L1 != L2: S1 = int(L1 ** 0.5) S2 = int(L2 ** 0.5) table_pretrained_resized = F.interpolate( table_pretrained.permute(1, 0).view(1, nH1, S1, S1), size=(S2, S2), mode='bicubic') state_dict[table_key] = table_pretrained_resized.view(nH2, L2).permute(1, 0) if append_prefix('rel_pos_bias.relative_position_bias_table') in state_dict and \ model.backbone.bottom_up.backbone.use_rel_pos_bias and \ not model.backbone.bottom_up.backbone.use_shared_rel_pos_bias and \ append_prefix('blocks.0.attn.relative_position_bias_table') not in state_dict: logger.info("[BEIT] Expand the shared relative position embedding to each transformer block. ") num_layers = model.backbone.bottom_up.backbone.get_num_layers() rel_pos_bias = state_dict[append_prefix("rel_pos_bias.relative_position_bias_table")] for i in range(num_layers): state_dict["blocks.%d.attn.relative_position_bias_table" % i] = rel_pos_bias.clone() state_dict.pop(append_prefix("rel_pos_bias.relative_position_bias_table")) return state_dict
null
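The grid-size arithmetic behind the absolute position-embedding branch of `modify_ckpt_state`, with illustrative numbers: a checkpoint trained at 224 px with 16 px patches stores a 14x14 patch grid plus one extra token.

num_extra_tokens = 1                      # the [CLS] token
pos_len = 14 * 14 + num_extra_tokens      # rows in the stored pos_embed
orig_size = int((pos_len - num_extra_tokens) ** 0.5)
print(orig_size)  # 14 -> reshaped to (1, C, 14, 14) before bicubic interpolation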
182,686
import warnings import math import torch import torch.nn as nn import torch.utils.checkpoint as checkpoint from timm.models.layers import trunc_normal_, drop_path, to_2tuple from functools import partial def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), **kwargs } class ViT(nn.Module): """ Vision Transformer with support for patch or hybrid CNN input stage """ def __init__(self, model_name='vit_base_patch16_224', img_size=384, patch_size=16, in_chans=3, embed_dim=1024, depth=24, num_heads=16, num_classes=19, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0.1, attn_drop_rate=0., drop_path_rate=0., hybrid_backbone=None, norm_layer=partial(nn.LayerNorm, eps=1e-6), norm_cfg=None, pos_embed_interp=False, random_init=False, align_corners=False, use_checkpoint=False, num_extra_tokens=1, out_features=None, **kwargs, ): super(ViT, self).__init__() self.model_name = model_name self.img_size = img_size self.patch_size = patch_size self.in_chans = in_chans self.embed_dim = embed_dim self.depth = depth self.num_heads = num_heads self.num_classes = num_classes self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.qk_scale = qk_scale self.drop_rate = drop_rate self.attn_drop_rate = attn_drop_rate self.drop_path_rate = drop_path_rate self.hybrid_backbone = hybrid_backbone self.norm_layer = norm_layer self.norm_cfg = norm_cfg self.pos_embed_interp = pos_embed_interp self.random_init = random_init self.align_corners = align_corners self.use_checkpoint = use_checkpoint self.num_extra_tokens = num_extra_tokens self.out_features = out_features self.out_indices = [int(name[5:]) for name in out_features] # self.num_stages = self.depth # self.out_indices = tuple(range(self.num_stages)) if self.hybrid_backbone is not None: self.patch_embed = HybridEmbed( self.hybrid_backbone, img_size=self.img_size, in_chans=self.in_chans, embed_dim=self.embed_dim) else: self.patch_embed = PatchEmbed( img_size=self.img_size, patch_size=self.patch_size, in_chans=self.in_chans, embed_dim=self.embed_dim) self.num_patches = self.patch_embed.num_patches self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) if self.num_extra_tokens == 2: self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) self.pos_embed = nn.Parameter(torch.zeros( 1, self.num_patches + self.num_extra_tokens, self.embed_dim)) self.pos_drop = nn.Dropout(p=self.drop_rate) # self.num_extra_tokens = self.pos_embed.shape[-2] - self.num_patches dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)] # stochastic depth decay rule self.blocks = nn.ModuleList([ Block( dim=self.embed_dim, num_heads=self.num_heads, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, qk_scale=self.qk_scale, drop=self.drop_rate, attn_drop=self.attn_drop_rate, drop_path=dpr[i], norm_layer=self.norm_layer) for i in range(self.depth)]) # NOTE as per official impl, we could have a pre-logits representation dense layer + tanh here # self.repr = nn.Linear(embed_dim, representation_size) # self.repr_act = nn.Tanh() if patch_size == 16: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), nn.SyncBatchNorm(embed_dim), nn.GELU(), nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn3 = nn.Identity() self.fpn4 = nn.MaxPool2d(kernel_size=2, 
stride=2) elif patch_size == 8: self.fpn1 = nn.Sequential( nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2), ) self.fpn2 = nn.Identity() self.fpn3 = nn.Sequential( nn.MaxPool2d(kernel_size=2, stride=2), ) self.fpn4 = nn.Sequential( nn.MaxPool2d(kernel_size=4, stride=4), ) trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) if self.num_extra_tokens==2: trunc_normal_(self.dist_token, std=0.2) self.apply(self._init_weights) # self.fix_init_weight() def fix_init_weight(self): def rescale(param, layer_id): param.div_(math.sqrt(2.0 * layer_id)) for layer_id, layer in enumerate(self.blocks): rescale(layer.attn.proj.weight.data, layer_id + 1) rescale(layer.mlp.fc2.weight.data, layer_id + 1) def _init_weights(self, m): if isinstance(m, nn.Linear): trunc_normal_(m.weight, std=.02) if isinstance(m, nn.Linear) and m.bias is not None: nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) nn.init.constant_(m.weight, 1.0) ''' def init_weights(self): logger = get_root_logger() trunc_normal_(self.pos_embed, std=.02) trunc_normal_(self.cls_token, std=.02) self.apply(self._init_weights) if self.init_cfg is None: logger.warn(f'No pre-trained weights for ' f'{self.__class__.__name__}, ' f'training start from scratch') else: assert 'checkpoint' in self.init_cfg, f'Only support ' \ f'specify `Pretrained` in ' \ f'`init_cfg` in ' \ f'{self.__class__.__name__} ' logger.info(f"Will load ckpt from {self.init_cfg['checkpoint']}") load_checkpoint(self, filename=self.init_cfg['checkpoint'], strict=False, logger=logger) ''' def get_num_layers(self): return len(self.blocks) def no_weight_decay(self): return {'pos_embed', 'cls_token'} def _conv_filter(self, state_dict, patch_size=16): """ convert patch embedding weight from manual patchify + linear proj to conv""" out_dict = {} for k, v in state_dict.items(): if 'patch_embed.proj.weight' in k: v = v.reshape((v.shape[0], 3, patch_size, patch_size)) out_dict[k] = v return out_dict def to_2D(self, x): n, hw, c = x.shape h = w = int(math.sqrt(hw)) x = x.transpose(1, 2).reshape(n, c, h, w) return x def to_1D(self, x): n, c, h, w = x.shape x = x.reshape(n, c, -1).transpose(1, 2) return x def interpolate_pos_encoding(self, x, w, h): npatch = x.shape[1] - self.num_extra_tokens N = self.pos_embed.shape[1] - self.num_extra_tokens if npatch == N and w == h: return self.pos_embed class_ORdist_pos_embed = self.pos_embed[:, 0:self.num_extra_tokens] patch_pos_embed = self.pos_embed[:, self.num_extra_tokens:] dim = x.shape[-1] w0 = w // self.patch_embed.patch_size[0] h0 = h // self.patch_embed.patch_size[1] # we add a small number to avoid floating point error in the interpolation # see discussion at https://github.com/facebookresearch/dino/issues/8 w0, h0 = w0 + 0.1, h0 + 0.1 patch_pos_embed = nn.functional.interpolate( patch_pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), scale_factor=(w0 / math.sqrt(N), h0 / math.sqrt(N)), mode='bicubic', ) assert int(w0) == patch_pos_embed.shape[-2] and int(h0) == patch_pos_embed.shape[-1] patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_ORdist_pos_embed, patch_pos_embed), dim=1) def prepare_tokens(self, x, mask=None): B, nc, w, h = x.shape # patch linear embedding x = self.patch_embed(x) # mask image modeling if mask is not None: x = self.mask_model(x, mask) x = x.flatten(2).transpose(1, 2) # add the [CLS] token to the embed patch tokens all_tokens = [self.cls_token.expand(B, -1, -1)] if 
self.num_extra_tokens == 2: dist_tokens = self.dist_token.expand(B, -1, -1) all_tokens.append(dist_tokens) all_tokens.append(x) x = torch.cat(all_tokens, dim=1) # add positional encoding to each token x = x + self.interpolate_pos_encoding(x, w, h) return self.pos_drop(x) def forward_features(self, x): # print(f"==========shape of x is {x.shape}==========") B, _, H, W = x.shape Hp, Wp = H // self.patch_size, W // self.patch_size x = self.prepare_tokens(x) features = [] for i, blk in enumerate(self.blocks): if self.use_checkpoint: x = checkpoint.checkpoint(blk, x) else: x = blk(x) if i in self.out_indices: xp = x[:, self.num_extra_tokens:, :].permute(0, 2, 1).reshape(B, -1, Hp, Wp) features.append(xp.contiguous()) ops = [self.fpn1, self.fpn2, self.fpn3, self.fpn4] for i in range(len(features)): features[i] = ops[i](features[i]) feat_out = {} for name, value in zip(self.out_features, features): feat_out[name] = value return feat_out def forward(self, x): x = self.forward_features(x) return x def deit_base_patch16(pretrained=False, **kwargs): model = ViT( patch_size=16, drop_rate=0., embed_dim=768, depth=12, num_heads=12, num_classes=1000, mlp_ratio=4., qkv_bias=True, use_checkpoint=True, num_extra_tokens=2, **kwargs) model.default_cfg = _cfg() return model
null
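The fpn1–fpn4 branches above convert the single-stride ViT feature map into a small feature pyramid. A minimal shape-only sketch of what the patch_size == 8 branches do (the embed_dim and the 32x32 patch grid are arbitrary illustration values, not taken from the record):

import torch
import torch.nn as nn

embed_dim, Hp, Wp = 768, 32, 32
xp = torch.randn(1, embed_dim, Hp, Wp)  # patch tokens reshaped back to a 2D grid

# The patch_size == 8 branches shown above: 2x upsample, identity, 2x and 4x downsample.
fpn1 = nn.Sequential(nn.ConvTranspose2d(embed_dim, embed_dim, kernel_size=2, stride=2))
fpn2 = nn.Identity()
fpn3 = nn.MaxPool2d(kernel_size=2, stride=2)
fpn4 = nn.MaxPool2d(kernel_size=4, stride=4)

for name, op in [("fpn1", fpn1), ("fpn2", fpn2), ("fpn3", fpn3), ("fpn4", fpn4)]:
    print(name, tuple(op(xp).shape))
# fpn1 (1, 768, 64, 64), fpn2 (1, 768, 32, 32), fpn3 (1, 768, 16, 16), fpn4 (1, 768, 8, 8)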
182,688
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_argument_parser, default_setup, launch
from detectron2.data.datasets import register_coco_instances
from ditod import MyTrainer, add_vit_config

The provided code snippet includes necessary dependencies for implementing the `setup` function.
Write a Python function `def setup(args)` to solve the following problem:
Create configs and perform basic setups.
Here is the function:

def setup(args):
    """
    Create configs and perform basic setups.
    """
    cfg = get_cfg()
    # add_coat_config(cfg)
    add_vit_config(cfg)
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg
Create configs and perform basic setups.
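A minimal sketch of how setup() is typically driven from a detectron2 launch script; the config path in the comment is a placeholder, and MyTrainer is assumed to follow detectron2's DefaultTrainer interface:

from detectron2.engine import default_argument_parser, launch

def main(args):
    cfg = setup(args)
    trainer = MyTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()

if __name__ == "__main__":
    # e.g. --config-file <path/to/config.yaml> --num-gpus 8
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )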
182,692
import json
import hashlib
import io
import os
import base64
from PIL import Image
from tqdm import tqdm

def calculate_md5(image):
    md5_hash = hashlib.md5()
    with io.BytesIO() as output:
        image.save(output, format='JPEG')
        image_data = output.getvalue()
        md5_hash.update(image_data)
    return md5_hash.hexdigest()

def process_files(directory):
    tsv_data = []
    for file in tqdm(os.listdir(directory)):
        if file.endswith('.json'):
            json_path = os.path.join(directory, file)
            jpg_path = os.path.join(directory, file.replace('.json', '.jpg'))
            with open(json_path, 'r') as json_file:
                data = json.load(json_file)
            image = Image.open(jpg_path)
            md5 = calculate_md5(image)
            caption = data['caption']
            width = data['width']
            height = data['height']
            with io.BytesIO() as buffer:
                image.save(buffer, format='JPEG')
                image_base64 = base64.b64encode(buffer.getvalue()).decode("utf-8")
            combined_data_str = {'phrase': data['noun_chunks'], 'expression_v1': data['ref_exps']}
            tsv_row = [md5, caption, image_base64, width, height, combined_data_str]
            tsv_data.append('\t'.join(map(str, tsv_row)))
    return tsv_data
null
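A small self-contained check of the hashing behaviour, assuming calculate_md5 from the snippet above is in scope; the digest is taken over the re-encoded JPEG bytes rather than the file on disk:

from PIL import Image

img = Image.new("RGB", (64, 64), color=(120, 30, 200))  # tiny synthetic image
digest = calculate_md5(img)
print(digest)               # 32-character hex string
assert len(digest) == 32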
182,693
import json
import hashlib
import io
import os
import base64
from PIL import Image
from tqdm import tqdm

def write_tsv(tsv_data, output_file):
    with open(output_file, 'w') as file:
        file.write('\n'.join(tsv_data))
null
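A usage sketch chaining the two helpers, assuming a directory of paired <name>.json / <name>.jpg files; both paths and the shard name are placeholders:

rows = process_files("/path/to/grit_raw")          # one tab-separated line per image
write_tsv(rows, "/path/to/grit/part-00000.tsv")    # hypothetical output shard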
182,694
import json
import os
from glob import glob

def grit():
    json_files = glob(f"/path/to/grit/*.tsv")
    source_files = []
    for json_file_name in json_files:
        basename = os.path.basename(json_file_name)
        source_files.append(f"../grit/{basename}")
    file_list = {
        "source": source_files,
        "source_lang": "grit",
        "weight": 1.0,
        "name": "grit"
    }
    with open("/path/to/dataset_config/json/train.json", "w") as file_list_file:
        json.dump([file_list], file_list_file, indent=4)
null
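For reference, the train.json that grit() writes is a one-element list shaped like the sketch below; the shard names are invented examples:

import json

file_list = [{
    "source": ["../grit/part-00000.tsv", "../grit/part-00001.tsv"],
    "source_lang": "grit",
    "weight": 1.0,
    "name": "grit",
}]
print(json.dumps(file_list, indent=4))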
182,695
import json import os import requests from urllib.parse import urlparse from requests.exceptions import HTTPError import sys from pathlib import Path import textwrap import ast import os import numpy as np from PIL import Image import matplotlib.pyplot as plt import matplotlib.pylab as pylab import cv2 import base64 import io def vis_image(json_obj, output_folder): url = json_obj['url'] try: response = requests.get(url) response.raise_for_status() file_name = os.path.basename(urlparse(url).path) # output_path = os.path.join(output_folder, file_name) file_key_name = json_obj['key'] + os.path.splitext(file_name)[1] output_path = os.path.join(output_folder, file_key_name) except Exception as e: print(f"Error while downloading {url}: {e}") return with open(output_path, 'wb') as file: file.write(response.content) try: pil_img = Image.open(output_path).convert("RGB") except: return image = np.array(pil_img)[:, :, [2, 1, 0]] image_h = pil_img.height image_w = pil_img.width caption = json_obj['caption'] def is_overlapping(rect1, rect2): x1, y1, x2, y2 = rect1 x3, y3, x4, y4 = rect2 return not (x2 < x3 or x1 > x4 or y2 < y3 or y1 > y4) grounding_list = json_obj['ref_exps'] new_image = image.copy() previous_locations = [] previous_bboxes = [] text_offset = 10 text_offset_original = 4 text_size = max(0.07 * min(image_h, image_w) / 100, 0.5) text_line = int(max(1 * min(image_h, image_w) / 512, 1)) box_line = int(max(2 * min(image_h, image_w) / 512, 2)) text_height = text_offset # init # pdb.set_trace() for (phrase_s, phrase_e, x1_norm, y1_norm, x2_norm, y2_norm, score) in grounding_list: phrase = caption[phrase_s:phrase_e] x1, y1, x2, y2 = int(x1_norm * image_w), int(y1_norm * image_h), int(x2_norm * image_w), int(y2_norm * image_h) print(f"Decode results: {phrase} - {[x1, y1, x2, y2]}") # draw bbox # random color color = tuple(np.random.randint(0, 255, size=3).tolist()) new_image = cv2.rectangle(new_image, (x1, y1), (x2, y2), color, box_line) # add phrase name # decide the text location first for x_prev, y_prev in previous_locations: if abs(x1 - x_prev) < abs(text_offset) and abs(y1 - y_prev) < abs(text_offset): y1 += text_height if y1 < 2 * text_offset: y1 += text_offset + text_offset_original # add text background (text_width, text_height), _ = cv2.getTextSize(phrase, cv2.FONT_HERSHEY_SIMPLEX, text_size, text_line) text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2 = x1, y1 - text_height - text_offset_original, x1 + text_width, y1 for prev_bbox in previous_bboxes: while is_overlapping((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2), prev_bbox): text_bg_y1 += text_offset text_bg_y2 += text_offset y1 += text_offset if text_bg_y2 >= image_h: text_bg_y1 = max(0, image_h - text_height - text_offset_original) text_bg_y2 = image_h y1 = max(0, image_h - text_height - text_offset_original + text_offset) break alpha = 0.5 for i in range(text_bg_y1, text_bg_y2): for j in range(text_bg_x1, text_bg_x2): if i < image_h and j < image_w: new_image[i, j] = (alpha * new_image[i, j] + (1 - alpha) * np.array(color)).astype(np.uint8) cv2.putText( new_image, phrase, (x1, y1 - text_offset_original), cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 0, 0), text_line, cv2.LINE_AA ) previous_locations.append((x1, y1)) previous_bboxes.append((text_bg_x1, text_bg_y1, text_bg_x2, text_bg_y2)) try: file_key_name = json_obj['key'] + '_exp' + os.path.splitext(file_name)[1] output_path = os.path.join(output_folder, file_key_name) imshow(new_image, file_name= output_path, caption=caption) except: # Out of (supported formats: eps, jpeg, jpg, 
pdf, pgf, png, ps, raw, rgba, svg, svgz, tif, tiff, webp) return def download_images_from_jsonl(jsonl_path, output_folder): with open(jsonl_path, 'r') as jsonl_file: for line in jsonl_file: json_obj = json.loads(line) url = json_obj['url'] # download_image(url, output_folder) vis_image(json_obj, output_folder)
null
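vis_image() relies on an imshow helper that is not defined in the snippet; below is a minimal matplotlib-based stand-in plus a usage sketch (the paths and the jsonl layout are assumptions, not part of the record):

import os
import matplotlib.pyplot as plt

def imshow(img, file_name, caption=""):
    # img is BGR (see the channel flip in vis_image), so reverse channels for display
    plt.figure()
    plt.imshow(img[:, :, ::-1])
    plt.title(caption[:80])
    plt.axis("off")
    plt.savefig(file_name, bbox_inches="tight")
    plt.close()

os.makedirs("/path/to/vis_out", exist_ok=True)
download_images_from_jsonl("/path/to/grit/00000.jsonl", "/path/to/vis_out")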
182,696
import json
import os
import requests
from urllib.parse import urlparse
from requests.exceptions import HTTPError
import sys
from pathlib import Path
import textwrap
import ast
import os
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
import cv2
import base64
import io

def download_image(url, output_folder):
    try:
        response = requests.get(url)
        response.raise_for_status()
    except HTTPError as e:
        print(f"Error while downloading {url}: {e}")
        return
    file_name = os.path.basename(urlparse(url).path)
    output_path = os.path.join(output_folder, file_name)
    with open(output_path, 'wb') as file:
        file.write(response.content)
null
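A one-line usage sketch; the URL and output folder are placeholders:

import os

os.makedirs("downloads", exist_ok=True)
download_image("https://example.com/images/0001.jpg", "downloads")  # saved as downloads/0001.jpg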
182,719
import ast
import json
import logging
import math
import os
import random
import sys
import time
from dataclasses import dataclass
from multiprocessing import Value

import braceexpand
import numpy as np
import pandas as pd
import torch
import torchvision.datasets as datasets
import webdataset as wds
from PIL import Image
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
from open_clip import tokenize

def get_imagenet(args, preprocess_fns, split):
def get_dataset_fn(data_path, dataset_type):

def get_data(args, preprocess_fns, epoch=0):
    preprocess_train, preprocess_val = preprocess_fns
    data = {}
    if args.train_data:
        data["train"] = get_dataset_fn(args.train_data, args.dataset_type)(
            args, preprocess_train, is_train=True, epoch=epoch)
    if args.val_data:
        data["val"] = get_dataset_fn(args.val_data, args.dataset_type)(
            args, preprocess_val, is_train=False)
    if args.imagenet_val is not None:
        data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val")
    if args.imagenet_v2 is not None:
        data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2")
    return data
null
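A sketch of the argument namespace that get_data() reads; all values are hypothetical, and preprocess_train / preprocess_val would normally come from open_clip.create_model_and_transforms:

from argparse import Namespace

args = Namespace(
    train_data="/path/to/shards/train-{00000..00099}.tar",  # brace-expanded webdataset shards
    val_data=None,
    dataset_type="webdataset",
    imagenet_val=None,
    imagenet_v2=None,
)
data = get_data(args, (preprocess_train, preprocess_val), epoch=0)
# data["train"] wraps the training loader; no "val" or imagenet entries are built here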
182,724
import unilm import argparse import logging import math import os import sys from typing import Any, Callable, Dict, List, Optional, Tuple logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from omegaconf import DictConfig, OmegaConf from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils from fairseq.data import data_utils, iterators from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer def _flatten_config(cfg: DictConfig): config = OmegaConf.to_container(cfg) # remove any legacy Namespaces and replace with a single "args" namespace = None for k, v in list(config.items()): if isinstance(v, argparse.Namespace): namespace = v del config[k] if namespace is not None: config["args"] = vars(namespace) return config def validate_and_save( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr, valid_subsets: List[str], end_of_epoch: bool, ) -> Tuple[List[Optional[float]], bool]: num_updates = trainer.get_num_updates() max_update = cfg.optimization.max_update or math.inf # Stopping conditions (and an additional one based on validation loss later # on) should_stop = False if num_updates >= max_update: should_stop = True logger.info( f"Stopping training due to " f"num_updates: {num_updates} >= max_update: {max_update}" ) training_time_hours = trainer.cumulative_training_time() / (60 * 60) if ( cfg.optimization.stop_time_hours > 0 and training_time_hours > cfg.optimization.stop_time_hours ): should_stop = True logger.info( f"Stopping training due to " f"cumulative_training_time: {training_time_hours} > " f"stop_time_hours: {cfg.optimization.stop_time_hours} hour(s)" ) do_save = ( (end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0) or should_stop or ( cfg.checkpoint.save_interval_updates > 0 and num_updates > 0 and num_updates % cfg.checkpoint.save_interval_updates == 0 and num_updates >= cfg.dataset.validate_after_updates ) ) do_validate = ( ( (not end_of_epoch and do_save) # validate during mid-epoch saves or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0) or should_stop or ( cfg.dataset.validate_interval_updates > 0 and num_updates > 0 and num_updates % cfg.dataset.validate_interval_updates == 0 ) ) and not cfg.dataset.disable_validation and num_updates >= cfg.dataset.validate_after_updates ) # Validate valid_losses = [None] if do_validate: valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets) should_stop |= should_stop_early(cfg, valid_losses[0]) return valid_losses, should_stop def get_training_stats(stats: Dict[str, Any]) -> Dict[str, Any]: stats["wall"] = round(metrics.get_meter("default", "wall").elapsed_time, 0) return stats def progress_bar( iterator, log_format: Optional[str] = None, log_interval: int = 100, log_file: Optional[str] = None, epoch: Optional[int] = None, prefix: Optional[str] = None, tensorboard_logdir: Optional[str] = None, default_log_format: str = "tqdm", wandb_project: Optional[str] = None, wandb_run_name: Optional[str] = None, azureml_logging: Optional[bool] = False, ): if log_format is None: log_format = 
default_log_format if log_file is not None: handler = logging.FileHandler(filename=log_file) logger.addHandler(handler) if log_format == "tqdm" and not sys.stderr.isatty(): log_format = "simple" if log_format == "json": bar = JsonProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "none": bar = NoopProgressBar(iterator, epoch, prefix) elif log_format == "simple": bar = SimpleProgressBar(iterator, epoch, prefix, log_interval) elif log_format == "tqdm": bar = TqdmProgressBar(iterator, epoch, prefix) else: raise ValueError("Unknown log format: {}".format(log_format)) if tensorboard_logdir: try: # [FB only] custom wrapper for TensorBoard import palaas # noqa from .fb_tbmf_wrapper import FbTbmfWrapper bar = FbTbmfWrapper(bar, log_interval) except ImportError: bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir) if wandb_project: bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name) if azureml_logging: bar = AzureMLProgressBarWrapper(bar) return bar class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") if self.is_fsdp: import fairscale if self.cfg.common.bf16: raise ValueError( "FullyShardedDataParallel is not compatible with --bf16 or " "--memory-efficient-bf16" ) if self.cfg.distributed_training.zero_sharding != "none": raise ValueError( "FullyShardedDataParallel is not compatible with --zero-sharding " "option (it's already built in)" ) if max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0": raise RuntimeError( "Please update to fairscale 0.4.0 or newer when combining " "--update-freq with FullyShardedDataParallel" ) else: if ( hasattr(self.cfg.distributed_training, "cpu_offload") and self.cfg.distributed_training.cpu_offload ): raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if not self.is_fsdp: if cfg.common.fp16: assert not cfg.common.amp, "Cannot use fp16 and AMP together" self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) elif cfg.common.amp: self._amp_retries = 0 if ( not cfg.distributed_training.pipeline_model_parallel # the DistributedFairseqModel wrapper will handle moving to device, # so only handle cases which don't use the wrapper and not self.use_distributed_wrapper ): self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) self.pipeline_model_parallel = 
cfg.distributed_training.pipeline_model_parallel self.last_device = None if self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None self._ema = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 def use_distributed_wrapper(self) -> bool: return ( self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ) or ( self.is_fsdp and self.cfg.distributed_training.cpu_offload ) def should_save_checkpoint_on_current_rank(self) -> bool: """Indicates whether to save checkpoints on the current DDP rank.""" if ( self.is_fsdp and self.cfg.distributed_training.use_sharded_state ) or getattr(self.cfg.model, "base_layers", 0) > 0: return True else: return self.is_data_parallel_master def always_call_state_dict_during_save_checkpoint(self) -> bool: if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: # FSDP calls communication collective when consolidating checkpoints return True else: return False def checkpoint_suffix(self) -> str: """Suffix to add to the checkpoint file name.""" if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( self.data_parallel_rank ) else: return self.cfg.checkpoint.checkpoint_suffix or "" def criterion(self): if self._wrapped_criterion is None: 
if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: self._wrapped_criterion = models.DistributedFairseqModel( self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion def model(self): if self._wrapped_model is None: if self.use_distributed_wrapper: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_model = self._model return self._wrapped_model def ema(self): if self._ema is None: self._build_ema() return self._ema def _build_ema(self): if self.cfg.ema.store_ema: self._ema = build_ema(self._model, self.cfg.ema, self.device) logger.info( "Exponential Moving Average Shadow Model is initialized." ) def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.is_fsdp and self.cfg.common.fp16: # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, # mostly for the grad scaling. But if we don't have the # --memory-efficient-fp16 flag set, then we're effectively doing # regular --fp16 and can allow the use of optimizers that would # otherwise be unsupported by MemoryEfficientFP16Optimizer. allow_unsupported = not self.cfg.common.memory_efficient_fp16 self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params, allow_unsupported=allow_unsupported ) elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16 or --amp, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) elif self.cfg.common.amp: self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info("NOTE: your device may support faster training with --fp16 or --amp") self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.is_fsdp: assert ( not self.cfg.optimization.use_bmuf ), "--ddp-backend=fully_sharded is not compatible with BMUF" assert self._optimizer.supports_flat_params, ( "--ddp-backend=fully_sharded is only compatible with pointwise " "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). " "However, the sharding will result in slightly different results when " "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" ) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. 
" "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) def is_fsdp(self): return self.cfg.distributed_training.ddp_backend == "fully_sharded" def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if self.cfg.checkpoint.no_save_optimizer_state: return self._gathered_optim_state = None if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() elif self.is_fsdp and not self.model.use_sharded_state: st = self.model.gather_full_optim_state_dict( self.optimizer ) # only returns on rank 0 self._gathered_optim_state = st def state_dict(self): state_dict = { "args": None, # legacy "cfg": ( OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg ), "model": self.model.state_dict(), "criterion": ( self.criterion.state_dict() if utils.has_parameters(self.criterion) else None ), "optimizer_history": (self._optim_history or []) + [ { "criterion_name": self.get_criterion().__class__.__name__, "optimizer_name": self.optimizer.__class__.__name__, "lr_scheduler_state": self.lr_scheduler.state_dict(), "num_updates": self.get_num_updates(), } ], "task_state": self.task.state_dict() if self.task is not None else {}, "extra_state": { "metrics": metrics.state_dict(), "previous_training_time": self.cumulative_training_time(), }, } if self.cfg.ema.store_ema: # Save EMA model state as extra state state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() if self.cfg.ema.ema_fp32: # Save EMA params in fp32 state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params if not self.cfg.checkpoint.no_save_optimizer_state: if self._gathered_optim_state is not None: state_dict["last_optimizer_state"] = self._gathered_optim_state self._gathered_optim_state = None else: state_dict["last_optimizer_state"] = self.optimizer.state_dict() if self.is_fsdp: # save meta data for recombining checkpoint upon loading state_dict["fsdp_metadata"] = self.model.local_metadata_dict() return state_dict def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" logger.info(f"Saving checkpoint to {filename}") # call state_dict on all ranks in case it needs internal communication state_dict = utils.move_to_cpu(self.state_dict()) state_dict["extra_state"].update(extra_state) if self.should_save_checkpoint_on_current_rank: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, ) logger.info(f"Finished saving checkpoint to {filename}") def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. 
""" extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu # FSDP requires loading checkpoint shards on all ranks or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or getattr(self.cfg.model, "base_layers", 0) > 0 ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: self.model.load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) # save memory for later steps del state["model"] if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) del state["criterion"] except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if self.is_fsdp and not self.model.use_sharded_state: # if use_sharded_state, the last_optim_state is already sharded, skip this last_optim_state = self.model.get_shard_from_optim_state_dict( last_optim_state ) elif not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: itr_state = extra_state["train_iterator"] if type(itr_state) == list: # assert len(itr_state) == self.data_parallel_world_size itr_state = itr_state[self.data_parallel_rank] extra_state["train_iterator"] = itr_state epoch = itr_state.get("epoch", 1) if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if ( itr_state.get("version", 1) >= 2 and itr_state.get("iterations_in_epoch", 0) == 0 ): # reset meters at start of epoch reset_meters = True if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() if self.cfg.ema.store_ema: if "ema" not in extra_state: logger.warn( "EMA not found in checkpoint. But store_ema is True. " "EMA is re-initialized from checkpoint." ) self.ema.restore(state["model"], build_fp32_params=self.cfg.ema.ema_fp32) else: logger.info( "Loading EMA from checkpoint" ) self.ema.restore(extra_state["ema"], build_fp32_params=False) if self.cfg.ema.ema_fp32: if "ema_fp32_params" in extra_state: logger.info( "Loading EMA fp32 params from checkpoint" ) self.ema.build_fp32_params(extra_state["ema_fp32_params"]) else: logger.info( "Building EMA fp32 params from EMA model in checkpoint" ) self.ema.build_fp32_params() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def 
get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, # always pass a fixed "epoch" to keep validation data consistent # across training epochs epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 # The no_sync context manager results in increased memory # usage with FSDP, since full-size gradients will be # accumulated on each GPU. It's typically a better tradeoff # to do the extra communication with FSDP. 
and not self.is_fsdp ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass self._xla_markstep_and_send_to_cpu() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() logging_outputs, ( sample_size, ooms, total_train_time, ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. # In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. 
with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.ddp_backend != "slow_mo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # in case of AMP, if gradients are Nan/Inf then # optimizer step is still required if self.cfg.common.amp: overflow = True else: # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) if self.cfg.common.amp and overflow: if self._amp_retries == self.cfg.common.amp_batch_retries: logger.info("AMP: skipping this batch.") self._amp_retries = 0 else: self._amp_retries += 1 return self.train_step(samples, raise_oom) # recursion to feed in same batch except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs, ) raise except OverflowError as e: overflow = True logger.info( f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" ) grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer # after the step if hasattr(self.model, "perform_additional_optimizer_actions"): if hasattr(self.optimizer, "fp32_params"): self.model.perform_additional_optimizer_actions( self.optimizer.optimizer, self.optimizer.fp32_params ) else: self.model.perform_additional_optimizer_actions( self.optimizer.optimizer ) logging_output = None if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo": self.set_num_updates(self.get_num_updates() + 1) if self.cfg.ema.store_ema: # Step EMA forward with new model. 
self.ema.step( self.get_model(), self.get_num_updates(), ) metrics.log_scalar( "ema_decay", self.ema.get_decay(), priority=10000, round=5, weight=0, ) if self.tpu: import torch_xla.core.xla_model as xm # mark step on TPUs self._xla_markstep_and_send_to_cpu() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0 ) logging_outputs = self._xla_markstep_and_send_to_cpu( logging_outputs ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: if self.cuda and self.cuda_env is not None: # log minimum free memory over the iteration gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 torch.cuda.reset_peak_memory_stats() gb_free = self.cuda_env.total_memory_in_GB - gb_used metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16 or self.cfg.common.amp: metrics.log_scalar( "loss_scale", ( self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale() ), priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. 
extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion, **extra_kwargs ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats if self.tpu: logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) # logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_outputs def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): def agg_norm_fn(total_norm): total_norm = total_norm.cuda().float() ** 2 total_norm = distributed_utils.all_reduce( total_norm, group=self.data_parallel_process_group ) return total_norm ** 0.5 should_agg_norm = ( self.is_fsdp and ( self.data_parallel_process_group is not None or torch.distributed.is_initialized() ) ) return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None ) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _fp_convert_sample(self, sample): def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth # it makes sense to do the format conversion on the CPU and then transfer # a smaller buffer to the device. This also saves GPU memory capacity. 
if self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self.cuda: if self.pipeline_model_parallel: if 'target' in sample: sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) if not self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( (torch.isfinite(tensor).all() and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()) or (self.cfg.common.amp and not torch.isfinite(tensor).all()) # in case of amp non-finite grads are fine ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=legacy_ddp. " "Or are you mixing up different generation of GPUs in training?" 
+ "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _xla_markstep_and_send_to_cpu(self, data=None): import torch_xla.core.xla_model as xm xm.mark_step() if data is not None: from fairseq.utils import xla_device_to_cpu return xla_device_to_cpu(data) The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]` to solve the following problem: Train the model for one epoch and return validation losses. 
Here is the function: def train( cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr ) -> Tuple[List[Optional[float]], bool]: """Train the model for one epoch and return validation losses.""" # Initialize data iterator itr = epoch_itr.next_epoch_itr( fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus, shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum), ) update_freq = ( cfg.optimization.update_freq[epoch_itr.epoch - 1] if epoch_itr.epoch <= len(cfg.optimization.update_freq) else cfg.optimization.update_freq[-1] ) itr = iterators.GroupedIterator( itr, update_freq, # skip_remainder_batch=cfg.optimization.skip_remainder_batch, ) if cfg.common.tpu: itr = utils.tpu_data_loader(itr) progress = progress_bar.progress_bar( itr, log_format=cfg.common.log_format, log_file=cfg.common.log_file, log_interval=cfg.common.log_interval, epoch=epoch_itr.epoch, tensorboard_logdir=( cfg.common.tensorboard_logdir if distributed_utils.is_master(cfg.distributed_training) else None ), default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"), wandb_project=( cfg.common.wandb_project if distributed_utils.is_master(cfg.distributed_training) else None ), wandb_run_name=os.environ.get( "WANDB_NAME", os.path.basename(cfg.checkpoint.save_dir) ), azureml_logging=( cfg.common.azureml_logging if distributed_utils.is_master(cfg.distributed_training) else False ), ) progress.update_config(_flatten_config(cfg)) trainer.begin_epoch(epoch_itr.epoch) valid_subsets = cfg.dataset.valid_subset.split(",") should_stop = False num_updates = trainer.get_num_updates() logger.info("Start validating") valid_losses, should_stop = validate_and_save( cfg, trainer, task, epoch_itr, valid_subsets, True ) # log end-of-epoch stats logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch)) stats = get_training_stats(metrics.get_smoothed_values("train")) progress.print(stats, tag="train", step=num_updates) # reset epoch-level meters metrics.reset_meters("train") return valid_losses, True
Train the model for one epoch and return validation losses.
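The `train` loop in the record above calls `progress.update_config(_flatten_config(cfg))`, but `_flatten_config` itself never appears in this excerpt. The sketch below is a plausible stand-in, not the fairseq implementation: it assumes the config resolves to a plain nested dict and flattens it into dot-separated keys that a progress bar could log.

```python
# Hypothetical stand-in for _flatten_config (not the fairseq implementation):
# flatten a nested OmegaConf config into dot-separated keys for logging.
from typing import Any, Dict
from omegaconf import DictConfig, OmegaConf


def flatten_config_sketch(cfg: DictConfig) -> Dict[str, Any]:
    def _flatten(d: Dict[str, Any], prefix: str = "") -> Dict[str, Any]:
        flat: Dict[str, Any] = {}
        for key, value in d.items():
            name = f"{prefix}{key}"
            if isinstance(value, dict):
                flat.update(_flatten(value, name + "."))
            else:
                flat[name] = value
        return flat

    # resolve=True assumes no unresolvable interpolations remain in cfg
    return _flatten(OmegaConf.to_container(cfg, resolve=True))


# Usage (illustrative): flatten_config_sketch(cfg)["optimization.max_epoch"]
```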
182,725
import unilm import argparse import logging import math import os import sys from typing import Any, Callable, Dict, List, Optional, Tuple import numpy as np import torch from omegaconf import DictConfig, OmegaConf from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils from fairseq.data import data_utils, iterators from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, cfg: FairseqConfig, task, model, criterion, quantizer=None): if isinstance(cfg, Namespace): logger.warning( "argparse.Namespace configuration is deprecated! Automatically converting to OmegaConf" ) cfg = convert_namespace_to_omegaconf(cfg) self.cfg = cfg self.task = task # catalog shared parameters shared_params = _catalog_shared_params(model) self.tpu = cfg.common.tpu self.cuda = torch.cuda.is_available() and not cfg.common.cpu and not self.tpu if self.cuda: self.device = torch.device("cuda") elif self.tpu: self.device = utils.get_tpu_device() else: self.device = torch.device("cpu") if self.is_fsdp: import fairscale if self.cfg.common.bf16: raise ValueError( "FullyShardedDataParallel is not compatible with --bf16 or " "--memory-efficient-bf16" ) if self.cfg.distributed_training.zero_sharding != "none": raise ValueError( "FullyShardedDataParallel is not compatible with --zero-sharding " "option (it's already built in)" ) if max(self.cfg.optimization.update_freq) > 1 and fairscale.__version__ < "0.4.0": raise RuntimeError( "Please update to fairscale 0.4.0 or newer when combining " "--update-freq with FullyShardedDataParallel" ) else: if ( hasattr(self.cfg.distributed_training, "cpu_offload") and self.cfg.distributed_training.cpu_offload ): raise ValueError("--cpu-offload requires --ddp-backend=fully_sharded") # copy model and criterion to current device/dtype self._criterion = criterion self._model = model if not self.is_fsdp: if cfg.common.fp16: assert not cfg.common.amp, "Cannot use fp16 and AMP together" self._criterion = self._criterion.half() self._model = self._model.half() elif cfg.common.bf16: self._criterion = self._criterion.to(dtype=torch.bfloat16) self._model = self._model.to(dtype=torch.bfloat16) elif cfg.common.amp: self._amp_retries = 0 if ( not cfg.distributed_training.pipeline_model_parallel # the DistributedFairseqModel wrapper will handle moving to device, # so only handle cases which don't use the wrapper and not self.use_distributed_wrapper ): self._criterion = self._criterion.to(device=self.device) self._model = self._model.to(device=self.device) self.pipeline_model_parallel = cfg.distributed_training.pipeline_model_parallel self.last_device = None if self.cuda and self.pipeline_model_parallel: self.last_device = torch.device( 
cfg.distributed_training.pipeline_devices[-1] ) # check that shared parameters are preserved after device transfer for shared_param in shared_params: ref = _get_module_by_path(self._model, shared_param[0]) for path in shared_param[1:]: logger.info( "detected shared parameter: {} <- {}".format(shared_param[0], path) ) _set_module_by_path(self._model, path, ref) self._dummy_batch = None # indicates we don't have a dummy batch at first self._lr_scheduler = None self._num_updates = 0 self._num_xla_compiles = 0 # for TPUs self._optim_history = None self._optimizer = None self._warn_once = set() self._wrapped_criterion = None self._wrapped_model = None self._ema = None # TODO(myleott): support tpu if self.cuda and self.data_parallel_world_size > 1: self._grad_norm_buf = torch.cuda.DoubleTensor(self.data_parallel_world_size) else: self._grad_norm_buf = None self.quantizer = quantizer if self.quantizer is not None: self.quantizer.set_trainer(self) # get detailed cuda environment if self.cuda: self.cuda_env = utils.CudaEnvironment() if self.data_parallel_world_size > 1: self.cuda_env_arr = distributed_utils.all_gather_list( self.cuda_env, group=distributed_utils.get_global_group() ) else: self.cuda_env_arr = [self.cuda_env] if self.data_parallel_rank == 0: utils.CudaEnvironment.pretty_print_cuda_env_list(self.cuda_env_arr) else: self.cuda_env = None self.cuda_env_arr = None metrics.log_start_time("wall", priority=790, round=0) self._start_time = time.time() self._previous_training_time = 0 self._cumulative_training_time = None def reinitialize(self): """Reinitialize the Trainer, typically after model params change.""" self._lr_scheduler = None self._optimizer = None self._wrapped_criterion = None self._wrapped_model = None def data_parallel_world_size(self): if self.cfg.distributed_training.distributed_world_size == 1: return 1 return distributed_utils.get_data_parallel_world_size() def data_parallel_process_group(self): return distributed_utils.get_data_parallel_group() def data_parallel_rank(self): if self.cfg.distributed_training.distributed_world_size == 1: return 0 return distributed_utils.get_data_parallel_rank() def is_data_parallel_master(self): # NOTE: this returns true for all model parallel replicas with data # parallel rank 0 return self.data_parallel_rank == 0 def use_distributed_wrapper(self) -> bool: return ( self.data_parallel_world_size > 1 and not self.cfg.optimization.use_bmuf ) or ( self.is_fsdp and self.cfg.distributed_training.cpu_offload ) def should_save_checkpoint_on_current_rank(self) -> bool: """Indicates whether to save checkpoints on the current DDP rank.""" if ( self.is_fsdp and self.cfg.distributed_training.use_sharded_state ) or getattr(self.cfg.model, "base_layers", 0) > 0: return True else: return self.is_data_parallel_master def always_call_state_dict_during_save_checkpoint(self) -> bool: if self.is_fsdp and not self.cfg.distributed_training.use_sharded_state: # FSDP calls communication collective when consolidating checkpoints return True else: return False def checkpoint_suffix(self) -> str: """Suffix to add to the checkpoint file name.""" if self.is_fsdp and self.cfg.distributed_training.use_sharded_state: return self.cfg.checkpoint.checkpoint_suffix + "-shard{0}".format( self.data_parallel_rank ) else: return self.cfg.checkpoint.checkpoint_suffix or "" def criterion(self): if self._wrapped_criterion is None: if utils.has_parameters(self._criterion) and self.use_distributed_wrapper: self._wrapped_criterion = models.DistributedFairseqModel( 
self.cfg.distributed_training, self._criterion, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_criterion = self._criterion return self._wrapped_criterion def model(self): if self._wrapped_model is None: if self.use_distributed_wrapper: self._wrapped_model = models.DistributedFairseqModel( self.cfg.distributed_training, self._model, process_group=self.data_parallel_process_group, device=self.device, ) else: self._wrapped_model = self._model return self._wrapped_model def ema(self): if self._ema is None: self._build_ema() return self._ema def _build_ema(self): if self.cfg.ema.store_ema: self._ema = build_ema(self._model, self.cfg.ema, self.device) logger.info( "Exponential Moving Average Shadow Model is initialized." ) def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer def lr_scheduler(self): if self._lr_scheduler is None: self._build_optimizer() # this will initialize self._lr_scheduler return self._lr_scheduler def _build_optimizer(self): params = list( filter( lambda p: p.requires_grad, chain(self.model.parameters(), self.criterion.parameters()), ) ) if self.is_fsdp and self.cfg.common.fp16: # FullyShardedDataParallel always uses MemoryEfficientFP16 wrapper, # mostly for the grad scaling. But if we don't have the # --memory-efficient-fp16 flag set, then we're effectively doing # regular --fp16 and can allow the use of optimizers that would # otherwise be unsupported by MemoryEfficientFP16Optimizer. allow_unsupported = not self.cfg.common.memory_efficient_fp16 self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params, allow_unsupported=allow_unsupported ) elif self.cfg.common.fp16 or self.cfg.common.bf16 or self.cfg.common.amp: if self.cuda and torch.cuda.get_device_capability(0)[0] < 7: logger.info( "NOTE: your device does NOT support faster training with --fp16 or --amp, " "please switch to FP32 which is likely to be faster" ) if ( self.cfg.common.memory_efficient_fp16 or self.cfg.common.memory_efficient_bf16 ): self._optimizer = optim.MemoryEfficientFP16Optimizer.build_optimizer( self.cfg, params ) elif self.cfg.common.amp: self._optimizer = optim.AMPOptimizer.build_optimizer(self.cfg, params) else: self._optimizer = optim.FP16Optimizer.build_optimizer(self.cfg, params) else: if self.cuda and torch.cuda.get_device_capability(0)[0] >= 7: logger.info("NOTE: your device may support faster training with --fp16 or --amp") self._optimizer = optim.build_optimizer(self.cfg.optimizer, params) if self.is_fsdp: assert ( not self.cfg.optimization.use_bmuf ), "--ddp-backend=fully_sharded is not compatible with BMUF" assert self._optimizer.supports_flat_params, ( "--ddp-backend=fully_sharded is only compatible with pointwise " "optimizers (e.g., Adam, AdamW, Adadelta, Adamax, SGD, etc.). " "However, the sharding will result in slightly different results when " "using non-pointwise optimizers (e.g., Adagrad, Adafactor, LAMB)" ) if self.cfg.optimization.use_bmuf: self._optimizer = optim.FairseqBMUF( self.cfg.bmuf, self._optimizer, ) if self.cfg.distributed_training.zero_sharding == "os": if ( self.cfg.common.fp16 and not self.cfg.common.memory_efficient_fp16 and not self.cfg.common.memory_efficient_bf16 ) and not self.cfg.common.fp16_no_flatten_grads: raise ValueError( "ZeRO is incomptabile with fp16 and flattened grads. 
" "Please use --fp16-no-flatten-grads" ) else: optim.shard_(self._optimizer, self.data_parallel_process_group) # We should initialize the learning rate scheduler immediately after # building the optimizer, so that the initial learning rate is set. self._lr_scheduler = lr_scheduler.build_lr_scheduler( self.cfg.lr_scheduler, self.optimizer, ) self._lr_scheduler.step_update(0) def is_fsdp(self): return self.cfg.distributed_training.ddp_backend == "fully_sharded" def consolidate_optimizer(self): """For OSS, we need to consolidate the state dict.""" if self.cfg.checkpoint.no_save_optimizer_state: return self._gathered_optim_state = None if hasattr(self.optimizer.optimizer, "consolidate_state_dict"): self.optimizer.optimizer.consolidate_state_dict() elif self.is_fsdp and not self.model.use_sharded_state: st = self.model.gather_full_optim_state_dict( self.optimizer ) # only returns on rank 0 self._gathered_optim_state = st def state_dict(self): state_dict = { "args": None, # legacy "cfg": ( OmegaConf.to_container(self.cfg, resolve=True, enum_to_str=True) if OmegaConf.is_config(self.cfg) else self.cfg ), "model": self.model.state_dict(), "criterion": ( self.criterion.state_dict() if utils.has_parameters(self.criterion) else None ), "optimizer_history": (self._optim_history or []) + [ { "criterion_name": self.get_criterion().__class__.__name__, "optimizer_name": self.optimizer.__class__.__name__, "lr_scheduler_state": self.lr_scheduler.state_dict(), "num_updates": self.get_num_updates(), } ], "task_state": self.task.state_dict() if self.task is not None else {}, "extra_state": { "metrics": metrics.state_dict(), "previous_training_time": self.cumulative_training_time(), }, } if self.cfg.ema.store_ema: # Save EMA model state as extra state state_dict["extra_state"]["ema"] = self.ema.get_model().state_dict() if self.cfg.ema.ema_fp32: # Save EMA params in fp32 state_dict["extra_state"]["ema_fp32_params"] = self.ema.fp32_params if not self.cfg.checkpoint.no_save_optimizer_state: if self._gathered_optim_state is not None: state_dict["last_optimizer_state"] = self._gathered_optim_state self._gathered_optim_state = None else: state_dict["last_optimizer_state"] = self.optimizer.state_dict() if self.is_fsdp: # save meta data for recombining checkpoint upon loading state_dict["fsdp_metadata"] = self.model.local_metadata_dict() return state_dict def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" logger.info(f"Saving checkpoint to {filename}") # call state_dict on all ranks in case it needs internal communication state_dict = utils.move_to_cpu(self.state_dict()) state_dict["extra_state"].update(extra_state) if self.should_save_checkpoint_on_current_rank: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=self.cfg.checkpoint.write_checkpoints_asynchronously, ) logger.info(f"Finished saving checkpoint to {filename}") def load_checkpoint( self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None, reset_meters=False, ): """ Load all training state from a checkpoint file. rank = 0 will load the checkpoint, and then broadcast it to all other ranks. 
""" extra_state, self._optim_history, last_optim_state = None, [], None logger.info(f"Preparing to load checkpoint {filename}") is_distributed = self.data_parallel_world_size > 1 bexists = PathManager.isfile(filename) if bexists: load_on_all_ranks = ( self.cfg.checkpoint.load_checkpoint_on_all_dp_ranks # TPUs don't support broadcast yet, so load checkpoints # on every worker for now or self.tpu # FSDP requires loading checkpoint shards on all ranks or (self.is_fsdp and self.cfg.distributed_training.use_sharded_state) or getattr(self.cfg.model, "base_layers", 0) > 0 ) if load_on_all_ranks or self.data_parallel_rank == 0: state = checkpoint_utils.load_checkpoint_to_cpu( filename, load_on_all_ranks=load_on_all_ranks ) last_optim_state = state.get("last_optimizer_state", None) # If doing zero_sharding, do not broadcast global optimizer # state. Later we will broadcast sharded states to each rank # to avoid memory from exploding. if ( not load_on_all_ranks and self.cfg.distributed_training.zero_sharding == "os" and "last_optimizer_state" in state and is_distributed ): state["last_optimizer_state"] = "SHARDED" else: last_optim_state = None state = None if is_distributed and not load_on_all_ranks: state = distributed_utils.broadcast_object( state, src_rank=0, group=self.data_parallel_process_group, dist_device=self.device, ) if self.data_parallel_rank > 0: last_optim_state = state.get("last_optimizer_state", None) # load model parameters try: self.model.load_state_dict( state["model"], strict=True, model_cfg=self.cfg.model ) # save memory for later steps del state["model"] if utils.has_parameters(self.get_criterion()): self.get_criterion().load_state_dict( state["criterion"], strict=True ) del state["criterion"] except Exception: raise Exception( "Cannot load model parameters from checkpoint {}; " "please ensure that the architectures match.".format(filename) ) extra_state = state["extra_state"] self._optim_history = state["optimizer_history"] if last_optim_state is not None and not reset_optimizer: # rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert ( last_optim["criterion_name"] == self.get_criterion().__class__.__name__ ), f"Criterion does not match; please reset the optimizer (--reset-optimizer). {last_optim['criterion_name']} vs {self.get_criterion().__class__.__name__}" assert ( last_optim["optimizer_name"] == self.optimizer.__class__.__name__ ), f"Optimizer does not match; please reset the optimizer (--reset-optimizer). 
{last_optim['optimizer_name']} vs {self.optimizer.__class__.__name__}" if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim["lr_scheduler_state"]) if self.is_fsdp and not self.model.use_sharded_state: # if use_sharded_state, the last_optim_state is already sharded, skip this last_optim_state = self.model.get_shard_from_optim_state_dict( last_optim_state ) elif not load_on_all_ranks and is_distributed: last_optim_state = self.optimizer.broadcast_global_state_dict( last_optim_state ) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self.set_num_updates(last_optim["num_updates"]) if extra_state is not None: itr_state = extra_state["train_iterator"] if type(itr_state) == list: # assert len(itr_state) == self.data_parallel_world_size itr_state = itr_state[self.data_parallel_rank] extra_state["train_iterator"] = itr_state epoch = itr_state.get("epoch", 1) if "previous_training_time" in extra_state: self._previous_training_time = extra_state["previous_training_time"] self._start_time = time.time() self.lr_step(epoch) if ( itr_state.get("version", 1) >= 2 and itr_state.get("iterations_in_epoch", 0) == 0 ): # reset meters at start of epoch reset_meters = True if "metrics" in extra_state and not reset_meters: metrics.load_state_dict(extra_state["metrics"]) # reset TimeMeters, since their start times don't make sense anymore for meter in metrics.get_meters("default"): if isinstance(meter, meters.TimeMeter): meter.reset() if self.cfg.ema.store_ema: if "ema" not in extra_state: logger.warn( "EMA not found in checkpoint. But store_ema is True. " "EMA is re-initialized from checkpoint." ) self.ema.restore(state["model"], build_fp32_params=self.cfg.ema.ema_fp32) else: logger.info( "Loading EMA from checkpoint" ) self.ema.restore(extra_state["ema"], build_fp32_params=False) if self.cfg.ema.ema_fp32: if "ema_fp32_params" in extra_state: logger.info( "Loading EMA fp32 params from checkpoint" ) self.ema.build_fp32_params(extra_state["ema_fp32_params"]) else: logger.info( "Building EMA fp32 params from EMA model in checkpoint" ) self.ema.build_fp32_params() logger.info( "Loaded checkpoint {} (epoch {} @ {} updates)".format( filename, epoch, self.get_num_updates() ) ) else: logger.info("No existing checkpoint found {}".format(filename)) return extra_state def get_train_iterator( self, epoch, combine=True, load_dataset=True, data_selector=None, shard_batch_itr=True, disable_iterator_cache=False, ): """Return an EpochBatchIterator over the training set for a given epoch.""" if load_dataset: logger.info("loading train data for epoch {}".format(epoch)) self.task.load_dataset( self.cfg.dataset.train_subset, epoch=epoch, combine=combine, data_selector=data_selector, tpu=self.tpu, ) batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(self.cfg.dataset.train_subset), max_tokens=self.cfg.dataset.max_tokens, max_sentences=self.cfg.dataset.batch_size, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), self.cfg.dataset.max_tokens, ), ignore_invalid_inputs=True, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size if shard_batch_itr else 1, shard_id=self.data_parallel_rank if shard_batch_itr else 0, num_workers=self.cfg.dataset.num_workers, epoch=epoch, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def 
get_valid_iterator( self, subset, disable_iterator_cache=False, ): """Return an EpochBatchIterator over given validation subset for a given epoch.""" batch_iterator = self.task.get_batch_iterator( dataset=self.task.dataset(subset), max_tokens=self.cfg.dataset.max_tokens_valid, max_sentences=self.cfg.dataset.batch_size_valid, max_positions=utils.resolve_max_positions( self.task.max_positions(), self.model.max_positions(), ), ignore_invalid_inputs=self.cfg.dataset.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=self.cfg.dataset.required_batch_size_multiple, seed=self.cfg.common.seed, num_shards=self.data_parallel_world_size, shard_id=self.data_parallel_rank, num_workers=self.cfg.dataset.num_workers, # always pass a fixed "epoch" to keep validation data consistent # across training epochs epoch=1, data_buffer_size=self.cfg.dataset.data_buffer_size, disable_iterator_cache=disable_iterator_cache, ) self.reset_dummy_batch(batch_iterator.first_batch) return batch_iterator def begin_epoch(self, epoch): """Called at the beginning of each epoch.""" logger.info("begin training epoch {}".format(epoch)) self.lr_step_begin_epoch(epoch) if self.quantizer is not None: self.quantizer.begin_epoch(epoch) # task specific setup per epoch self.task.begin_epoch(epoch, self.get_model()) if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("begin_epoch") # wait for all workers xm.mark_step() def begin_valid_epoch(self, epoch): """Called at the beginning of each validation epoch.""" # task specific setup per validation epoch self.task.begin_valid_epoch(epoch, self.get_model()) def reset_dummy_batch(self, batch): self._dummy_batch = batch def train_step(self, samples, raise_oom=False): """Do forward, backward and parameter update.""" self._set_seed() self.model.train() self.criterion.train() self.zero_grad() metrics.log_start_time("train_wall", priority=800, round=0) # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() # forward and backward pass logging_outputs, sample_size, ooms = [], 0, 0 for i, sample in enumerate(samples): # delayed update loop sample, is_dummy_batch = self._prepare_sample(sample) def maybe_no_sync(): """ Whenever *samples* contains more than one mini-batch, we want to accumulate gradients locally and only call all-reduce in the last backwards pass. """ if ( self.data_parallel_world_size > 1 and hasattr(self.model, "no_sync") and i < len(samples) - 1 # The no_sync context manager results in increased memory # usage with FSDP, since full-size gradients will be # accumulated on each GPU. It's typically a better tradeoff # to do the extra communication with FSDP. 
and not self.is_fsdp ): return self.model.no_sync() else: return contextlib.ExitStack() # dummy contextmanager try: with maybe_no_sync(): # forward and backward loss, sample_size_i, logging_output = self.task.train_step( sample=sample, model=self.model, criterion=self.criterion, optimizer=self.optimizer, update_num=self.get_num_updates(), ignore_grad=is_dummy_batch, **extra_kwargs, ) del loss logging_outputs.append(logging_output) sample_size += sample_size_i # emptying the CUDA cache after the first step can # reduce the chance of OOM if self.cuda and self.get_num_updates() == 0: torch.cuda.empty_cache() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if raise_oom: raise e logger.warning( "attempting to recover from OOM in forward/backward pass" ) ooms += 1 self.zero_grad() if self.cuda: torch.cuda.empty_cache() if self.cfg.distributed_training.distributed_world_size == 1: return None else: raise e if self.tpu and i < len(samples) - 1: # tpu-comment: every XLA operation before marking step is # appended to the IR graph, and processing too many batches # before marking step can lead to OOM errors. # To handle gradient accumulation use case, we explicitly # mark step here for every forward pass without a backward pass self._xla_markstep_and_send_to_cpu() if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 if torch.is_tensor(sample_size): sample_size = sample_size.float() else: sample_size = float(sample_size) # gather logging outputs from all replicas if self._sync_stats(): train_time = self._local_cumulative_training_time() logging_outputs, ( sample_size, ooms, total_train_time, ) = self._aggregate_logging_outputs( logging_outputs, sample_size, ooms, train_time, ignore=is_dummy_batch ) self._cumulative_training_time = ( total_train_time / self.data_parallel_world_size ) overflow = False try: with torch.autograd.profiler.record_function("reduce-grads"): # reduce gradients across workers self.optimizer.all_reduce_grads(self.model) if utils.has_parameters(self.criterion): self.optimizer.all_reduce_grads(self.criterion) with torch.autograd.profiler.record_function("multiply-grads"): # multiply gradients by (data_parallel_size / sample_size) since # DDP normalizes by the number of data parallel workers for # improved fp16 precision. # Thus we get (sum_of_gradients / sample_size) at the end. # In case of fp16, this step also undoes loss scaling. # (Debugging note: Some optimizers perform this scaling on the # fly, so inspecting model.parameters() or optimizer.params may # still show the original, unscaled gradients.) numer = ( self.data_parallel_world_size if not self.cfg.optimization.use_bmuf or self._sync_stats() else 1 ) self.optimizer.multiply_grads(numer / (sample_size or 1.0)) # Note: (sample_size or 1.0) handles the case of a zero gradient, in a # way that avoids CPU/device transfers in case sample_size is a GPU or # TPU object. The assumption is that the gradient itself is also 0. 
with torch.autograd.profiler.record_function("clip-grads"): # clip grads grad_norm = self.clip_grad_norm(self.cfg.optimization.clip_norm) # check that grad norms are consistent across workers # on tpu check tensor is slow if not self.tpu: if ( not self.cfg.optimization.use_bmuf and self.cfg.distributed_training.ddp_backend != "slow_mo" ): self._check_grad_norms(grad_norm) if not torch.isfinite(grad_norm).all(): # in case of AMP, if gradients are Nan/Inf then # optimizer step is still required if self.cfg.common.amp: overflow = True else: # check local gradnorm single GPU case, trigger NanDetector raise FloatingPointError("gradients are Nan/Inf") with torch.autograd.profiler.record_function("optimizer"): # take an optimization step self.task.optimizer_step( self.optimizer, model=self.model, update_num=self.get_num_updates() ) if self.cfg.common.amp and overflow: if self._amp_retries == self.cfg.common.amp_batch_retries: logger.info("AMP: skipping this batch.") self._amp_retries = 0 else: self._amp_retries += 1 return self.train_step(samples, raise_oom) # recursion to feed in same batch except FloatingPointError: # re-run the forward and backward pass with hooks attached to print # out where it fails self.zero_grad() with NanDetector(self.get_model()): for _, sample in enumerate(samples): sample, _ = self._prepare_sample(sample) self.task.train_step( sample, self.model, self.criterion, self.optimizer, self.get_num_updates(), ignore_grad=False, **extra_kwargs, ) raise except OverflowError as e: overflow = True logger.info( f"NOTE: gradient overflow detected, ignoring gradient, {str(e)}" ) grad_norm = torch.tensor(0.0).cuda() self.zero_grad() except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) logger.error("OOM during optimization, irrecoverable") raise e # Some distributed wrappers (e.g., SlowMo) need access to the optimizer # after the step if hasattr(self.model, "perform_additional_optimizer_actions"): if hasattr(self.optimizer, "fp32_params"): self.model.perform_additional_optimizer_actions( self.optimizer.optimizer, self.optimizer.fp32_params ) else: self.model.perform_additional_optimizer_actions( self.optimizer.optimizer ) logging_output = None if not overflow or self.cfg.distributed_training.ddp_backend == "slow_mo": self.set_num_updates(self.get_num_updates() + 1) if self.cfg.ema.store_ema: # Step EMA forward with new model. 
self.ema.step( self.get_model(), self.get_num_updates(), ) metrics.log_scalar( "ema_decay", self.ema.get_decay(), priority=10000, round=5, weight=0, ) if self.tpu: import torch_xla.core.xla_model as xm # mark step on TPUs self._xla_markstep_and_send_to_cpu() # only log stats every log_interval steps # this causes wps to be misreported when log_interval > 1 logging_output = {} if self.get_num_updates() % self.cfg.common.log_interval == 0: # log memory usage mem_info = xm.get_memory_info(self.device) gb_free = mem_info["kb_free"] / 1024 / 1024 gb_total = mem_info["kb_total"] / 1024 / 1024 metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) metrics.log_scalar( "gb_total", gb_total, priority=1600, round=1, weight=0 ) logging_outputs = self._xla_markstep_and_send_to_cpu( logging_outputs ) logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # log whenever there's an XLA compilation, since these # slow down training and may indicate opportunities for # optimization self._check_xla_compilation() else: if self.cuda and self.cuda_env is not None: # log minimum free memory over the iteration gb_used = torch.cuda.max_memory_allocated() / 1024 / 1024 / 1024 torch.cuda.reset_peak_memory_stats() gb_free = self.cuda_env.total_memory_in_GB - gb_used metrics.log_scalar( "gb_free", gb_free, priority=1500, round=1, weight=0 ) # log stats logging_output = self._reduce_and_log_stats( logging_outputs, sample_size, grad_norm ) # clear CUDA cache to reduce memory fragmentation if ( self.cuda and self.cfg.common.empty_cache_freq > 0 and ( (self.get_num_updates() + self.cfg.common.empty_cache_freq - 1) % self.cfg.common.empty_cache_freq ) == 0 ): torch.cuda.empty_cache() if self.cfg.common.fp16 or self.cfg.common.amp: metrics.log_scalar( "loss_scale", ( self.optimizer.scaler.loss_scale if self.cfg.common.fp16 else self.optimizer.scaler.get_scale() ), priority=700, round=4, weight=0, ) metrics.log_stop_time("train_wall") return logging_output def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" if self.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("valid_step") # wait for all workers # If EMA is enabled through store_ema=True # and task.uses_ema is True, pass the EMA model as a keyword # argument to the task. 
extra_kwargs = {} if self.cfg.ema.store_ema and getattr(self.task, "uses_ema", False): extra_kwargs["ema_model"] = self.ema.get_model() with torch.no_grad(): self.model.eval() self.criterion.eval() sample, is_dummy_batch = self._prepare_sample(sample) try: _loss, sample_size, logging_output = self.task.valid_step( sample, self.model, self.criterion, **extra_kwargs ) except RuntimeError as e: if "out of memory" in str(e): self._log_oom(e) if not raise_oom: logger.warning( "ran out of memory in validation step, retrying batch" ) for p in self.model.parameters(): if p.grad is not None: p.grad = None # free some memory if self.cuda: torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) raise e logging_outputs = [logging_output] if is_dummy_batch: if torch.is_tensor(sample_size): sample_size.zero_() else: sample_size *= 0.0 # gather logging outputs from all replicas if self.data_parallel_world_size > 1: logging_outputs, (sample_size,) = self._aggregate_logging_outputs( logging_outputs, sample_size, ignore=is_dummy_batch, ) # log validation stats if self.tpu: logging_outputs = self._xla_markstep_and_send_to_cpu(logging_outputs) # logging_output = self._reduce_and_log_stats(logging_outputs, sample_size) return logging_outputs def zero_grad(self): self.optimizer.zero_grad() def lr_step_begin_epoch(self, epoch): """Adjust the learning rate at the beginning of the epoch.""" self.lr_scheduler.step_begin_epoch(epoch) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate at the end of the epoch.""" self.lr_scheduler.step(epoch, val_loss) # prefer updating the LR based on the number of steps return self.lr_step_update() def lr_step_update(self): """Update the learning rate after each update.""" new_lr = self.lr_scheduler.step_update(self.get_num_updates()) if isinstance(new_lr, dict): for k, v in new_lr.items(): metrics.log_scalar(f"lr_{k}", v, weight=0, priority=300) new_lr = new_lr.get("default", next(iter(new_lr.values()))) else: metrics.log_scalar("lr", new_lr, weight=0, priority=300) return new_lr def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_criterion(self): """Get the (non-wrapped) criterion instance.""" return self._criterion def get_meter(self, name): """[deprecated] Get a specific meter by name.""" from fairseq import meters if "get_meter" not in self._warn_once: self._warn_once.add("get_meter") utils.deprecation_warning( "Trainer.get_meter is deprecated. Please use fairseq.metrics instead." 
) train_meters = metrics.get_meters("train") if train_meters is None: train_meters = {} if name == "train_loss" and "loss" in train_meters: return train_meters["loss"] elif name == "train_nll_loss": # support for legacy train.py, which assumed this meter is # always initialized m = train_meters.get("nll_loss", None) return m or meters.AverageMeter() elif name == "wall": # support for legacy train.py, which assumed this meter is # always initialized m = metrics.get_meter("default", "wall") return m or meters.TimeMeter() elif name == "wps": m = metrics.get_meter("train", "wps") return m or meters.TimeMeter() elif name in {"valid_loss", "valid_nll_loss"}: # support for legacy train.py, which assumed these meters # are always initialized k = name[len("valid_") :] m = metrics.get_meter("valid", k) return m or meters.AverageMeter() elif name == "oom": return meters.AverageMeter() elif name in train_meters: return train_meters[name] return None def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def set_num_updates(self, num_updates): """Set the number of parameters updates.""" self._num_updates = num_updates self.lr_step_update() if self.quantizer: self.quantizer.step_update(self._num_updates) metrics.log_scalar("num_updates", self._num_updates, weight=0, priority=200) def clip_grad_norm(self, clip_norm): def agg_norm_fn(total_norm): total_norm = total_norm.cuda().float() ** 2 total_norm = distributed_utils.all_reduce( total_norm, group=self.data_parallel_process_group ) return total_norm ** 0.5 should_agg_norm = ( self.is_fsdp and ( self.data_parallel_process_group is not None or torch.distributed.is_initialized() ) ) return self.optimizer.clip_grad_norm( clip_norm, aggregate_norm_fn=agg_norm_fn if should_agg_norm else None ) def cumulative_training_time(self): if self._cumulative_training_time is None: # single GPU return self._local_cumulative_training_time() else: return self._cumulative_training_time def _local_cumulative_training_time(self): """Aggregate training time in seconds.""" return time.time() - self._start_time + self._previous_training_time def _fp_convert_sample(self, sample): def apply_half(t): if t.dtype is torch.float32: return t.to(dtype=torch.half) return t def apply_bfloat16(t): if t.dtype is torch.float32: return t.to(dtype=torch.bfloat16) return t if self.cfg.common.fp16: sample = utils.apply_to_sample(apply_half, sample) if self.cfg.common.bf16: sample = utils.apply_to_sample(apply_bfloat16, sample) return sample def _prepare_sample(self, sample, is_dummy=False): if sample == "DUMMY": raise Exception( "Trying to use an uninitialized 'dummy' batch. This usually indicates " "that the total number of batches is smaller than the number of " "participating GPUs. Try reducing the batch size or using fewer GPUs." ) if sample is None or len(sample) == 0: assert ( self._dummy_batch is not None and len(self._dummy_batch) > 0 ), "Invalid dummy batch: {}".format(self._dummy_batch) sample, _ = self._prepare_sample(self._dummy_batch, is_dummy=True) return sample, True # Given that PCIe/NVLink bandwidth is significantly smaller than DRAM bandwidth # it makes sense to do the format conversion on the CPU and then transfer # a smaller buffer to the device. This also saves GPU memory capacity. 
if self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self.cuda: if self.pipeline_model_parallel: if 'target' in sample: sample['target'] = utils.move_to_cuda(sample['target'], device=self.last_device) else: sample = utils.move_to_cuda(sample) elif self.tpu and is_dummy: # the dummy batch may not be on the appropriate device sample = utils.move_to_cuda(sample, device=self.device) if not self.cfg.common.on_cpu_convert_precision: sample = self._fp_convert_sample(sample) if self._dummy_batch == "DUMMY": self._dummy_batch = sample return sample, False def _set_seed(self): # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.cfg.common.seed + self.get_num_updates() utils.set_torch_seed(seed) def _sync_stats(self): # Return True if it's using multiple GPUs and DDP or multiple GPUs with # BMUF and it's a bmuf sync with warmup iterations completed before. if self.data_parallel_world_size == 1: return False elif self.cfg.optimization.use_bmuf: return ( self.get_num_updates() + 1 ) % self.cfg.bmuf.global_sync_iter == 0 and ( self.get_num_updates() + 1 ) > self.cfg.bmuf.warmup_iterations else: return True def _log_oom(self, exc): msg = "OOM: Ran out of memory with exception: {}".format(exc) logger.warning(msg) if torch.cuda.is_available() and hasattr(torch.cuda, "memory_summary"): for device_idx in range(torch.cuda.device_count()): logger.warning(torch.cuda.memory_summary(device=device_idx)) sys.stderr.flush() def _aggregate_logging_outputs( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): if self.task.__class__.logging_outputs_can_be_summed(self.get_criterion()): return self._fast_stat_sync_sum( logging_outputs, *extra_stats_to_sum, ignore=ignore ) else: return self._all_gather_list_sync( logging_outputs, *extra_stats_to_sum, ignore=ignore ) def _all_gather_list_sync( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. all_gather_list_sync is suitable when logging outputs are complex types. """ if self.tpu: raise NotImplementedError if ignore: logging_outputs = [] results = list( zip( *distributed_utils.all_gather_list( [logging_outputs] + list(extra_stats_to_sum), max_size=getattr(self.cfg.common, "all_gather_list_size", 16384), group=self.data_parallel_process_group, ) ) ) logging_outputs, extra_stats_to_sum = results[0], results[1:] logging_outputs = list(chain.from_iterable(logging_outputs)) extra_stats_to_sum = [sum(s) for s in extra_stats_to_sum] return logging_outputs, extra_stats_to_sum def _fast_stat_sync_sum( self, logging_outputs: List[Dict[str, Any]], *extra_stats_to_sum, ignore=False, ): """ Sync logging outputs across workers. fast_stat_sync_sum is faster than all_gather_list_sync, but is only suitable when logging outputs are scalars and can be summed. Note that *logging_outputs* cannot contain any nested dicts/lists. 
""" data = {} for i, stat in enumerate(extra_stats_to_sum): data["extra_stats_" + str(i)] = stat if len(logging_outputs) > 0: log_keys = list(logging_outputs[0].keys()) for k in log_keys: if not ignore: v = sum(log[k] for log in logging_outputs if k in log) else: v = logging_outputs[0][k] v = torch.zeros_like(v) if torch.is_tensor(v) else 0 data["logging_outputs_" + k] = v else: log_keys = None data = distributed_utils.all_reduce_dict( data, device=self.device, group=self.data_parallel_process_group ) extra_stats_to_sum = [ data["extra_stats_" + str(i)] for i in range(len(extra_stats_to_sum)) ] if log_keys is not None: logging_outputs = [{k: data["logging_outputs_" + k] for k in log_keys}] else: logging_outputs = [] return logging_outputs, extra_stats_to_sum def _check_grad_norms(self, grad_norm): """Check that grad norms are consistent across workers.""" if self._grad_norm_buf is not None: self._grad_norm_buf.zero_() self._grad_norm_buf[self.data_parallel_rank] = grad_norm distributed_utils.all_reduce( self._grad_norm_buf, group=self.data_parallel_process_group ) def is_consistent(tensor): max_abs_diff = torch.max(torch.abs(tensor - tensor[0])) return ( (torch.isfinite(tensor).all() and (max_abs_diff / (tensor[0] + 1e-6) < 1e-6).all()) or (self.cfg.common.amp and not torch.isfinite(tensor).all()) # in case of amp non-finite grads are fine ) if not is_consistent(self._grad_norm_buf): pretty_detail = "\n".join( "rank {:3d} = {:.8f}".format(r, n) for r, n in enumerate(self._grad_norm_buf.tolist()) ) error_detail = "grad_norm across the workers:\n{}\n".format( pretty_detail ) # use FloatingPointError to trigger NanDetector raise FloatingPointError( "Fatal error: gradients are inconsistent between workers. " "Try --ddp-backend=legacy_ddp. " "Or are you mixing up different generation of GPUs in training?" 
+ "\n" + "-" * 80 + "\n{}\n".format(error_detail) + "-" * 80 ) def _reduce_and_log_stats(self, logging_outputs, sample_size, grad_norm=None): if grad_norm is not None and ( not torch.is_tensor(grad_norm) or torch.isfinite(grad_norm) ): metrics.log_speed("ups", 1.0, priority=100, round=2) metrics.log_scalar("gnorm", grad_norm, priority=400, round=3) if self.cfg.optimization.clip_norm > 0: metrics.log_scalar( "clip", torch.where( grad_norm > self.cfg.optimization.clip_norm, grad_norm.new_tensor(100), grad_norm.new_tensor(0), ), priority=500, round=1, ) with metrics.aggregate() as agg: if logging_outputs is not None: self.task.reduce_metrics(logging_outputs, self.get_criterion()) del logging_outputs # extra warning for criterions that don't properly log a loss value if "loss" not in agg: if "loss" not in self._warn_once: self._warn_once.add("loss") logger.warning( "Criterion.reduce_metrics did not log a 'loss' value, " "which may break some functionality" ) metrics.log_scalar("loss", -1) # support legacy interface if self.tpu: logging_output = {} else: logging_output = agg.get_smoothed_values() logging_output["sample_size"] = sample_size for key_to_delete in ["ppl", "wps", "wpb", "bsz"]: if key_to_delete in logging_output: del logging_output[key_to_delete] return logging_output def _check_xla_compilation(self): import torch_xla.debug.metrics as met compile_stats = met.metric_data("CompileTime") if compile_stats is None: return num_xla_compiles = compile_stats[0] if num_xla_compiles > self._num_xla_compiles: logger.warning( "XLA compilation detected on device #{}; too many of these can lead " "to slow training, but we expect a few in the beginning".format( self.cfg.distributed_training.distributed_rank ) ) self._num_xla_compiles = num_xla_compiles def _xla_markstep_and_send_to_cpu(self, data=None): import torch_xla.core.xla_model as xm xm.mark_step() if data is not None: from fairseq.utils import xla_device_to_cpu return xla_device_to_cpu(data) def get_valid_stats( cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any] ) -> Dict[str, Any]: stats["num_updates"] = trainer.get_num_updates() if hasattr(checkpoint_utils.save_checkpoint, "best"): key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric) best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min stats[key] = best_function( checkpoint_utils.save_checkpoint.best, stats[cfg.checkpoint.best_checkpoint_metric], ) return stats
null
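In record 182,725, `Trainer.clip_grad_norm` passes an `agg_norm_fn` when running under FSDP: each shard computes a local gradient norm, and the global norm is the square root of the all-reduced sum of squared local norms. Below is a minimal single-process sketch of that aggregation; the function name is made up, and the cross-worker all-reduce is replaced by a local sum.

```python
# Single-process sketch of the FSDP gradient-norm aggregation in agg_norm_fn:
# global_norm = sqrt(sum over shards of local_norm ** 2).
# The real code all-reduces across the data-parallel group; here we just sum.
import torch


def aggregate_sharded_grad_norm(local_norms: torch.Tensor) -> torch.Tensor:
    return (local_norms.float() ** 2).sum() ** 0.5


local_norms = torch.tensor([3.0, 4.0])           # per-shard norms from two workers
print(aggregate_sharded_grad_norm(local_norms))  # tensor(5.)
```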
182,726
import unilm import argparse import logging import math import os import sys from typing import Any, Callable, Dict, List, Optional, Tuple logger = logging.getLogger("fairseq_cli.train") import numpy as np import torch from omegaconf import DictConfig, OmegaConf from fairseq import checkpoint_utils, options, quantization_utils, tasks, utils from fairseq.data import data_utils, iterators from fairseq.data.plasma_utils import PlasmaStore from fairseq.dataclass.configs import FairseqConfig from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.distributed import fsdp_enable_wrap, fsdp_wrap from fairseq.distributed import utils as distributed_utils from fairseq.file_io import PathManager from fairseq.logging import meters, metrics, progress_bar from fairseq.model_parallel.megatron_trainer import MegatronTrainer from fairseq.trainer import Trainer def main(cfg: FairseqConfig) -> None: if isinstance(cfg, argparse.Namespace): cfg = convert_namespace_to_omegaconf(cfg) utils.import_user_module(cfg.common) if ( distributed_utils.is_master(cfg.distributed_training) and "job_logging_cfg" in cfg ): # make hydra logging work with ddp (see # see https://github.com/facebookresearch/hydra/issues/1126) logging.config.dictConfig(OmegaConf.to_container(cfg.job_logging_cfg)) assert ( cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None ), "Must specify batch size either with --max-tokens or --batch-size" metrics.reset() if cfg.common.log_file is not None: handler = logging.FileHandler(filename=cfg.common.log_file) logger.addHandler(handler) np.random.seed(cfg.common.seed) utils.set_torch_seed(cfg.common.seed) # if distributed_utils.is_master(cfg.distributed_training): # checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir) # Print args logger.info(cfg) if cfg.checkpoint.write_checkpoints_asynchronously: try: import iopath # noqa: F401 except ImportError: logging.exception( "Asynchronous checkpoint writing is specified but iopath is " "not installed: `pip install iopath`" ) return # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(cfg.task) assert cfg.criterion, "Please specify criterion to train a model" # Build model and criterion if cfg.distributed_training.ddp_backend == "fully_sharded": with fsdp_enable_wrap(cfg.distributed_training): model = fsdp_wrap(task.build_model(cfg.model)) else: model = task.build_model(cfg.model) criterion = task.build_criterion(cfg.criterion) logger.info(model) logger.info("task: {}".format(task.__class__.__name__)) logger.info("model: {}".format(model.__class__.__name__)) logger.info("criterion: {}".format(criterion.__class__.__name__)) logger.info( "num. shared model params: {:,} (num. trained: {:,})".format( sum( p.numel() for p in model.parameters() if not getattr(p, "expert", False) ), sum( p.numel() for p in model.parameters() if not getattr(p, "expert", False) and p.requires_grad ), ) ) logger.info( "num. expert model params: {} (num. 
trained: {})".format( sum(p.numel() for p in model.parameters() if getattr(p, "expert", False)), sum( p.numel() for p in model.parameters() if getattr(p, "expert", False) and p.requires_grad ), ) ) # Load valid dataset (we load training data below, based on the latest checkpoint) # We load the valid dataset AFTER building the model data_utils.raise_if_valid_subsets_unintentionally_ignored(cfg) if cfg.dataset.combine_valid_subsets: task.load_dataset("valid", combine=True, epoch=1) else: for valid_sub_split in cfg.dataset.valid_subset.split(","): task.load_dataset(valid_sub_split, combine=False, epoch=1) # (optionally) Configure quantization if cfg.common.quantization_config_path is not None: quantizer = quantization_utils.Quantizer( config_path=cfg.common.quantization_config_path, max_epoch=cfg.optimization.max_epoch, max_update=cfg.optimization.max_update, ) else: quantizer = None # Build trainer if cfg.common.model_parallel_size == 1: trainer = Trainer(cfg, task, model, criterion, quantizer) else: trainer = MegatronTrainer(cfg, task, model, criterion) logger.info( "training on {} devices (GPUs/TPUs)".format( cfg.distributed_training.distributed_world_size ) ) logger.info( "max tokens per device = {} and max sentences per device = {}".format( cfg.dataset.max_tokens, cfg.dataset.batch_size, ) ) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint( cfg.checkpoint, trainer, # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) if cfg.common.tpu: import torch_xla.core.xla_model as xm xm.rendezvous("load_checkpoint") # wait for all workers max_epoch = cfg.optimization.max_epoch or math.inf lr = trainer.get_lr() train_meter = meters.StopwatchMeter() train_meter.start() while epoch_itr.next_epoch_idx <= max_epoch: if lr <= cfg.optimization.stop_min_lr: logger.info( f"stopping training because current learning rate ({lr}) is smaller " "than or equal to minimum learning rate " f"(--stop-min-lr={cfg.optimization.stop_min_lr})" ) break # train for one epoch valid_losses, should_stop = train(cfg, trainer, task, epoch_itr) if should_stop: break # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) epoch_itr = trainer.get_train_iterator( epoch_itr.next_epoch_idx, # sharded data: get train iterator for next epoch load_dataset=task.has_sharded_data("train"), # don't cache epoch iterators for sharded datasets disable_iterator_cache=task.has_sharded_data("train"), ) train_meter.stop() logger.info("done training in {:.1f} seconds".format(train_meter.sum)) # ioPath implementation to wait for all asynchronous file writes to complete. if cfg.checkpoint.write_checkpoints_asynchronously: logger.info( "ioPath PathManager waiting for all asynchronous checkpoint " "writes to finish." ) PathManager.async_close() logger.info("ioPath PathManager finished waiting.") class PlasmaStore: def __init__(self, path=DEFAULT_PLASMA_PATH, nbytes: int = GB100): self.server = self.start(path, nbytes) def __del__(self): self.server.kill() def start(path=DEFAULT_PLASMA_PATH, nbytes: int = GB100) -> subprocess.Popen: if not PYARROW_AVAILABLE: raise ImportError("please run pip install pyarrow to use --use_plasma_view") # best practice is to allocate more space than we need. 
The limitation seems to be the size of /dev/shm _server = subprocess.Popen(["plasma_store", "-m", str(nbytes), "-s", path]) plasma.connect(path, num_retries=200) # If we can't connect we fail immediately return _server def convert_namespace_to_omegaconf(args: Namespace) -> DictConfig: """Convert a flat argparse.Namespace to a structured DictConfig.""" # Here we are using field values provided in args to override counterparts inside config object overrides, deletes = override_module_args(args) # configs will be in fairseq/config after installation config_path = os.path.join("..", "config") GlobalHydra.instance().clear() with initialize(config_path=config_path): try: composed_cfg = compose("config", overrides=overrides, strict=False) except: logger.error("Error when composing. Overrides: " + str(overrides)) raise for k in deletes: composed_cfg[k] = None cfg = OmegaConf.create( OmegaConf.to_container(composed_cfg, resolve=True, enum_to_str=True) ) # hack to be able to set Namespace in dict config. this should be removed when we update to newer # omegaconf version that supports object flags, or when we migrate all existing models from omegaconf import _utils with omegaconf_no_object_check(): if cfg.task is None and getattr(args, "task", None): cfg.task = Namespace(**vars(args)) from fairseq.tasks import TASK_REGISTRY _set_legacy_defaults(cfg.task, TASK_REGISTRY[args.task]) cfg.task._name = args.task if cfg.model is None and getattr(args, "arch", None): cfg.model = Namespace(**vars(args)) from fairseq.models import ARCH_MODEL_REGISTRY _set_legacy_defaults(cfg.model, ARCH_MODEL_REGISTRY[args.arch]) cfg.model._name = args.arch if cfg.optimizer is None and getattr(args, "optimizer", None): cfg.optimizer = Namespace(**vars(args)) from fairseq.optim import OPTIMIZER_REGISTRY _set_legacy_defaults(cfg.optimizer, OPTIMIZER_REGISTRY[args.optimizer]) cfg.optimizer._name = args.optimizer if cfg.lr_scheduler is None and getattr(args, "lr_scheduler", None): cfg.lr_scheduler = Namespace(**vars(args)) from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY _set_legacy_defaults( cfg.lr_scheduler, LR_SCHEDULER_REGISTRY[args.lr_scheduler] ) cfg.lr_scheduler._name = args.lr_scheduler if cfg.criterion is None and getattr(args, "criterion", None): cfg.criterion = Namespace(**vars(args)) from fairseq.criterions import CRITERION_REGISTRY _set_legacy_defaults(cfg.criterion, CRITERION_REGISTRY[args.criterion]) cfg.criterion._name = args.criterion OmegaConf.set_struct(cfg, True) return cfg def cli_main( modify_parser: Optional[Callable[[argparse.ArgumentParser], None]] = None ) -> None: parser = options.get_training_parser() args = options.parse_args_and_arch(parser, modify_parser=modify_parser) cfg = convert_namespace_to_omegaconf(args) if cfg.common.use_plasma_view: server = PlasmaStore(path=cfg.common.plasma_path) logger.info( f"Started plasma server pid {server.server.pid} {cfg.common.plasma_path}" ) if args.profile: with torch.cuda.profiler.profile(): with torch.autograd.profiler.emit_nvtx(): distributed_utils.call_main(cfg, main) else: distributed_utils.call_main(cfg, main) # if cfg.common.use_plasma_view: # server.server.kill()
null
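`main()` in record 182,726 reports shared vs. expert parameter counts by checking a per-parameter `expert` attribute with `getattr(p, "expert", False)`. The sketch below illustrates that accounting on a toy model; the model and the manual `expert` flag are assumptions for demonstration, not part of the original code, though tagging parameters this way mirrors how MoE-style layers mark their weights.

```python
# Illustrative sketch of the shared/expert parameter accounting in main().
# The toy model and the manual `expert` flag are assumptions for demonstration.
import torch.nn as nn

model = nn.Linear(4, 4)
model.weight.expert = True  # tag the weight as an "expert" parameter

shared = sum(p.numel() for p in model.parameters() if not getattr(p, "expert", False))
expert = sum(p.numel() for p in model.parameters() if getattr(p, "expert", False))
print(f"shared: {shared}, expert: {expert}")  # shared: 4 (bias), expert: 16 (weight)
```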