file_path
stringlengths
3
280
file_language
stringclasses
66 values
content
stringlengths
1
1.04M
repo_name
stringlengths
5
92
repo_stars
int64
0
154k
repo_description
stringlengths
0
402
repo_primary_language
stringclasses
108 values
developer_username
stringlengths
1
25
developer_name
stringlengths
0
30
developer_company
stringlengths
0
82
translation/fairseq/modules/linearized_convolution.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch
import torch.nn.functional as F

from fairseq import utils

from .conv_tbc import ConvTBC


class LinearizedConvolution(ConvTBC):
    """An optimized version of nn.Conv1d.

    At training time, this module uses ConvTBC, which is an optimized version
    of Conv1d. At inference time, it optimizes incremental generation (i.e.,
    one time step at a time) by replacing the convolutions with linear layers.
    Note that the input order changes from training to inference.
    """

    def __init__(self, in_channels, out_channels, kernel_size, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        # Cached flattened copy of the conv weight, used by the incremental
        # (linear) path. Invalidated by the backward hook below so that a
        # weight update never leaves a stale cache behind.
        self._linearized_weight = None
        self.register_backward_hook(self._clear_linearized_weight)

    def forward(self, input, incremental_state=None):
        """
        Args:
            incremental_state: Used to buffer signal; if not None, then input is
                expected to contain a single frame. If the input order changes
                between time steps, call reorder_incremental_state.
        Input:
            Time x Batch x Channel during training
            Batch x Time x Channel during inference
        """
        if incremental_state is None:
            # Training / full-sequence path: plain ConvTBC.
            output = super().forward(input)
            if self.kernel_size[0] > 1 and self.padding[0] > 0:
                # remove future timesteps added by padding
                output = output[:-self.padding[0], :, :]
            return output

        # reshape weight
        weight = self._get_linearized_weight()
        kw = self.kernel_size[0]

        bsz = input.size(0)  # input: bsz x len x dim
        if kw > 1:
            input = input.data
            input_buffer = self._get_input_buffer(incremental_state)
            if input_buffer is None:
                # First step: allocate a zero-filled buffer of the last kw frames.
                input_buffer = input.new(bsz, kw, input.size(2)).zero_()
                self._set_input_buffer(incremental_state, input_buffer)
            else:
                # shift buffer
                input_buffer[:, :-1, :] = input_buffer[:, 1:, :].clone()
            # append next input
            input_buffer[:, -1, :] = input[:, -1, :]
            input = input_buffer
        with torch.no_grad():
            # One decoding step == a single linear layer over the kw buffered frames.
            output = F.linear(input.view(bsz, -1), weight, self.bias)
        return output.view(bsz, 1, -1)

    def reorder_incremental_state(self, incremental_state, new_order):
        # Reorder the buffered frames along the batch dimension (e.g. when
        # beam search reshuffles hypotheses between steps).
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            input_buffer = input_buffer.index_select(0, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(self, incremental_state, 'input_buffer')

    def _set_input_buffer(self, incremental_state, new_buffer):
        return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)

    def _get_linearized_weight(self):
        # Flatten the TBC weight into an (out_channels, kw * in_channels)
        # matrix so a decoding step reduces to one F.linear call.
        if self._linearized_weight is None:
            kw = self.kernel_size[0]
            weight = self.weight.transpose(2, 1).transpose(1, 0).contiguous()
            assert weight.size() == (self.out_channels, kw, self.in_channels)
            self._linearized_weight = weight.view(self.out_channels, -1)
        return self._linearized_weight

    def _clear_linearized_weight(self, *args):
        # Backward hook target: drop the cache whenever gradients flow.
        self._linearized_weight = None
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/modules/multihead_attention.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F

from fairseq import utils


class MultiheadAttention(nn.Module):
    """Multi-headed attention.

    See "Attention Is All You Need" for more details.
    """

    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
        self.scaling = self.head_dim ** -0.5

        # Q, K and V projections are stored as one (3*embed_dim, embed_dim)
        # matrix; the in_proj_* helpers slice out the relevant rows.
        self.in_proj_weight = Parameter(torch.Tensor(3 * embed_dim, embed_dim))
        if bias:
            self.in_proj_bias = Parameter(torch.Tensor(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

        if add_bias_kv:
            # Learned extra key/value slot appended to every sequence.
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.reset_parameters()

        self.onnx_trace = False

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    def reset_parameters(self):
        nn.init.xavier_uniform_(self.in_proj_weight)
        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.in_proj_bias is not None:
            nn.init.constant_(self.in_proj_bias, 0.)
            nn.init.constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(self, query, key, value, key_padding_mask=None, incremental_state=None,
                need_weights=True, static_kv=False, attn_mask=None):
        """Input shape: Time x Batch x Channel

        Self-attention can be implemented by passing in the same arguments for
        query, key and value. Timesteps can be masked by supplying a T x T mask in the
        `attn_mask` argument. Padding elements can be excluded from
        the key by passing a binary ByteTensor (`key_padding_mask`) with shape:
        batch x src_len, where padding elements are indicated by 1s.
        """

        # data_ptr comparison detects self-attention / encoder-decoder
        # attention without extra flags from the caller.
        qkv_same = query.data_ptr() == key.data_ptr() == value.data_ptr()
        kv_same = key.data_ptr() == value.data_ptr()

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]
        assert key.size() == value.size()

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if 'prev_key' in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert kv_same and not qkv_same
                    key = value = None
        else:
            saved_state = None

        if qkv_same:
            # self-attention
            q, k, v = self.in_proj_qkv(query)
        elif kv_same:
            # encoder-decoder attention
            q = self.in_proj_q(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k, v = self.in_proj_kv(key)
        else:
            q = self.in_proj_q(query)
            k = self.in_proj_k(key)
            v = self.in_proj_v(value)
        q *= self.scaling

        if saved_state is not None:
            # Concatenate the cached keys/values with the new time step
            # (or reuse them unchanged for a static encoder-side cache).
            if 'prev_key' in saved_state:
                if static_kv:
                    k = saved_state['prev_key']
                else:
                    k = torch.cat((saved_state['prev_key'], k), dim=0)
            if 'prev_value' in saved_state:
                if static_kv:
                    v = saved_state['prev_value']
                else:
                    v = torch.cat((saved_state['prev_value'], v), dim=0)
            saved_state['prev_key'] = k
            saved_state['prev_value'] = v

            self._set_input_buffer(incremental_state, saved_state)

        if self.bias_k is not None:
            assert self.bias_v is not None
            # Append the learned bias slot; extend the masks by one
            # (unmasked) column to match the new source length.
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)

        src_len = k.size(0)

        if key_padding_mask is not None:
            assert key_padding_mask.size(0) == bsz
            assert key_padding_mask.size(1) == src_len

        # Fold heads into the batch dimension: (bsz*num_heads, len, head_dim).
        q = q.contiguous().view(tgt_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        k = k.contiguous().view(src_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
        v = v.contiguous().view(src_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)

        if self.add_zero_attn:
            # Append an all-zero key/value slot (again widening the masks).
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat([key_padding_mask, key_padding_mask.new_zeros(key_padding_mask.size(0), 1)], dim=1)

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            attn_weights += attn_mask.unsqueeze(0)

        if key_padding_mask is not None:
            # don't attend to padding symbols
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.float().masked_fill(
                key_padding_mask.unsqueeze(1).unsqueeze(2),
                float('-inf'),
            ).type_as(attn_weights)  # FP16 support: cast to float and back
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)

        # Softmax in float32 for numerical stability under fp16.
        attn_weights = F.softmax(attn_weights.float(), dim=-1).type_as(attn_weights)
        attn_weights = F.dropout(attn_weights, p=self.dropout, training=self.training)

        attn = torch.bmm(attn_weights, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)

        if need_weights:
            # average attention weights over heads
            attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
            attn_weights = attn_weights.sum(dim=1) / self.num_heads
        else:
            attn_weights = None

        return attn, attn_weights

    def in_proj_qkv(self, query):
        # Full projection split into three equal chunks: q, k, v.
        return self._in_proj(query).chunk(3, dim=-1)

    def in_proj_kv(self, key):
        # Rows [embed_dim:] of the packed matrix, split into k and v.
        return self._in_proj(key, start=self.embed_dim).chunk(2, dim=-1)

    def in_proj_q(self, query):
        return self._in_proj(query, end=self.embed_dim)

    def in_proj_k(self, key):
        return self._in_proj(key, start=self.embed_dim, end=2 * self.embed_dim)

    def in_proj_v(self, value):
        return self._in_proj(value, start=2 * self.embed_dim)

    def _in_proj(self, input, start=0, end=None):
        # Apply a row-slice of the packed QKV projection.
        weight = self.in_proj_weight
        bias = self.in_proj_bias
        weight = weight[start:end, :]
        if bias is not None:
            bias = bias[start:end]
        return F.linear(input, weight, bias)

    def reorder_incremental_state(self, incremental_state, new_order):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                # dim 1 is the batch dimension of the cached (T, B, C) tensors
                input_buffer[k] = input_buffer[k].index_select(1, new_order)
            self._set_input_buffer(incremental_state, input_buffer)

    def _get_input_buffer(self, incremental_state):
        return utils.get_incremental_state(
            self,
            incremental_state,
            'attn_state',
        ) or {}

    def _set_input_buffer(self, incremental_state, buffer):
        utils.set_incremental_state(
            self,
            incremental_state,
            'attn_state',
            buffer,
        )
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/modules/scalar_bias.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#

import torch


class ScalarBias(torch.autograd.Function):
    """
    Adds a vector of scalars, used in self-attention mechanism to allow
    the model to optionally attend to this vector instead of the past
    """

    @staticmethod
    def forward(ctx, input, dim, bias_init):
        # Grow `dim` by one by prepending a slot filled with `bias_init`.
        bias_shape = list(input.size())
        bias_shape[dim] = 1
        bias_slot = input.new(*bias_shape).fill_(bias_init)
        ctx.dim = dim
        return torch.cat([bias_slot, input], dim=dim)

    @staticmethod
    def backward(ctx, grad):
        # Drop the gradient of the prepended slot; `dim` and `bias_init`
        # are non-tensor arguments and receive no gradient.
        trimmed = torch.narrow(grad, ctx.dim, 1, grad.size(ctx.dim) - 1)
        return trimmed, None, None


def scalar_bias(input, dim, bias_init=0):
    """Prepend a ``bias_init``-filled slot to ``input`` along ``dim``."""
    return ScalarBias.apply(input, dim, bias_init)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/modules/sinusoidal_positional_embedding.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import math

import torch
import torch.nn as nn
import torch.onnx.operators

from fairseq import utils


class SinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.

    Padding symbols are ignored, but it is necessary to specify whether padding
    is added on the left side (left_pad=True) or right side (left_pad=False).
    """

    def __init__(self, embedding_dim, padding_idx, left_pad, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        self.left_pad = left_pad
        # Precomputed table; regenerated lazily in forward() when a longer
        # sequence is seen (self.weights is not a registered buffer on purpose
        # so it is excluded from state dicts).
        self.weights = SinusoidalPositionalEmbedding.get_embedding(
            init_size,
            embedding_dim,
            padding_idx,
        )
        self.onnx_trace = False
        # Dummy buffer used only to track the module's dtype/device.
        self.register_buffer('_float_tensor', torch.FloatTensor(1))

    def prepare_for_onnx_export_(self):
        self.onnx_trace = True

    @staticmethod
    def get_embedding(num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.

        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        return emb

    def forward(self, input, incremental_state=None, timestep=None):
        """Input is expected to be of size [bsz x seqlen]."""
        bsz, seq_len = torch.onnx.operators.shape_as_tensor(input)
        # Real positions start at padding_idx + 1 (see utils.make_positions).
        max_pos = self.padding_idx + 1 + seq_len
        if self.weights is None or max_pos > self.weights.size(0):
            # recompute/expand embeddings if needed
            self.weights = SinusoidalPositionalEmbedding.get_embedding(
                max_pos,
                self.embedding_dim,
                self.padding_idx,
            )
        self.weights = self.weights.type_as(self._float_tensor)

        if incremental_state is not None:
            # positions is the same for every token when decoding a single step
            pos = (timestep.int() + 1).long() if timestep is not None else seq_len
            if self.onnx_trace:
                return self.weights[self.padding_idx + pos, :].unsqueeze(1).repeat(bsz, 1, 1)
            return self.weights[self.padding_idx + pos, :].expand(bsz, 1, -1)

        positions = utils.make_positions(input, self.padding_idx, self.left_pad, self.onnx_trace)
        if self.onnx_trace:
            # ONNX path: avoid Python-int shapes so the graph stays dynamic.
            flat_embeddings = self.weights.detach().index_select(0, positions.view(-1))
            embedding_shape = torch.cat((bsz.view(1), seq_len.view(1), torch.LongTensor([-1])))
            embeddings = torch.onnx.operators.reshape_from_tensor_shape(flat_embeddings, embedding_shape)
            return embeddings
        return self.weights.index_select(0, positions.view(-1)).view(bsz, seq_len, -1).detach()

    def max_positions(self):
        """Maximum number of supported positions."""
        return int(1e5)  # an arbitrary large number
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/multiprocessing_pdb.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import multiprocessing import os import pdb import sys class MultiprocessingPdb(pdb.Pdb): """A Pdb wrapper that works in a multiprocessing environment. Usage: `from fairseq import pdb; pdb.set_trace()` """ _stdin_fd = sys.stdin.fileno() _stdin = None _stdin_lock = multiprocessing.Lock() def __init__(self): pdb.Pdb.__init__(self, nosigint=True) def _cmdloop(self): stdin_bak = sys.stdin with self._stdin_lock: try: if not self._stdin: self._stdin = os.fdopen(self._stdin_fd) sys.stdin = self._stdin self.cmdloop() finally: sys.stdin = stdin_bak pdb = MultiprocessingPdb()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/__init__.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import importlib import os from .fairseq_optimizer import FairseqOptimizer from .fp16_optimizer import FP16Optimizer OPTIMIZER_REGISTRY = {} OPTIMIZER_CLASS_NAMES = set() def build_optimizer(args, params): params = list(filter(lambda p: p.requires_grad, params)) return OPTIMIZER_REGISTRY[args.optimizer](args, params) def register_optimizer(name): """Decorator to register a new optimizer.""" def register_optimizer_cls(cls): if name in OPTIMIZER_REGISTRY: raise ValueError('Cannot register duplicate optimizer ({})'.format(name)) if not issubclass(cls, FairseqOptimizer): raise ValueError('Optimizer ({}: {}) must extend FairseqOptimizer'.format(name, cls.__name__)) if cls.__name__ in OPTIMIZER_CLASS_NAMES: # We use the optimizer class name as a unique identifier in # checkpoints, so all optimizer must have unique class names. raise ValueError('Cannot register optimizer with duplicate class name ({})'.format(cls.__name__)) OPTIMIZER_REGISTRY[name] = cls OPTIMIZER_CLASS_NAMES.add(cls.__name__) return cls return register_optimizer_cls # automatically import any Python files in the optim/ directory for file in os.listdir(os.path.dirname(__file__)): if file.endswith('.py') and not file.startswith('_'): module = file[:file.find('.py')] importlib.import_module('fairseq.optim.' + module)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/adagrad.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('adagrad') class Adagrad(FairseqOptimizer): def __init__(self, args, params): super().__init__(args, params) self._optimizer = torch.optim.Adagrad(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'weight_decay': self.args.weight_decay, }
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/adam.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import math

import torch
import torch.optim

from . import FairseqOptimizer, register_optimizer


@register_optimizer('adam')
class FairseqAdam(FairseqOptimizer):
    """FairseqOptimizer wrapper around the local ``Adam`` implementation below."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = Adam(params, **self.optimizer_config)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B',
                            help='betas for Adam optimizer')
        parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D',
                            help='epsilon for Adam optimizer')

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        # NOTE: --adam-betas arrives as a string like '(0.9, 0.999)' and is
        # eval'd into a tuple here (trusted CLI input).
        return {
            'lr': self.args.lr[0],
            'betas': eval(self.args.adam_betas),
            'eps': self.args.adam_eps,
            'weight_decay': self.args.weight_decay,
        }


class Adam(torch.optim.Optimizer):
    """Implements Adam algorithm.

    This implementation is modified from torch.optim.Adam based on:
    `Fixed Weight Decay Regularization in Adam`
    (see https://arxiv.org/abs/1711.05101)

    It has been proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        amsgrad (boolean, optional): whether to use the AMSGrad variant of this
            algorithm from the paper `On the Convergence of Adam and Beyond`_

    .. _Adam\: A Method for Stochastic Optimization:
        https://arxiv.org/abs/1412.6980
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0, amsgrad=False):
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay, amsgrad=amsgrad)
        super(Adam, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                amsgrad = group['amsgrad']

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                    if amsgrad:
                        # Maintains max of all exp. moving avg. of sq. grad. values
                        state['max_exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                if amsgrad:
                    max_exp_avg_sq = state['max_exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                exp_avg.mul_(beta1).add_(1 - beta1, grad)
                exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
                if amsgrad:
                    # Maintains the maximum of all 2nd moment running avg. till now
                    torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
                    # Use the max. for normalizing running avg. of gradient
                    denom = max_exp_avg_sq.sqrt().add_(group['eps'])
                else:
                    denom = exp_avg_sq.sqrt().add_(group['eps'])

                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']
                step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1

                if group['weight_decay'] != 0:
                    # Decoupled weight decay (AdamW-style): applied directly to
                    # the weights, not folded into the gradient.
                    p.data.add_(-group['weight_decay'] * group['lr'], p.data)

                p.data.addcdiv_(-step_size, exp_avg, denom)

        return loss
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/fairseq_optimizer.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import math

import torch


class FairseqOptimizer(object):
    """Base class wrapping a ``torch.optim.Optimizer``.

    Subclasses set ``self._optimizer`` in ``__init__`` and implement
    ``optimizer_config``; everything else delegates to the wrapped optimizer.
    """

    def __init__(self, args, params):
        super().__init__()
        self.args = args
        self.params = list(params)

    @staticmethod
    def add_args(parser):
        """Add optimizer-specific arguments to the parser."""
        pass

    @property
    def optimizer(self):
        """Return a torch.optim.optimizer.Optimizer instance."""
        if not hasattr(self, '_optimizer'):
            raise NotImplementedError
        if not isinstance(self._optimizer, torch.optim.Optimizer):
            raise ValueError('_optimizer must be an instance of torch.optim.Optimizer')
        return self._optimizer

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer
        args stored in checkpoints. This allows us to load a checkpoint and
        resume training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        raise NotImplementedError

    def get_lr(self):
        """Return the current learning rate."""
        return self.optimizer.param_groups[0]['lr']

    def set_lr(self, lr):
        """Set the learning rate."""
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

    def state_dict(self):
        """Return the optimizer's state dict."""
        return self.optimizer.state_dict()

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing optimizer
        instance (e.g., learning rate) over that found in the state_dict. This
        allows us to resume training from a checkpoint using a new set of
        optimizer args.
        """
        self.optimizer.load_state_dict(state_dict)

        if optimizer_overrides is not None and len(optimizer_overrides) > 0:
            # override learning rate, momentum, etc. with latest values
            for group in self.optimizer.param_groups:
                group.update(optimizer_overrides)

    def backward(self, loss):
        """Computes the sum of gradients of the given loss w.r.t. the parameters."""
        loss.backward()

    def multiply_grads(self, c):
        """Multiplies grads by a constant ``c``."""
        for p in self.params:
            p.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm."""
        if max_norm > 0:
            return torch.nn.utils.clip_grad_norm_(self.params, max_norm)
        else:
            # max_norm <= 0 disables clipping; still return the total norm
            # so callers can log it.
            return math.sqrt(sum(p.grad.data.norm()**2 for p in self.params))

    def step(self, closure=None):
        """Performs a single optimization step."""
        self.optimizer.step(closure)

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self.optimizer.zero_grad()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/fp16_optimizer.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch

from fairseq import optim, utils


class DynamicLossScaler:
    """Dynamically adjust the FP16 loss scale.

    The scale is divided by ``scale_factor`` whenever an overflow (inf/nan
    gradient norm) is observed, and multiplied by ``scale_factor`` after
    every ``scale_window`` consecutive overflow-free updates.
    """

    def __init__(self, init_scale=2.**15, scale_factor=2., scale_window=2000):
        self.loss_scale = init_scale
        self.scale_factor = scale_factor
        self.scale_window = scale_window
        self._iter = 0
        self._last_overflow_iter = -1

    def update_scale(self, overflow):
        """Adjust the loss scale after one optimization step."""
        if overflow:
            self.loss_scale /= self.scale_factor
            self._last_overflow_iter = self._iter
        elif (self._iter - self._last_overflow_iter) % self.scale_window == 0:
            # long enough since the last overflow: try a larger scale again
            self.loss_scale *= self.scale_factor
        self._iter += 1

    @staticmethod
    def has_overflow(grad_norm):
        """Return True if *grad_norm* is inf or nan (nan compares != itself)."""
        if grad_norm == float('inf') or grad_norm != grad_norm:
            return True
        return False


class FP16Optimizer(optim.FairseqOptimizer):
    """Wrap an FP32 optimizer to train a model with FP16 parameters.

    Keeps a flat FP32 master copy of the FP16 model parameters. Gradients
    are synced FP16 -> FP32 before each step, and updated parameters are
    copied FP32 -> FP16 after each step. A :class:`DynamicLossScaler`
    guards against FP16 gradient underflow/overflow.
    """

    def __init__(self, args, params, fp32_optimizer, fp32_params):
        super().__init__(args, params)
        self.fp32_optimizer = fp32_optimizer
        self.fp32_params = fp32_params
        # Fix: initialize the sync flag here. Previously it was only set in
        # backward()/zero_grad(), so calling clip_grad_norm(), step() or
        # multiply_grads() before the first backward() raised AttributeError.
        self._needs_sync = False
        self.scaler = DynamicLossScaler(
            init_scale=args.fp16_init_scale,
            scale_window=(2**14 / args.distributed_world_size),
        )

    @staticmethod
    def build_optimizer(args, params):
        """Flatten *params* into one FP32 master tensor and build the wrapped
        FP32 optimizer over it."""
        # create FP32 copy of parameters and grads
        total_param_size = sum(p.data.numel() for p in params)
        fp32_params = params[0].new(0).float().new(total_param_size)
        offset = 0
        for p in params:
            numel = p.data.numel()
            fp32_params[offset:offset+numel].copy_(p.data.view(-1))
            offset += numel
        fp32_params = torch.nn.Parameter(fp32_params)
        fp32_params.grad = fp32_params.data.new(total_param_size)
        fp32_optimizer = optim.build_optimizer(args, [fp32_params])
        return FP16Optimizer(args, params, fp32_optimizer, fp32_params)

    @property
    def optimizer(self):
        return self.fp32_optimizer.optimizer

    @property
    def optimizer_config(self):
        return self.fp32_optimizer.optimizer_config

    def get_lr(self):
        return self.fp32_optimizer.get_lr()

    def set_lr(self, lr):
        self.fp32_optimizer.set_lr(lr)

    def state_dict(self):
        """Return the optimizer's state dict (includes the loss scale)."""
        state_dict = self.fp32_optimizer.state_dict()
        state_dict['loss_scale'] = self.scaler.loss_scale
        return state_dict

    def load_state_dict(self, state_dict, optimizer_overrides=None):
        """Load an optimizer state dict.

        In general we should prefer the configuration of the existing
        optimizer instance (e.g., learning rate) over that found in the
        state_dict. This allows us to resume training from a checkpoint using
        a new set of optimizer args.
        """
        if 'loss_scale' in state_dict:
            self.scaler.loss_scale = state_dict['loss_scale']
        self.fp32_optimizer.load_state_dict(state_dict, optimizer_overrides)

    def backward(self, loss):
        """Scale the loss and backpropagate; mark FP16 grads as dirty."""
        loss = loss * self.scaler.loss_scale
        loss.backward()
        self._needs_sync = True

    def _sync_fp16_grads_to_fp32(self, multiply_grads=1.):
        """Copy FP16 grads into the flat FP32 grad buffer and un-scale them."""
        if self._needs_sync:
            # copy FP16 grads to FP32
            offset = 0
            for p in self.params:
                if not p.requires_grad:
                    continue
                grad_data = p.grad.data if p.grad is not None else p.data.new_zeros(p.data.shape)
                numel = grad_data.numel()
                self.fp32_params.grad.data[offset:offset+numel].copy_(grad_data.view(-1))
                offset += numel

            # correct for dynamic loss scaler
            self.fp32_params.grad.data.mul_(multiply_grads / self.scaler.loss_scale)

            self._needs_sync = False

    def multiply_grads(self, c):
        """Multiplies grads by a constant ``c``."""
        if self._needs_sync:
            # fold the multiplier into the pending sync to avoid a second pass
            self._sync_fp16_grads_to_fp32(c)
        else:
            self.fp32_params.grad.data.mul_(c)

    def clip_grad_norm(self, max_norm):
        """Clips gradient norm and updates dynamic loss scaler.

        Raises OverflowError when an inf/nan gradient is detected so the
        caller can skip this update and retry at the reduced loss scale.
        """
        self._sync_fp16_grads_to_fp32()
        grad_norm = utils.clip_grad_norm_(self.fp32_params.grad.data, max_norm)

        # detect overflow and adjust loss scale
        overflow = DynamicLossScaler.has_overflow(grad_norm)
        self.scaler.update_scale(overflow)
        if overflow:
            if self.scaler.loss_scale <= self.args.min_loss_scale:
                raise Exception((
                    'Minimum loss scale reached ({}). Your loss is probably exploding. '
                    'Try lowering the learning rate, using gradient clipping or '
                    'increasing the batch size.'
                ).format(self.args.min_loss_scale))
            raise OverflowError('setting loss scale to: ' + str(self.scaler.loss_scale))
        return grad_norm

    def step(self, closure=None):
        """Performs a single optimization step."""
        self._sync_fp16_grads_to_fp32()
        self.fp32_optimizer.step(closure)

        # copy FP32 params back into FP16 model
        offset = 0
        for p in self.params:
            if not p.requires_grad:
                continue
            numel = p.data.numel()
            p.data.copy_(self.fp32_params.data[offset:offset+numel].view_as(p.data))
            offset += numel

    def zero_grad(self):
        """Clears the gradients of all optimized parameters."""
        self.fp32_optimizer.zero_grad()
        for p in self.params:
            if p.grad is not None:
                p.grad.detach_()
                p.grad.zero_()
        self._needs_sync = False
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/__init__.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import importlib import os from .fairseq_lr_scheduler import FairseqLRScheduler LR_SCHEDULER_REGISTRY = {} def build_lr_scheduler(args, optimizer): return LR_SCHEDULER_REGISTRY[args.lr_scheduler](args, optimizer) def register_lr_scheduler(name): """Decorator to register a new LR scheduler.""" def register_lr_scheduler_cls(cls): if name in LR_SCHEDULER_REGISTRY: raise ValueError('Cannot register duplicate LR scheduler ({})'.format(name)) if not issubclass(cls, FairseqLRScheduler): raise ValueError('LR Scheduler ({}: {}) must extend FairseqLRScheduler'.format(name, cls.__name__)) LR_SCHEDULER_REGISTRY[name] = cls return cls return register_lr_scheduler_cls # automatically import any Python files in the optim/lr_scheduler/ directory for file in os.listdir(os.path.dirname(__file__)): if file.endswith('.py') and not file.startswith('_'): module = file[:file.find('.py')] importlib.import_module('fairseq.optim.lr_scheduler.' + module)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/cosine_lr_scheduler.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import math

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('cosine')
class CosineSchedule(FairseqLRScheduler):
    """Assign LR based on a cyclical schedule that follows the cosine function.

    See https://arxiv.org/pdf/1608.03983.pdf for details
    (SGDR: cosine annealing with warm restarts).

    We also support a warmup phase where we linearly increase the learning
    rate from some initial learning rate (`--warmup-init-lr`) until the
    configured learning rate (`--lr`).

    During warmup:

        lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
        lr = lrs[update_num]

    After warmup:

        lr = lr_min + 0.5*(lr_max - lr_min)*(1 + cos(t_curr / t_i))

    where t_curr is current percentage of updates within the current period
    range and t_i is the current period range, which is scaled by t_mult
    after every iteration.
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with cosine.'
                ' Consider --lr-scheduler=fixed instead.'
            )

        # warmup ramps up to max_lr, the peak of the first cosine cycle
        warmup_end_lr = args.max_lr
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = args.lr[0]

        # --lr provides the valley (minimum) of the cosine cycles
        self.min_lr = args.lr[0]
        self.max_lr = args.max_lr

        assert self.max_lr > self.min_lr, 'max_lr must be more than lr'

        self.t_mult = args.t_mult
        self.period = args.lr_period_updates

        if args.warmup_updates > 0:
            # linearly warmup for the first args.warmup_updates
            self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates
        else:
            self.lr_step = 1

        self.warmup_updates = args.warmup_updates
        self.lr_shrink = args.lr_shrink

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=0, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--t-mult', default=1, type=float, metavar='LR',
                            help='factor to grow the length of each period')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
                            help='initial number of updates per period')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            # linear warmup phase
            self.lr = self.args.warmup_init_lr + num_updates * self.lr_step
        else:
            curr_updates = num_updates - self.args.warmup_updates
            if self.t_mult != 1:
                # geometrically growing periods: solve for the current cycle
                # index i, its length t_i, and the offset t_curr within it
                i = math.floor(math.log(1 - curr_updates / self.period * (1 - self.t_mult), self.t_mult))
                t_i = self.t_mult ** i * self.period
                t_curr = curr_updates - (1 - self.t_mult ** i) / (1 - self.t_mult) * self.period
            else:
                # constant period length
                i = math.floor(curr_updates / self.period)
                t_i = self.period
                t_curr = curr_updates - (self.period * i)

            # shrink both bounds once per completed cycle
            lr_shrink = self.lr_shrink ** i
            min_lr = self.min_lr * lr_shrink
            max_lr = self.max_lr * lr_shrink

            self.lr = min_lr + 0.5 * (max_lr - min_lr) * (1 + math.cos(math.pi * t_curr / t_i))

        self.optimizer.set_lr(self.lr)
        return self.lr
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/fairseq_lr_scheduler.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from .. import FairseqOptimizer class FairseqLRScheduler(object): def __init__(self, args, optimizer): super().__init__() if not isinstance(optimizer, FairseqOptimizer): raise ValueError('optimizer must be an instance of FairseqOptimizer') self.args = args self.optimizer = optimizer self.best = None @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" pass def state_dict(self): """Return the LR scheduler state dict.""" return {'best': self.best} def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.best = state_dict['best'] def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" if val_loss is not None: if self.best is None: self.best = val_loss else: self.best = min(self.best, val_loss) def step_update(self, num_updates): """Update the learning rate after each update.""" return self.optimizer.get_lr()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/fixed_schedule.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from . import FairseqLRScheduler, register_lr_scheduler @register_lr_scheduler('fixed') class FixedSchedule(FairseqLRScheduler): """Decay the LR on a fixed schedule.""" def __init__(self, args, optimizer): super().__init__(args, optimizer) # set defaults args.warmup_updates = getattr(args, 'warmup_updates', 0) or 0 self.lr = args.lr[0] if args.warmup_updates > 0: self.warmup_factor = 1. / args.warmup_updates else: self.warmup_factor = 1 @staticmethod def add_args(parser): """Add arguments to the parser for this LR scheduler.""" parser.add_argument('--force-anneal', '--fa', type=int, metavar='N', help='force annealing at specified epoch') parser.add_argument('--warmup-updates', default=0, type=int, metavar='N', help='warmup the learning rate linearly for the first N updates') def get_next_lr(self, epoch): lrs = self.args.lr if self.args.force_anneal is None or epoch < self.args.force_anneal: # use fixed LR schedule next_lr = lrs[min(epoch, len(lrs) - 1)] else: # annneal based on lr_shrink next_lr = lrs[-1] * self.args.lr_shrink ** (epoch + 1 - self.args.force_anneal) return next_lr def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" super().step(epoch, val_loss) self.lr = self.get_next_lr(epoch) self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr() def step_update(self, num_updates): """Update the learning rate after each update.""" if self.args.warmup_updates > 0 and num_updates <= self.args.warmup_updates: self.warmup_factor = num_updates / float(self.args.warmup_updates) self.optimizer.set_lr(self.warmup_factor * self.lr) return self.optimizer.get_lr()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/inverse_square_root_schedule.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('inverse_sqrt')
class InverseSquareRootSchedule(FairseqLRScheduler):
    """Decay the LR based on the inverse square root of the update number.

    We also support a warmup phase where we linearly increase the learning
    rate from some initial learning rate (`--warmup-init-lr`) until the
    configured learning rate (`--lr`). Thereafter we decay proportional to
    the number of updates, with a decay factor set to align with the
    configured learning rate.

    During warmup:

        lrs = torch.linspace(args.warmup_init_lr, args.lr, args.warmup_updates)
        lr = lrs[update_num]

    After warmup:

        lr = decay_factor / sqrt(update_num)

    where

        decay_factor = args.lr * sqrt(args.warmup_updates)
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with inverse_sqrt.'
                ' Consider --lr-scheduler=fixed instead.'
            )
        warmup_end_lr = args.lr[0]
        if args.warmup_init_lr < 0:
            args.warmup_init_lr = warmup_end_lr

        # linearly warmup for the first args.warmup_updates
        # NOTE(review): assumes args.warmup_updates > 0 (default is 4000);
        # a value of 0 would divide by zero here — confirm callers never pass 0
        self.lr_step = (warmup_end_lr - args.warmup_init_lr) / args.warmup_updates

        # then, decay prop. to the inverse square root of the update number
        self.decay_factor = warmup_end_lr * args.warmup_updates**0.5

        # initial learning rate
        self.lr = args.warmup_init_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--warmup-updates', default=4000, type=int, metavar='N',
                            help='warmup the learning rate linearly for the first N updates')
        parser.add_argument('--warmup-init-lr', default=-1, type=float, metavar='LR',
                            help='initial learning rate during warmup phase; default is args.lr')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        if num_updates < self.args.warmup_updates:
            # linear warmup phase
            self.lr = self.args.warmup_init_lr + num_updates*self.lr_step
        else:
            # inverse-sqrt decay phase
            self.lr = self.decay_factor * num_updates**-0.5
        self.optimizer.set_lr(self.lr)
        return self.lr
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/reduce_lr_on_plateau.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch.optim.lr_scheduler from . import FairseqLRScheduler, register_lr_scheduler @register_lr_scheduler('reduce_lr_on_plateau') class ReduceLROnPlateau(FairseqLRScheduler): """Decay the LR by a factor every time the validation loss plateaus.""" def __init__(self, args, optimizer): super().__init__(args, optimizer) if len(args.lr) > 1: raise ValueError( 'Cannot use a fixed learning rate schedule with reduce_lr_on_plateau.' ' Consider --lr-scheduler=fixed instead.' ) self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau( self.optimizer.optimizer, patience=0, factor=args.lr_shrink) def state_dict(self): """Return the LR scheduler state dict.""" return { 'best': self.lr_scheduler.best, 'last_epoch': self.lr_scheduler.last_epoch, } def load_state_dict(self, state_dict): """Load an LR scheduler state dict.""" self.lr_scheduler.best = state_dict['best'] if 'last_epoch' in state_dict: self.lr_scheduler.last_epoch = state_dict['last_epoch'] def step(self, epoch, val_loss=None): """Update the learning rate at the end of the given epoch.""" if val_loss is not None: self.lr_scheduler.step(val_loss, epoch) else: self.lr_scheduler.last_epoch = epoch return self.optimizer.get_lr()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/lr_scheduler/triangular_lr_scheduler.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import math

from . import FairseqLRScheduler, register_lr_scheduler


@register_lr_scheduler('triangular')
class TriangularSchedule(FairseqLRScheduler):
    """Assign LR based on a triangular cyclical schedule.

    See https://arxiv.org/pdf/1506.01186.pdf for details
    (cyclical learning rates).
    """

    def __init__(self, args, optimizer):
        super().__init__(args, optimizer)
        if len(args.lr) > 1:
            raise ValueError(
                'Cannot use a fixed learning rate schedule with triangular.'
                ' Consider --lr-scheduler=fixed instead.'
            )

        lr = args.lr[0]

        assert args.max_lr > lr, 'max_lr must be more than lr'
        self.min_lr = lr
        self.max_lr = args.max_lr
        # stepsize is half of a full cycle (rise + fall)
        self.stepsize = args.lr_period_updates // 2
        self.lr_shrink = args.lr_shrink
        self.shrink_min = args.shrink_min

        # initial learning rate
        self.lr = self.min_lr
        self.optimizer.set_lr(self.lr)

    @staticmethod
    def add_args(parser):
        """Add arguments to the parser for this LR scheduler."""
        parser.add_argument('--max-lr', required=True, type=float, metavar='LR',
                            help='max learning rate, must be more than args.lr')
        parser.add_argument('--lr-period-updates', default=5000, type=float, metavar='LR',
                            help='initial number of updates per period (cycle length)')
        parser.add_argument('--shrink-min', action='store_true',
                            help='if set, also shrinks min lr')

    def step(self, epoch, val_loss=None):
        """Update the learning rate at the end of the given epoch."""
        super().step(epoch, val_loss)
        # we don't change the learning rate at epoch boundaries
        return self.optimizer.get_lr()

    def step_update(self, num_updates):
        """Update the learning rate after each update."""
        # index of the current cycle (one cycle spans 2 * stepsize updates)
        cycle = math.floor(num_updates / (2 * self.stepsize))

        # shrink the max (and optionally the min) bound once per cycle
        lr_shrink = self.lr_shrink ** cycle
        max_lr = self.max_lr * lr_shrink
        if self.shrink_min:
            min_lr = self.min_lr * lr_shrink
        else:
            min_lr = self.min_lr

        # x in [0, 1]: distance from the peak within the current cycle
        x = abs(num_updates / self.stepsize - 2 * (cycle + 1) + 1)
        self.lr = min_lr + (max_lr - min_lr) * max(0, (1 - x))

        self.optimizer.set_lr(self.lr)
        return self.lr
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/nag.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from torch.optim.optimizer import Optimizer, required

from . import FairseqOptimizer, register_optimizer


@register_optimizer('nag')
class FairseqNAG(FairseqOptimizer):
    """fairseq wrapper around the :class:`NAG` optimizer below."""

    def __init__(self, args, params):
        super().__init__(args, params)
        self._optimizer = NAG(params, **self.optimizer_config)

    @property
    def optimizer_config(self):
        """
        Return a kwarg dictionary that will be used to override optimizer args
        stored in checkpoints. This allows us to load a checkpoint and resume
        training using a different set of optimizer args, e.g., with a
        different learning rate.
        """
        return {
            'lr': self.args.lr[0],
            'momentum': self.args.momentum,
            'weight_decay': self.args.weight_decay,
        }


class NAG(Optimizer):
    """Nesterov accelerated gradient optimizer.

    ``lr_old`` is stored per parameter group so the momentum buffer can be
    rescaled (``lr_correct`` below) when the learning rate changes between
    consecutive steps.
    """

    def __init__(self, params, lr=required, momentum=0, weight_decay=0):
        defaults = dict(lr=lr, lr_old=lr, momentum=momentum, weight_decay=weight_decay)
        super(NAG, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            lr = group['lr']
            lr_old = group.get('lr_old', lr)
            # ratio of the current to previous LR, used to rescale momentum
            lr_correct = lr / lr_old

            for p in group['params']:
                if p.grad is None:
                    continue

                d_p = p.grad.data
                param_state = self.state[p]
                if 'momentum_buffer' not in param_state:
                    # lazily create a zero buffer with the gradient's shape
                    param_state['momentum_buffer'] = d_p.clone().zero_()

                buf = param_state['momentum_buffer']

                if weight_decay != 0:
                    # weight decay is applied multiplicatively to the weights
                    # rather than added to the gradient
                    p.data.mul_(1 - lr * weight_decay)
                # Nesterov-style update: apply momentum lookahead then the
                # scaled gradient (note the statement order matters here)
                p.data.add_(momentum * momentum * lr_correct, buf)
                p.data.add_(-(1 + momentum) * lr, d_p)

                # update the momentum buffer for the next step
                buf.mul_(momentum * lr_correct).add_(-lr, d_p)

            # remember this step's LR for the next lr_correct computation
            group['lr_old'] = lr

        return loss
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/optim/sgd.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch.optim from . import FairseqOptimizer, register_optimizer @register_optimizer('sgd') class SGD(FairseqOptimizer): def __init__(self, args, params): super().__init__(args, params) self._optimizer = torch.optim.SGD(params, **self.optimizer_config) @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'momentum': self.args.momentum, 'weight_decay': self.args.weight_decay, }
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/options.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

"""Command-line option parsing for fairseq tools (train/generate/eval-lm)."""

import argparse

import torch

from fairseq.criterions import CRITERION_REGISTRY
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_CONFIG_REGISTRY
from fairseq.optim import OPTIMIZER_REGISTRY
from fairseq.optim.lr_scheduler import LR_SCHEDULER_REGISTRY
from fairseq.tasks import TASK_REGISTRY


def get_training_parser(default_task='translation'):
    # training needs dataset, distributed, model, optimization and
    # checkpointing options on top of the common ones
    parser = get_parser('Trainer', default_task)
    add_dataset_args(parser, train=True)
    add_distributed_training_args(parser)
    add_model_args(parser)
    add_optimization_args(parser)
    add_checkpoint_args(parser)
    return parser


def get_generation_parser(interactive=False, default_task='translation'):
    parser = get_parser('Generation', default_task)
    add_dataset_args(parser, gen=True)
    add_generation_args(parser)
    if interactive:
        add_interactive_args(parser)
    return parser


def get_interactive_generation_parser(default_task='translation'):
    return get_generation_parser(interactive=True, default_task=default_task)


def get_eval_lm_parser(default_task='language_modeling'):
    parser = get_parser('Evaluate Language Model', default_task)
    add_dataset_args(parser, gen=True)
    add_eval_lm_args(parser)
    return parser


def eval_str_list(x, type=float):
    """Parse a string like '[0.1, 0.2]' (or a scalar) into a list of *type*.

    NOTE(review): uses eval() on the raw string. This is only acceptable
    because the value comes from the local command line, never from
    untrusted input.
    """
    if x is None:
        return None
    if isinstance(x, str):
        x = eval(x)
    try:
        return list(map(type, x))
    except TypeError:
        # x is a scalar, not an iterable
        return [type(x)]


def eval_bool(x, default=False):
    # NOTE(review): same eval() caveat as eval_str_list above
    if x is None:
        return default
    try:
        return bool(eval(x))
    except TypeError:
        return default


def parse_args_and_arch(parser, input_args=None, parse_known=False):
    # The parser doesn't know about model/criterion/optimizer-specific args, so
    # we parse twice. First we parse the model/criterion/optimizer, then we
    # parse a second time after adding the *-specific arguments.
    # If input_args is given, we will parse those args instead of sys.argv.
    args, _ = parser.parse_known_args(input_args)

    # Add model-specific args to parser.
    if hasattr(args, 'arch'):
        model_specific_group = parser.add_argument_group(
            'Model-specific configuration',
            # Only include attributes which are explicitly given as command-line
            # arguments or which have default values.
            argument_default=argparse.SUPPRESS,
        )
        ARCH_MODEL_REGISTRY[args.arch].add_args(model_specific_group)

    # Add *-specific args to parser.
    if hasattr(args, 'criterion'):
        CRITERION_REGISTRY[args.criterion].add_args(parser)
    if hasattr(args, 'optimizer'):
        OPTIMIZER_REGISTRY[args.optimizer].add_args(parser)
    if hasattr(args, 'lr_scheduler'):
        LR_SCHEDULER_REGISTRY[args.lr_scheduler].add_args(parser)
    if hasattr(args, 'task'):
        TASK_REGISTRY[args.task].add_args(parser)

    # Parse a second time.
    if parse_known:
        args, extra = parser.parse_known_args(input_args)
    else:
        args = parser.parse_args(input_args)
        extra = None

    # Post-process args.
    if hasattr(args, 'lr'):
        args.lr = eval_str_list(args.lr, type=float)
    if hasattr(args, 'update_freq'):
        args.update_freq = eval_str_list(args.update_freq, type=int)
    if hasattr(args, 'max_sentences_valid') and args.max_sentences_valid is None:
        args.max_sentences_valid = args.max_sentences

    # Apply architecture configuration.
    if hasattr(args, 'arch'):
        ARCH_CONFIG_REGISTRY[args.arch](args)

    if parse_known:
        return args, extra
    else:
        return args


def get_parser(desc, default_task='translation'):
    # common options shared by every fairseq entry point
    parser = argparse.ArgumentParser()
    parser.add_argument('--no-progress-bar', action='store_true', help='disable progress bar')
    parser.add_argument('--log-interval', type=int, default=1000, metavar='N',
                        help='log progress every N batches (when progress bar is disabled)')
    parser.add_argument('--log-format', default=None, help='log format to use',
                        choices=['json', 'none', 'simple', 'tqdm'])
    parser.add_argument('--seed', default=1, type=int, metavar='N',
                        help='pseudo random number generator seed')
    parser.add_argument('--fp16', action='store_true', help='use FP16')
    parser.add_argument('--fp16-init-scale', default=2**7, type=int,
                        help='default FP16 loss scale')

    # Task definitions can be found under fairseq/tasks/
    parser.add_argument(
        '--task', metavar='TASK', default=default_task, choices=TASK_REGISTRY.keys(),
        help='task',
    )
    return parser


def add_dataset_args(parser, train=False, gen=False):
    group = parser.add_argument_group('Dataset and data loading')
    group.add_argument('--skip-invalid-size-inputs-valid-test', action='store_true',
                       help='ignore too long or too short lines in valid and test set')
    group.add_argument('--max-tokens', type=int, metavar='N',
                       help='maximum number of tokens in a batch')
    group.add_argument('--max-sentences', '--batch-size', type=int, metavar='N',
                       help='maximum number of sentences in a batch')
    if train:
        group.add_argument('--train-subset', default='train', metavar='SPLIT',
                           choices=['train', 'valid', 'test'],
                           help='data subset to use for training (train, valid, test)')
        group.add_argument('--valid-subset', default='valid', metavar='SPLIT',
                           help='comma separated list of data subsets to use for validation'
                                ' (train, valid, valid1, test, test1)')
        group.add_argument('--max-sentences-valid', type=int, metavar='N',
                           help='maximum number of sentences in a validation batch'
                                ' (defaults to --max-sentences)')
    if gen:
        group.add_argument('--gen-subset', default='test', metavar='SPLIT',
                           help='data subset to generate (train, valid, test)')
        group.add_argument('--num-shards', default=1, type=int, metavar='N',
                           help='shard generation over N shards')
        group.add_argument('--shard-id', default=0, type=int, metavar='ID',
                           help='id of the shard to generate (id < num_shards)')
    return group


def add_distributed_training_args(parser):
    group = parser.add_argument_group('Distributed training')
    group.add_argument('--distributed-world-size', type=int, metavar='N',
                       default=torch.cuda.device_count(),
                       help='total number of GPUs across all nodes (default: all visible GPUs)')
    group.add_argument('--distributed-rank', default=0, type=int,
                       help='rank of the current worker')
    group.add_argument('--distributed-backend', default='nccl', type=str,
                       help='distributed backend')
    # NOTE(review): "connetion" below is a typo for "connection" in the
    # user-visible help text (left untouched here; fix separately)
    group.add_argument('--distributed-init-method', default=None, type=str,
                       help='typically tcp://hostname:port that will be used to '
                            'establish initial connetion')
    group.add_argument('--distributed-port', default=-1, type=int,
                       help='port number (not required if using --distributed-init-method)')
    group.add_argument('--device-id', default=0, type=int,
                       help='which GPU to use (usually configured automatically)')
    group.add_argument('--ddp-backend', default='c10d', type=str,
                       choices=['c10d', 'no_c10d'],
                       help='DistributedDataParallel backend')
    group.add_argument('--bucket-cap-mb', default=150, type=int, metavar='MB',
                       help='bucket size for reduction')
    return group


def add_optimization_args(parser):
    group = parser.add_argument_group('Optimization')
    group.add_argument('--max-epoch', '--me', default=0, type=int, metavar='N',
                       help='force stop training at specified epoch')
    group.add_argument('--max-update', '--mu', default=0, type=int, metavar='N',
                       help='force stop training at specified update')
    group.add_argument('--clip-norm', default=25, type=float, metavar='NORM',
                       help='clip threshold of gradients')
    group.add_argument('--sentence-avg', action='store_true',
                       help='normalize gradients by the number of sentences in a batch'
                            ' (default is to normalize by number of tokens)')
    group.add_argument('--update-freq', default='1', metavar='N',
                       help='update parameters every N_i batches, when in epoch i')

    # Optimizer definitions can be found under fairseq/optim/
    group.add_argument('--optimizer', default='nag', metavar='OPT',
                       choices=OPTIMIZER_REGISTRY.keys(),
                       help='Optimizer')
    group.add_argument('--lr', '--learning-rate', default='0.25', metavar='LR_1,LR_2,...,LR_N',
                       help='learning rate for the first N epochs; all epochs >N using LR_N'
                            ' (note: this may be interpreted differently depending on --lr-scheduler)')
    group.add_argument('--momentum', default=0.99, type=float, metavar='M',
                       help='momentum factor')
    group.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
                       help='weight decay')

    # Learning rate schedulers can be found under fairseq/optim/lr_scheduler/
    group.add_argument('--lr-scheduler', default='reduce_lr_on_plateau',
                       choices=LR_SCHEDULER_REGISTRY.keys(),
                       help='Learning Rate Scheduler')
    group.add_argument('--lr-shrink', default=0.1, type=float, metavar='LS',
                       help='learning rate shrink factor for annealing, lr_new = (lr * lr_shrink)')
    group.add_argument('--min-lr', default=1e-5, type=float, metavar='LR',
                       help='minimum learning rate')
    group.add_argument('--min-loss-scale', default=1e-4, type=float, metavar='D',
                       help='minimum loss scale (for FP16 training)')
    return group


def add_checkpoint_args(parser):
    group = parser.add_argument_group('Checkpointing')
    group.add_argument('--save-dir', metavar='DIR', default='checkpoints',
                       help='path to save checkpoints')
    group.add_argument('--restore-file', default='checkpoint_last.pt',
                       help='filename in save-dir from which to load checkpoint')
    group.add_argument('--reset-optimizer', action='store_true',
                       help='if set, does not load optimizer state from the checkpoint')
    group.add_argument('--reset-lr-scheduler', action='store_true',
                       help='if set, does not load lr scheduler state from the checkpoint')
    group.add_argument('--optimizer-overrides', default="{}", type=str, metavar='DICT',
                       help='a dictionary used to override optimizer args when loading a checkpoint')
    group.add_argument('--save-interval', type=int, default=1, metavar='N',
                       help='save a checkpoint every N epochs')
    group.add_argument('--save-interval-updates', type=int, default=0, metavar='N',
                       help='save a checkpoint (and validate) every N updates')
    group.add_argument('--keep-interval-updates', type=int, default=-1, metavar='N',
                       help='keep last N checkpoints saved with --save-interval-updates')
    group.add_argument('--no-save', action='store_true',
                       help='don\'t save models or checkpoints')
    group.add_argument('--no-epoch-checkpoints', action='store_true',
                       help='only store last and best checkpoints')
    group.add_argument('--validate-interval', type=int, default=1, metavar='N',
                       help='validate every N epochs')
    return group


def add_common_eval_args(group):
    # options shared by generation and LM evaluation
    group.add_argument('--path', metavar='FILE',
                       help='path(s) to model file(s), colon separated')
    group.add_argument('--remove-bpe', nargs='?', const='@@ ', default=None,
                       help='remove BPE tokens before scoring')
    group.add_argument('--cpu', action='store_true', help='generate on CPU')
    group.add_argument('--quiet', action='store_true',
                       help='only print final scores')


def add_eval_lm_args(parser):
    group = parser.add_argument_group('LM Evaluation')
    add_common_eval_args(group)
    group.add_argument('--output-word-probs', action='store_true',
                       help='if set, outputs words and their predicted log probabilities to standard output')
    group.add_argument('--output-word-stats', action='store_true',
                       help='if set, outputs word statistics such as word count, average probability, etc')


def add_generation_args(parser):
    group = parser.add_argument_group('Generation')
    add_common_eval_args(group)
    group.add_argument('--beam', default=5, type=int, metavar='N',
                       help='beam size')
    group.add_argument('--nbest', default=1, type=int, metavar='N',
                       help='number of hypotheses to output')
    group.add_argument('--max-len-a', default=0, type=float, metavar='N',
                       help=('generate sequences of maximum length ax + b, '
                             'where x is the source length'))
    group.add_argument('--max-len-b', default=200, type=int, metavar='N',
                       help=('generate sequences of maximum length ax + b, '
                             'where x is the source length'))
    group.add_argument('--min-len', default=1, type=float, metavar='N',
                       help=('minimum generation length'))
    group.add_argument('--no-early-stop', action='store_true',
                       help=('continue searching even after finalizing k=beam '
                             'hypotheses; this is more correct, but increases '
                             'generation time by 50%%'))
    group.add_argument('--unnormalized', action='store_true',
                       help='compare unnormalized hypothesis scores')
    group.add_argument('--no-beamable-mm', action='store_true',
                       help='don\'t use BeamableMM in attention layers')
    group.add_argument('--lenpen', default=1, type=float,
                       help='length penalty: <1.0 favors shorter, >1.0 favors longer sentences')
    group.add_argument('--unkpen', default=0, type=float,
                       help='unknown word penalty: <0 produces more unks, >0 produces fewer')
    group.add_argument('--replace-unk', nargs='?', const=True, default=None,
                       help='perform unknown replacement (optionally with alignment dictionary)')
    group.add_argument('--score-reference', action='store_true',
                       help='just score the reference translation')
    group.add_argument('--prefix-size', default=0, type=int, metavar='PS',
                       help='initialize generation by target prefix of given length')
    group.add_argument('--sampling', action='store_true',
                       help='sample hypotheses instead of using beam search')
    group.add_argument('--sampling-topk', default=-1, type=int, metavar='PS',
                       help='sample from top K likely next words instead of all words')
    group.add_argument('--sampling-temperature', default=1, type=float, metavar='N',
                       help='temperature for random sampling')
    group.add_argument('--diverse-beam-groups', default=1, type=int, metavar='N',
                       help='number of groups for Diverse Beam Search')
    group.add_argument('--diverse-beam-strength', default=0.5, type=float, metavar='N',
                       help='strength of diversity penalty for Diverse Beam Search')
    group.add_argument('--print-alignment', action='store_true',
                       help='if set, uses attention feedback to compute and print alignment to source tokens')
    group.add_argument('--model-overrides', default="{}", type=str, metavar='DICT',
                       help='a dictionary used to override model args at generation that were used during model training')
    return group


def add_interactive_args(parser):
    group = parser.add_argument_group('Interactive')
    group.add_argument('--buffer-size', default=0, type=int, metavar='N',
                       help='read this many sentences into a buffer before processing them')


def add_model_args(parser):
    group = parser.add_argument_group('Model configuration')

    # Model definitions can be found under fairseq/models/
    #
    # The model architecture can be specified in several ways.
    # In increasing order of priority:
    # 1) model defaults (lowest priority)
    # 2) --arch argument
    # 3) --encoder/decoder-* arguments (highest priority)
    group.add_argument(
        '--arch', '-a', default='fconv', metavar='ARCH', required=True,
        choices=ARCH_MODEL_REGISTRY.keys(),
        help='Model Architecture',
    )

    # Criterion definitions can be found under fairseq/criterions/
    group.add_argument(
        '--criterion', default='cross_entropy', metavar='CRIT',
        choices=CRITERION_REGISTRY.keys(),
        help='Training Criterion',
    )
    return group
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/progress_bar.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. """ Wrapper around various loggers and progress bars (e.g., tqdm). """ from collections import OrderedDict import json from numbers import Number import sys from tqdm import tqdm from fairseq.meters import AverageMeter def build_progress_bar(args, iterator, epoch=None, prefix=None, default='tqdm', no_progress_bar='none'): if args.log_format is None: args.log_format = no_progress_bar if args.no_progress_bar else default if args.log_format == 'tqdm' and not sys.stderr.isatty(): args.log_format = 'simple' if args.log_format == 'json': bar = json_progress_bar(iterator, epoch, prefix, args.log_interval) elif args.log_format == 'none': bar = noop_progress_bar(iterator, epoch, prefix) elif args.log_format == 'simple': bar = simple_progress_bar(iterator, epoch, prefix, args.log_interval) elif args.log_format == 'tqdm': bar = tqdm_progress_bar(iterator, epoch, prefix) else: raise ValueError('Unknown log format: {}'.format(args.log_format)) return bar class progress_bar(object): """Abstract class for progress bars.""" def __init__(self, iterable, epoch=None, prefix=None): self.iterable = iterable self.epoch = epoch self.prefix = '' if epoch is not None: self.prefix += '| epoch {:03d}'.format(epoch) if prefix is not None: self.prefix += ' | {}'.format(prefix) def __enter__(self): return self def __exit__(self, *exc): return False def __iter__(self): raise NotImplementedError def log(self, stats): """Log intermediate stats according to log_interval.""" raise NotImplementedError def print(self, stats): """Print end-of-epoch stats.""" raise NotImplementedError def _str_commas(self, stats): return ', '.join(key + '=' + stats[key].strip() for key in stats.keys()) def _str_pipes(self, stats): return ' 
| '.join(key + ' ' + stats[key].strip() for key in stats.keys()) def _format_stats(self, stats): postfix = OrderedDict(stats) # Preprocess stats according to datatype for key in postfix.keys(): # Number: limit the length of the string if isinstance(postfix[key], Number): postfix[key] = '{:g}'.format(postfix[key]) # Meter: display both current and average value elif isinstance(postfix[key], AverageMeter): postfix[key] = '{:.2f} ({:.2f})'.format( postfix[key].val, postfix[key].avg) # Else for any other type, try to get the string conversion elif not isinstance(postfix[key], str): postfix[key] = str(postfix[key]) # Else if it's a string, don't need to preprocess anything return postfix class json_progress_bar(progress_bar): """Log output in JSON format.""" def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.stats = None def __iter__(self): size = float(len(self.iterable)) for i, obj in enumerate(self.iterable): yield obj if self.stats is not None and i > 0 and \ self.log_interval is not None and i % self.log_interval == 0: update = self.epoch - 1 + float(i / size) if self.epoch is not None else None stats = self._format_stats(self.stats, epoch=self.epoch, update=update) print(json.dumps(stats), flush=True) def log(self, stats): """Log intermediate stats according to log_interval.""" self.stats = stats def print(self, stats): """Print end-of-epoch stats.""" self.stats = stats stats = self._format_stats(self.stats, epoch=self.epoch) print(json.dumps(stats), flush=True) def _format_stats(self, stats, epoch=None, update=None): postfix = OrderedDict() if epoch is not None: postfix['epoch'] = epoch if update is not None: postfix['update'] = update # Preprocess stats according to datatype for key in stats.keys(): # Meter: display both current and average value if isinstance(stats[key], AverageMeter): postfix[key] = stats[key].val postfix[key + '_avg'] = stats[key].avg else: 
postfix[key] = stats[key] return postfix class noop_progress_bar(progress_bar): """No logging.""" def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) def __iter__(self): for obj in self.iterable: yield obj def log(self, stats): """Log intermediate stats according to log_interval.""" pass def print(self, stats): """Print end-of-epoch stats.""" pass class simple_progress_bar(progress_bar): """A minimal logger for non-TTY environments.""" def __init__(self, iterable, epoch=None, prefix=None, log_interval=1000): super().__init__(iterable, epoch, prefix) self.log_interval = log_interval self.stats = None def __iter__(self): size = len(self.iterable) for i, obj in enumerate(self.iterable): yield obj if self.stats is not None and i > 0 and \ self.log_interval is not None and i % self.log_interval == 0: postfix = self._str_commas(self.stats) print('{}: {:5d} / {:d} {}'.format(self.prefix, i, size, postfix), flush=True) def log(self, stats): """Log intermediate stats according to log_interval.""" self.stats = self._format_stats(stats) def print(self, stats): """Print end-of-epoch stats.""" postfix = self._str_pipes(self._format_stats(stats)) print('{} | {}'.format(self.prefix, postfix), flush=True) class tqdm_progress_bar(progress_bar): """Log to tqdm.""" def __init__(self, iterable, epoch=None, prefix=None): super().__init__(iterable, epoch, prefix) self.tqdm = tqdm(iterable, self.prefix, leave=False) def __iter__(self): return iter(self.tqdm) def log(self, stats): """Log intermediate stats according to log_interval.""" self.tqdm.set_postfix(self._format_stats(stats), refresh=False) def print(self, stats): """Print end-of-epoch stats.""" postfix = self._str_pipes(self._format_stats(stats)) self.tqdm.write('{} | {}'.format(self.tqdm.desc, postfix))
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/search.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch class Search(object): def __init__(self, tgt_dict): self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.scores_buf = None self.indices_buf = None self.beams_buf = None def _init_buffers(self, t): if self.scores_buf is None: self.scores_buf = t.new() self.indices_buf = torch.LongTensor().to(device=t.device) self.beams_buf = torch.LongTensor().to(device=t.device) def step(self, step, lprobs, scores, beam_size): """Take a single search step. Args: step: the current search step, starting at 0 lprobs: (bsz x input_beam_size x vocab_size) the model's log-probabilities over the vocabulary at the current step scores: (bsz x input_beam_size x step) the historical model scores of each hypothesis up to this point Return: A tuple of (scores, indices, beams) where: scores: (bsz x output_beam_size) the scores of the chosen elements; output_beam_size can be larger than input_beam_size, e.g., we may return 2*input_beam_size to account for EOS indices: (bsz x output_beam_size) the indices of the chosen elements beams: (bsz x output_beam_size) the hypothesis ids of the chosen elements, in the range [0, input_beam_size) """ raise NotImplementedError class BeamSearch(Search): def __init__(self, tgt_dict): super().__init__(tgt_dict) def step(self, step, lprobs, scores): super()._init_buffers(lprobs) bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() else: # make probs contain cumulative scores for each hypothesis lprobs.add_(scores[:, :, step - 1].unsqueeze(-1)) torch.topk( lprobs.view(bsz, -1), 
k=min( # Take the best 2 x beam_size predictions. We'll choose the first # beam_size of these which don't predict eos to continue with. beam_size * 2, lprobs.view(bsz, -1).size(1) - 1, # -1 so we never select pad ), out=(self.scores_buf, self.indices_buf), ) torch.div(self.indices_buf, vocab_size, out=self.beams_buf) self.indices_buf.fmod_(vocab_size) return self.scores_buf, self.indices_buf, self.beams_buf class DiverseBeamSearch(Search): """Diverse Beam Search. See "Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models" for details. We only implement the Hamming Diversity penalty here, which performed best in the original paper. """ def __init__(self, tgt_dict, num_groups, diversity_strength): super().__init__(tgt_dict) self.num_groups = num_groups self.diversity_strength = -diversity_strength self.diversity_buf = None self.beam = BeamSearch(tgt_dict) def step(self, step, lprobs, scores): super()._init_buffers(lprobs) bsz, beam_size, vocab_size = lprobs.size() if beam_size % self.num_groups != 0: raise ValueError( 'DiverseBeamSearch requires --beam to be divisible by the number of groups' ) group_size = beam_size // self.num_groups # initialize diversity penalty if self.diversity_buf is None: self.diversity_buf = lprobs.new() torch.zeros(lprobs[:, 0, :].size(), out=self.diversity_buf) scores_G, indices_G, beams_G = [], [], [] for g in range(self.num_groups): lprobs_g = lprobs[:, g::self.num_groups, :] scores_g = scores[:, g::self.num_groups, :] if step > 0 else None # apply diversity penalty if g > 0: lprobs_g = torch.add(lprobs_g, self.diversity_strength, self.diversity_buf.unsqueeze(1)) else: lprobs_g = lprobs_g.contiguous() scores_buf, indices_buf, beams_buf = self.beam.step(step, lprobs_g, scores_g) beams_buf.mul_(self.num_groups).add_(g) scores_G.append(scores_buf.clone()) indices_G.append(indices_buf.clone()) beams_G.append(beams_buf.clone()) # update diversity penalty self.diversity_buf.scatter_add_( 1, indices_buf, 
self.diversity_buf.new_ones(indices_buf.size()) ) # interleave results from different groups self.scores_buf = torch.stack(scores_G, dim=2, out=self.scores_buf).view(bsz, -1) self.indices_buf = torch.stack(indices_G, dim=2, out=self.indices_buf).view(bsz, -1) self.beams_buf = torch.stack(beams_G, dim=2, out=self.beams_buf).view(bsz, -1) return self.scores_buf, self.indices_buf, self.beams_buf class Sampling(Search): def __init__(self, tgt_dict, sampling_topk=-1, sampling_temperature=1.): super().__init__(tgt_dict) self.sampling_topk = sampling_topk self.sampling_temperature = sampling_temperature def step(self, step, lprobs, scores): super()._init_buffers(lprobs) bsz, beam_size, vocab_size = lprobs.size() if step == 0: # at the first step all hypotheses are equally likely, so use # only the first beam lprobs = lprobs[:, ::beam_size, :].contiguous() # we exclude the first two vocab items, one of which is pad assert self.pad == 1, 'sampling assumes the first two symbols can be ignored' lprobs_nopad = lprobs[:, :, 2:] # only sample from top-k candidates if self.sampling_topk > 0: lprobs_nopad, topk_indices = lprobs_nopad.topk(self.sampling_topk) # sampling temperature if self.sampling_temperature != 1.: lprobs_nopad = lprobs_nopad.div_(self.sampling_temperature) # sample probs_nopad = lprobs_nopad.exp_() if step == 0: self.indices_buf = torch.multinomial( probs_nopad.view(bsz, -1), beam_size, replacement=True, out=self.indices_buf, ).view(bsz, beam_size) else: self.indices_buf = torch.multinomial( probs_nopad.view(bsz * beam_size, -1), 1, replacement=True, out=self.indices_buf, ).view(bsz, beam_size) if step == 0: # expand to beam size probs_nopad = probs_nopad.expand(bsz, beam_size, -1) # gather scores torch.gather( probs_nopad, dim=2, index=self.indices_buf.unsqueeze(-1), out=self.scores_buf, ) self.scores_buf = self.scores_buf.log_().view(bsz, -1) # remap indices if using top-k sampling if self.sampling_topk > 0: self.indices_buf = torch.gather( 
topk_indices.expand(bsz, beam_size, -1), dim=2, index=self.indices_buf.unsqueeze(-1), ).squeeze(2) # remap indices since we excluded the first two vocab items self.indices_buf.add_(2) if step == 0: self.beams_buf = self.indices_buf.new_zeros(bsz, beam_size) else: self.beams_buf = torch.arange(0, beam_size, out=self.beams_buf).repeat(bsz, 1) # make scores cumulative self.scores_buf.add_( torch.gather( scores[:, :, step - 1], dim=1, index=self.beams_buf, ) ) return self.scores_buf, self.indices_buf, self.beams_buf
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/sequence_generator.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import torch from fairseq import search, utils from fairseq.models import FairseqIncrementalDecoder class SequenceGenerator(object): def __init__( self, models, tgt_dict, beam_size=1, minlen=1, maxlen=None, stop_early=True, normalize_scores=True, len_penalty=1, unk_penalty=0, retain_dropout=False, sampling=False, sampling_topk=-1, sampling_temperature=1, diverse_beam_groups=-1, diverse_beam_strength=0.5, ): """Generates translations of a given source sentence. Args: min/maxlen: The length of the generated output will be bounded by minlen and maxlen (not including the end-of-sentence marker). stop_early: Stop generation immediately after we finalize beam_size hypotheses, even though longer hypotheses might have better normalized scores. normalize_scores: Normalize scores by the length of the output. 
""" self.models = models self.pad = tgt_dict.pad() self.unk = tgt_dict.unk() self.eos = tgt_dict.eos() self.vocab_size = len(tgt_dict) self.beam_size = beam_size self.minlen = minlen max_decoder_len = min(m.max_decoder_positions() for m in self.models) max_decoder_len -= 1 # we define maxlen not including the EOS marker self.maxlen = max_decoder_len if maxlen is None else min(maxlen, max_decoder_len) self.stop_early = stop_early self.normalize_scores = normalize_scores self.len_penalty = len_penalty self.unk_penalty = unk_penalty self.retain_dropout = retain_dropout assert sampling_topk < 0 or sampling, '--sampling-topk requires --sampling' if sampling: self.search = search.Sampling(tgt_dict, sampling_topk, sampling_temperature) elif diverse_beam_groups > 0: self.search = search.DiverseBeamSearch(tgt_dict, diverse_beam_groups, diverse_beam_strength) else: self.search = search.BeamSearch(tgt_dict) def cuda(self): for model in self.models: model.cuda() return self def generate_batched_itr( self, data_itr, beam_size=None, maxlen_a=0.0, maxlen_b=None, cuda=False, timer=None, prefix_size=0, ): """Iterate over a batched dataset and yield individual translations. Args: maxlen_a/b: generate sequences of maximum length ax + b, where x is the source sentence length. cuda: use GPU for generation timer: StopwatchMeter for timing generations. 
""" if maxlen_b is None: maxlen_b = self.maxlen for sample in data_itr: s = utils.move_to_cuda(sample) if cuda else sample if 'net_input' not in s: continue input = s['net_input'] # model.forward normally channels prev_output_tokens into the decoder # separately, but SequenceGenerator directly calls model.encoder encoder_input = { k: v for k, v in input.items() if k != 'prev_output_tokens' } srclen = encoder_input['src_tokens'].size(1) if timer is not None: timer.start() with torch.no_grad(): hypos = self.generate( encoder_input, beam_size=beam_size, maxlen=int(maxlen_a*srclen + maxlen_b), prefix_tokens=s['target'][:, :prefix_size] if prefix_size > 0 else None, ) if timer is not None: timer.stop(sum(len(h[0]['tokens']) for h in hypos)) for i, id in enumerate(s['id'].data): # remove padding src = utils.strip_pad(input['src_tokens'].data[i, :], self.pad) ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None yield id, src, ref, hypos[i] def generate(self, encoder_input, beam_size=None, maxlen=None, prefix_tokens=None): """Generate a batch of translations. Args: encoder_input: dictionary containing the inputs to model.encoder.forward beam_size: int overriding the beam size. 
defaults to self.beam_size max_len: maximum length of the generated sequence prefix_tokens: force decoder to begin with these tokens """ with torch.no_grad(): return self._generate(encoder_input, beam_size, maxlen, prefix_tokens) def _generate(self, encoder_input, beam_size=None, maxlen=None, prefix_tokens=None): """See generate""" src_tokens = encoder_input['src_tokens'] bsz, srclen = src_tokens.size() maxlen = min(maxlen, self.maxlen) if maxlen is not None else self.maxlen # the max beam size is the dictionary size - 1, since we never select pad beam_size = beam_size if beam_size is not None else self.beam_size beam_size = min(beam_size, self.vocab_size - 1) encoder_outs = [] incremental_states = {} for model in self.models: if not self.retain_dropout: model.eval() if isinstance(model.decoder, FairseqIncrementalDecoder): incremental_states[model] = {} else: incremental_states[model] = None # compute the encoder output for each beam encoder_out = model.encoder(**encoder_input) new_order = torch.arange(bsz).view(-1, 1).repeat(1, beam_size).view(-1) new_order = new_order.to(src_tokens.device) encoder_out = model.encoder.reorder_encoder_out(encoder_out, new_order) encoder_outs.append(encoder_out) # initialize buffers scores = src_tokens.data.new(bsz * beam_size, maxlen + 1).float().fill_(0) scores_buf = scores.clone() tokens = src_tokens.data.new(bsz * beam_size, maxlen + 2).fill_(self.pad) tokens_buf = tokens.clone() tokens[:, 0] = self.eos attn, attn_buf = None, None nonpad_idxs = None # list of completed sentences finalized = [[] for i in range(bsz)] finished = [False for i in range(bsz)] worst_finalized = [{'idx': None, 'score': -math.inf} for i in range(bsz)] num_remaining_sent = bsz # number of candidate hypos per step cand_size = 2 * beam_size # 2 x beam size in case half are EOS # offset arrays for converting between different indexing schemes bbsz_offsets = (torch.arange(0, bsz) * beam_size).unsqueeze(1).type_as(tokens) cand_offsets = torch.arange(0, 
cand_size).type_as(tokens) # helper function for allocating buffers on the fly buffers = {} def buffer(name, type_of=tokens): # noqa if name not in buffers: buffers[name] = type_of.new() return buffers[name] def is_finished(sent, step, unfinalized_scores=None): """ Check whether we've finished generation for a given sentence, by comparing the worst score among finalized hypotheses to the best possible score among unfinalized hypotheses. """ assert len(finalized[sent]) <= beam_size if len(finalized[sent]) == beam_size: if self.stop_early or step == maxlen or unfinalized_scores is None: return True # stop if the best unfinalized score is worse than the worst # finalized one best_unfinalized_score = unfinalized_scores[sent].max() if self.normalize_scores: best_unfinalized_score /= maxlen ** self.len_penalty if worst_finalized[sent]['score'] >= best_unfinalized_score: return True return False def finalize_hypos(step, bbsz_idx, eos_scores, unfinalized_scores=None): """ Finalize the given hypotheses at this step, while keeping the total number of finalized hypotheses per sentence <= beam_size. Note: the input must be in the desired finalization order, so that hypotheses that appear earlier in the input are preferred to those that appear later. 
Args: step: current time step bbsz_idx: A vector of indices in the range [0, bsz*beam_size), indicating which hypotheses to finalize eos_scores: A vector of the same size as bbsz_idx containing scores for each hypothesis unfinalized_scores: A vector containing scores for all unfinalized hypotheses """ assert bbsz_idx.numel() == eos_scores.numel() # clone relevant token and attention tensors tokens_clone = tokens.index_select(0, bbsz_idx) tokens_clone = tokens_clone[:, 1:step + 2] # skip the first index, which is EOS tokens_clone[:, step] = self.eos attn_clone = attn.index_select(0, bbsz_idx)[:, :, 1:step+2] if attn is not None else None # compute scores per token position pos_scores = scores.index_select(0, bbsz_idx)[:, :step+1] pos_scores[:, step] = eos_scores # convert from cumulative to per-position scores pos_scores[:, 1:] = pos_scores[:, 1:] - pos_scores[:, :-1] # normalize sentence-level scores if self.normalize_scores: eos_scores /= (step + 1) ** self.len_penalty cum_unfin = [] prev = 0 for f in finished: if f: prev += 1 else: cum_unfin.append(prev) sents_seen = set() for i, (idx, score) in enumerate(zip(bbsz_idx.tolist(), eos_scores.tolist())): unfin_idx = idx // beam_size sent = unfin_idx + cum_unfin[unfin_idx] sents_seen.add((sent, unfin_idx)) def get_hypo(): if attn_clone is not None: # remove padding tokens from attn scores hypo_attn = attn_clone[i][nonpad_idxs[sent]] _, alignment = hypo_attn.max(dim=0) else: hypo_attn = None alignment = None return { 'tokens': tokens_clone[i], 'score': score, 'attention': hypo_attn, # src_len x tgt_len 'alignment': alignment, 'positional_scores': pos_scores[i], } if len(finalized[sent]) < beam_size: finalized[sent].append(get_hypo()) elif not self.stop_early and score > worst_finalized[sent]['score']: # replace worst hypo for this sentence with new/better one worst_idx = worst_finalized[sent]['idx'] if worst_idx is not None: finalized[sent][worst_idx] = get_hypo() # find new worst finalized hypo for this sentence idx, 
s = min(enumerate(finalized[sent]), key=lambda r: r[1]['score']) worst_finalized[sent] = { 'score': s['score'], 'idx': idx, } newly_finished = [] for sent, unfin_idx in sents_seen: # check termination conditions for this sentence if not finished[sent] and is_finished(sent, step, unfinalized_scores): finished[sent] = True newly_finished.append(unfin_idx) return newly_finished reorder_state = None batch_idxs = None for step in range(maxlen + 1): # one extra step for EOS marker # reorder decoder internal states based on the prev choice of beams if reorder_state is not None: if batch_idxs is not None: # update beam indices to take into account removed sentences corr = batch_idxs - torch.arange(batch_idxs.numel()).type_as(batch_idxs) reorder_state.view(-1, beam_size).add_(corr.unsqueeze(-1) * beam_size) for i, model in enumerate(self.models): if isinstance(model.decoder, FairseqIncrementalDecoder): model.decoder.reorder_incremental_state(incremental_states[model], reorder_state) encoder_outs[i] = model.encoder.reorder_encoder_out(encoder_outs[i], reorder_state) lprobs, avg_attn_scores = self._decode(tokens[:, :step + 1], encoder_outs, incremental_states) lprobs[:, self.pad] = -math.inf # never select pad lprobs[:, self.unk] -= self.unk_penalty # apply unk penalty # Record attention scores if avg_attn_scores is not None: if attn is None: attn = scores.new(bsz * beam_size, src_tokens.size(1), maxlen + 2) attn_buf = attn.clone() nonpad_idxs = src_tokens.ne(self.pad) attn[:, :, step + 1].copy_(avg_attn_scores) scores = scores.type_as(lprobs) scores_buf = scores_buf.type_as(lprobs) eos_bbsz_idx = buffer('eos_bbsz_idx') eos_scores = buffer('eos_scores', type_of=scores) if step < maxlen: if prefix_tokens is not None and step < prefix_tokens.size(1): probs_slice = lprobs.view(bsz, -1, lprobs.size(-1))[:, 0, :] cand_scores = torch.gather( probs_slice, dim=1, index=prefix_tokens[:, step].view(-1, 1).data ).expand(-1, cand_size) cand_indices = prefix_tokens[:, step].view(-1, 
1).expand(bsz, cand_size).data cand_beams = torch.zeros_like(cand_indices) else: cand_scores, cand_indices, cand_beams = self.search.step( step, lprobs.view(bsz, -1, self.vocab_size), scores.view(bsz, beam_size, -1)[:, :, :step], ) else: # make probs contain cumulative scores for each hypothesis lprobs.add_(scores[:, step - 1].unsqueeze(-1)) # finalize all active hypotheses once we hit maxlen # pick the hypothesis with the highest prob of EOS right now torch.sort( lprobs[:, self.eos], descending=True, out=(eos_scores, eos_bbsz_idx), ) num_remaining_sent -= len(finalize_hypos( step, eos_bbsz_idx, eos_scores)) assert num_remaining_sent == 0 break # cand_bbsz_idx contains beam indices for the top candidate # hypotheses, with a range of values: [0, bsz*beam_size), # and dimensions: [bsz, cand_size] cand_bbsz_idx = cand_beams.add(bbsz_offsets) # finalize hypotheses that end in eos eos_mask = cand_indices.eq(self.eos) finalized_sents = set() if step >= self.minlen: # only consider eos when it's among the top beam_size indices torch.masked_select( cand_bbsz_idx[:, :beam_size], mask=eos_mask[:, :beam_size], out=eos_bbsz_idx, ) if eos_bbsz_idx.numel() > 0: torch.masked_select( cand_scores[:, :beam_size], mask=eos_mask[:, :beam_size], out=eos_scores, ) finalized_sents = finalize_hypos( step, eos_bbsz_idx, eos_scores, cand_scores) num_remaining_sent -= len(finalized_sents) assert num_remaining_sent >= 0 if num_remaining_sent == 0: break assert step < maxlen if len(finalized_sents) > 0: new_bsz = bsz - len(finalized_sents) # construct batch_idxs which holds indices of batches to keep for the next pass batch_mask = cand_indices.new_ones(bsz) batch_mask[cand_indices.new(finalized_sents)] = 0 batch_idxs = batch_mask.nonzero().squeeze(-1) eos_mask = eos_mask[batch_idxs] cand_beams = cand_beams[batch_idxs] bbsz_offsets.resize_(new_bsz, 1) cand_bbsz_idx = cand_beams.add(bbsz_offsets) cand_scores = cand_scores[batch_idxs] cand_indices = cand_indices[batch_idxs] if prefix_tokens is 
not None: prefix_tokens = prefix_tokens[batch_idxs] scores = scores.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) scores_buf.resize_as_(scores) tokens = tokens.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, -1) tokens_buf.resize_as_(tokens) if attn is not None: attn = attn.view(bsz, -1)[batch_idxs].view(new_bsz * beam_size, attn.size(1), -1) attn_buf.resize_as_(attn) bsz = new_bsz else: batch_idxs = None # set active_mask so that values > cand_size indicate eos hypos # and values < cand_size indicate candidate active hypos. # After, the min values per row are the top candidate active hypos active_mask = buffer('active_mask') torch.add( eos_mask.type_as(cand_offsets) * cand_size, cand_offsets[:eos_mask.size(1)], out=active_mask, ) # get the top beam_size active hypotheses, which are just the hypos # with the smallest values in active_mask active_hypos, _ignore = buffer('active_hypos'), buffer('_ignore') torch.topk( active_mask, k=beam_size, dim=1, largest=False, out=(_ignore, active_hypos) ) active_bbsz_idx = buffer('active_bbsz_idx') torch.gather( cand_bbsz_idx, dim=1, index=active_hypos, out=active_bbsz_idx, ) active_scores = torch.gather( cand_scores, dim=1, index=active_hypos, out=scores[:, step].view(bsz, beam_size), ) active_bbsz_idx = active_bbsz_idx.view(-1) active_scores = active_scores.view(-1) # copy tokens and scores for active hypotheses torch.index_select( tokens[:, :step + 1], dim=0, index=active_bbsz_idx, out=tokens_buf[:, :step + 1], ) torch.gather( cand_indices, dim=1, index=active_hypos, out=tokens_buf.view(bsz, beam_size, -1)[:, :, step + 1], ) if step > 0: torch.index_select( scores[:, :step], dim=0, index=active_bbsz_idx, out=scores_buf[:, :step], ) torch.gather( cand_scores, dim=1, index=active_hypos, out=scores_buf.view(bsz, beam_size, -1)[:, :, step], ) # copy attention for active hypotheses if attn is not None: torch.index_select( attn[:, :, :step + 2], dim=0, index=active_bbsz_idx, out=attn_buf[:, :, :step + 2], ) # swap 
buffers tokens, tokens_buf = tokens_buf, tokens scores, scores_buf = scores_buf, scores if attn is not None: attn, attn_buf = attn_buf, attn # reorder incremental state in decoder reorder_state = active_bbsz_idx # sort by score descending for sent in range(len(finalized)): finalized[sent] = sorted(finalized[sent], key=lambda r: r['score'], reverse=True) return finalized def _decode(self, tokens, encoder_outs, incremental_states): if len(self.models) == 1: return self._decode_one(tokens, self.models[0], encoder_outs[0], incremental_states, log_probs=True) avg_probs = None avg_attn = None for model, encoder_out in zip(self.models, encoder_outs): probs, attn = self._decode_one(tokens, model, encoder_out, incremental_states, log_probs=False) if avg_probs is None: avg_probs = probs else: avg_probs.add_(probs) if attn is not None: if avg_attn is None: avg_attn = attn else: avg_attn.add_(attn) avg_probs.div_(len(self.models)) avg_probs.log_() if avg_attn is not None: avg_attn.div_(len(self.models)) return avg_probs, avg_attn def _decode_one(self, tokens, model, encoder_out, incremental_states, log_probs): with torch.no_grad(): if incremental_states[model] is not None: decoder_out = list(model.decoder(tokens, encoder_out, incremental_state=incremental_states[model])) else: decoder_out = list(model.decoder(tokens, encoder_out)) decoder_out[0] = decoder_out[0][:, -1, :] attn = decoder_out[1] if type(attn) is dict: attn = attn['attn'] if attn is not None: if type(attn) is dict: attn = attn['attn'] attn = attn[:, -1, :] probs = model.get_normalized_probs(decoder_out, log_probs=log_probs) return probs, attn
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/sequence_scorer.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import torch

from fairseq import utils


class SequenceScorer(object):
    """Scores the target for a given source sentence.

    Wraps an ensemble of models and computes, for each target token, the
    model-averaged probability assigned to it, in the same hypothesis format
    that SequenceGenerator emits.
    """

    def __init__(self, models, tgt_dict):
        # models: iterable of FairseqModel-like objects exposing forward() and
        # get_normalized_probs(); tgt_dict supplies the padding index used to
        # strip padding from source/target tensors.
        self.models = models
        self.pad = tgt_dict.pad()

    def cuda(self):
        """Move all ensemble members to GPU; returns self for chaining."""
        for model in self.models:
            model.cuda()
        return self

    def score_batched_itr(self, data_itr, cuda=False, timer=None):
        """Iterate over a batched dataset and yield scored translations.

        Yields tuples ``(id, src, ref, hypos)`` where ``hypos`` is a
        single-element list mimicking SequenceGenerator's output format.
        """
        for sample in data_itr:
            s = utils.move_to_cuda(sample) if cuda else sample
            if timer is not None:
                timer.start()
            pos_scores, attn = self.score(s)
            for i, id in enumerate(s['id'].data):
                # remove padding from ref
                src = utils.strip_pad(s['net_input']['src_tokens'].data[i, :], self.pad)
                ref = utils.strip_pad(s['target'].data[i, :], self.pad) if s['target'] is not None else None
                # NOTE(review): if s['target'] is None, ref is None and the next
                # line raises AttributeError — looks like this path assumes a
                # target is always present; confirm against callers.
                tgt_len = ref.numel()
                pos_scores_i = pos_scores[i][:tgt_len]
                # average per-token log-probability over the target length
                score_i = pos_scores_i.sum() / tgt_len
                if attn is not None:
                    attn_i = attn[i]
                    # hard alignment: source position with max attention per target step
                    _, alignment = attn_i.max(dim=0)
                else:
                    attn_i = alignment = None
                hypos = [{
                    'tokens': ref,
                    'score': score_i,
                    'attention': attn_i,
                    'alignment': alignment,
                    'positional_scores': pos_scores_i,
                }]
                if timer is not None:
                    timer.stop(s['ntokens'])
                # return results in the same format as SequenceGenerator
                yield id, src, ref, hypos

    def score(self, sample):
        """Score a batch of translations.

        Returns ``(pos_scores, avg_attn)`` where ``pos_scores`` holds, per
        batch element and target position, the log of the ensemble-averaged
        probability of the gold target token.
        """
        net_input = sample['net_input']

        # compute scores for each model in the ensemble
        avg_probs = None
        avg_attn = None
        for model in self.models:
            with torch.no_grad():
                model.eval()
                decoder_out = model.forward(**net_input)
                attn = decoder_out[1]
            # average raw probabilities (log_probs=False), take the log only
            # after summing across models below
            probs = model.get_normalized_probs(decoder_out, log_probs=False, sample=sample).data
            if avg_probs is None:
                avg_probs = probs
            else:
                avg_probs.add_(probs)  # in-place accumulation across models
            if attn is not None:
                attn = attn.data
                if avg_attn is None:
                    avg_attn = attn
                else:
                    avg_attn.add_(attn)
        avg_probs.div_(len(self.models))
        avg_probs.log_()
        if avg_attn is not None:
            avg_attn.div_(len(self.models))
        # pick out the probability assigned to each gold target token
        avg_probs = avg_probs.gather(
            dim=2,
            index=sample['target'].data.unsqueeze(-1),
        )
        return avg_probs.squeeze(2), avg_attn
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/tasks/__init__.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import argparse
import importlib
import os

from .fairseq_task import FairseqTask


# maps task name (str) -> task class; populated by @register_task
TASK_REGISTRY = {}
# class names seen so far, to reject accidental duplicate class definitions
TASK_CLASS_NAMES = set()


def setup_task(args):
    """Instantiate the task selected by ``args.task`` via its ``setup_task``."""
    return TASK_REGISTRY[args.task].setup_task(args)


def register_task(name):
    """
    New tasks can be added to fairseq with the
    :func:`~fairseq.tasks.register_task` function decorator.

    For example::

        @register_task('classification')
        class ClassificationTask(FairseqTask):
            (...)

    .. note:: All Tasks must implement the :class:`~fairseq.tasks.FairseqTask`
        interface. Please see the :class:`~fairseq.tasks.FairseqTask`
        documentation for the methods a task is expected to provide.

    Args:
        name (str): the name of the task
    """

    def register_task_cls(cls):
        if name in TASK_REGISTRY:
            raise ValueError('Cannot register duplicate task ({})'.format(name))
        if not issubclass(cls, FairseqTask):
            raise ValueError('Task ({}: {}) must extend FairseqTask'.format(name, cls.__name__))
        if cls.__name__ in TASK_CLASS_NAMES:
            raise ValueError('Cannot register task with duplicate class name ({})'.format(cls.__name__))
        TASK_REGISTRY[name] = cls
        TASK_CLASS_NAMES.add(cls.__name__)
        return cls

    return register_task_cls


# automatically import any Python files in the tasks/ directory
for file in os.listdir(os.path.dirname(__file__)):
    if file.endswith('.py') and not file.startswith('_'):
        # strip the '.py' suffix to get the module name
        task_name = file[:file.find('.py')]
        importlib.import_module('fairseq.tasks.' + task_name)

        # expose `task_parser` for sphinx
        if task_name in TASK_REGISTRY:
            parser = argparse.ArgumentParser(add_help=False)
            group_task = parser.add_argument_group('Task name')
            group_task.add_argument(
                '--task', metavar=task_name,
                help='Enable this task with: ``--task=' + task_name + '``'
            )
            group_args = parser.add_argument_group('Additional command-line arguments')
            TASK_REGISTRY[task_name].add_args(group_args)
            # make e.g. `translation_parser` importable for the docs build
            globals()[task_name + '_parser'] = parser
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/tasks/fairseq_task.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from fairseq.data import data_utils, FairseqDataset, iterators


class FairseqTask(object):
    """
    Tasks store dictionaries and provide helpers for loading/iterating over
    Datasets, initializing the Model/Criterion and calculating the loss.
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        pass

    def __init__(self, args):
        self.args = args
        # split name (str) -> loaded FairseqDataset; filled by load_dataset()
        self.datasets = {}

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        return cls(args)

    def load_dataset(self, split, combine=False):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        raise NotImplementedError

    def dataset(self, split):
        """
        Return a loaded dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)

        Returns:
            a :class:`~fairseq.data.FairseqDataset` corresponding to *split*

        Raises:
            KeyError: if *split* was never loaded via :func:`load_dataset`
            TypeError: if the stored dataset is not a FairseqDataset
        """
        # local import mirrors the module-level one; kept to avoid relying on
        # import order at class-definition time
        from fairseq.data import FairseqDataset
        if split not in self.datasets:
            raise KeyError('Dataset not loaded: ' + split)
        if not isinstance(self.datasets[split], FairseqDataset):
            raise TypeError('Datasets are expected to be of type FairseqDataset')
        return self.datasets[split]

    def get_batch_iterator(
        self, dataset, max_tokens=None, max_sentences=None, max_positions=None,
        ignore_invalid_inputs=False, required_batch_size_multiple=1,
        seed=1, num_shards=1, shard_id=0,
    ):
        """
        Get an iterator that yields batches of data from the given dataset.

        Args:
            dataset (~fairseq.data.FairseqDataset): dataset to batch
            max_tokens (int, optional): max number of tokens in each batch.
                Default: ``None``
            max_sentences (int, optional): max number of sentences in each
                batch. Default: ``None``
            max_positions (optional): max sentence length supported by the
                model. Default: ``None``
            ignore_invalid_inputs (bool, optional): don't raise Exception for
                sentences that are too long. Default: ``False``
            required_batch_size_multiple (int, optional): require batch size to
                be a multiple of N. Default: ``1``
            seed (int, optional): seed for random number generator for
                reproducibility. Default: ``1``
            num_shards (int, optional): shard the data iterator into N
                shards. Default: ``1``
            shard_id (int, optional): which shard of the data iterator to
                return. Default: ``0``

        Returns:
            ~fairseq.iterators.EpochBatchIterator: a batched iterator over the
                given dataset split
        """
        assert isinstance(dataset, FairseqDataset)

        # get indices ordered by example size; seeded so that shuffling (done
        # inside ordered_indices for some datasets) is reproducible
        with data_utils.numpy_seed(seed):
            indices = dataset.ordered_indices()

        # filter examples that are too large
        indices = data_utils.filter_by_size(
            indices, dataset.size, max_positions, raise_exception=(not ignore_invalid_inputs),
        )

        # create mini-batches with given size constraints
        batch_sampler = data_utils.batch_by_size(
            indices, dataset.num_tokens, max_tokens=max_tokens, max_sentences=max_sentences,
            required_batch_size_multiple=required_batch_size_multiple,
        )

        # return a reusable, sharded iterator
        return iterators.EpochBatchIterator(
            dataset=dataset,
            collate_fn=dataset.collater,
            batch_sampler=batch_sampler,
            seed=seed,
            num_shards=num_shards,
            shard_id=shard_id,
        )

    def build_model(self, args):
        """
        Build the :class:`~fairseq.models.BaseFairseqModel` instance for this
        task.

        Args:
            args (argparse.Namespace): parsed command-line arguments

        Returns:
            a :class:`~fairseq.models.BaseFairseqModel` instance
        """
        from fairseq import models
        return models.build_model(args, self)

    def build_criterion(self, args):
        """
        Build the :class:`~fairseq.criterions.FairseqCriterion` instance for
        this task.

        Args:
            args (argparse.Namespace): parsed command-line arguments

        Returns:
            a :class:`~fairseq.criterions.FairseqCriterion` instance
        """
        from fairseq import criterions
        return criterions.build_criterion(args, self)

    def get_loss(self, model, criterion, sample):
        """
        Return the loss as computed by *criterion* for the given *model* and
        *sample*.

        Args:
            model (~fairseq.models.BaseFairseqModel): the model
            criterion (~fairseq.criterions.FairseqCriterion): the criterion
            sample (dict): the mini-batch. The format is defined by the
                :class:`~fairseq.data.FairseqDataset`.
        """
        return criterion(model, sample)

    def max_positions(self):
        """Return the max input length allowed by the task."""
        return None

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary` (if applicable
        for this task)."""
        raise NotImplementedError

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary` (if applicable
        for this task)."""
        raise NotImplementedError
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/tasks/language_modeling.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import itertools
import numpy as np
import os

from torch.utils.data import ConcatDataset

from fairseq.data import (
    Dictionary, IndexedInMemoryDataset, IndexedRawTextDataset,
    MonolingualDataset, TokenBlockDataset, TruncatedDictionary
)

from . import FairseqTask, register_task


@register_task('language_modeling')
class LanguageModelingTask(FairseqTask):
    """
    Train a language model.

    Args:
        dictionary (Dictionary): the dictionary for the input of the language
            model
        output_dictionary (Dictionary): the dictionary for the output of the
            language model. In most cases it will be the same as dictionary,
            but could possibly be a more limited version of the dictionary
            (if --output-dictionary-size is used).
        targets (List[str]): list of the target types that the language model
            should predict. Can be one of "self", "future", and "past".
            Defaults to "future".

    .. note::

        The language modeling task is compatible with :mod:`train.py <train>`,
        :mod:`generate.py <generate>`, :mod:`interactive.py <interactive>` and
        :mod:`eval_lm.py <eval_lm>`.

    The language modeling task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.language_modeling_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument('data', help='path to data directory')
        parser.add_argument('--sample-break-mode', choices=['none', 'complete', 'eos'],
                            help='If omitted or "none", fills each sample with tokens-per-sample '
                                 'tokens. If set to "complete", splits samples only at the end '
                                 'of sentence, but may include multiple sentences per sample. '
                                 'If set to "eos", includes only one sentence per sample.')
        parser.add_argument('--tokens-per-sample', default=1024, type=int,
                            help='max number of tokens per sample for LM dataset')
        parser.add_argument('--raw-text', default=False, action='store_true',
                            help='load raw text dataset')
        parser.add_argument('--output-dictionary-size', default=-1, type=int,
                            help='limit the size of output dictionary')
        parser.add_argument('--self-target', action='store_true',
                            help='include self target')
        parser.add_argument('--future-target', action='store_true',
                            help='include future target')
        parser.add_argument('--past-target', action='store_true',
                            help='include past target')

    def __init__(self, args, dictionary, output_dictionary, targets=None):
        super().__init__(args)
        self.dictionary = dictionary
        self.output_dictionary = output_dictionary

        # default to standard next-token prediction when no targets requested
        if targets is None:
            targets = ['future']
        self.targets = targets

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        dictionary = Dictionary.load(os.path.join(args.data, 'dict.txt'))
        print('| dictionary: {} types'.format(len(dictionary)))

        output_dictionary = dictionary
        if args.output_dictionary_size >= 0:
            # restrict the softmax vocabulary to the most frequent symbols
            output_dictionary = TruncatedDictionary(dictionary, args.output_dictionary_size)

        # upgrade old checkpoints
        # NOTE(review): assumes args always defines self_target/future_target/
        # past_target (true when parsed via add_args); confirm for args loaded
        # from very old checkpoints.
        if hasattr(args, 'exclude_self_target'):
            args.self_target = not args.exclude_self_target

        targets = []
        if args.self_target:
            targets.append('self')
        if args.future_target:
            targets.append('future')
        if args.past_target:
            targets.append('past')
        if len(targets) == 0:
            # standard language modeling
            targets = ['future']

        return cls(args, dictionary, output_dictionary, targets=targets)

    def build_model(self, args):
        model = super().build_model(args)

        # fail fast if the chosen architecture cannot produce a requested target
        for target in self.targets:
            if target not in model.supported_targets:
                raise ValueError('Unsupported language modeling target: {}'.format(target))

        return model

    def load_dataset(self, split, combine=False):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """
        loaded_datasets = []

        # with combine=True, also pick up extra shards named e.g. train1, train2, ...
        for k in itertools.count():
            split_k = split + (str(k) if k > 0 else '')
            path = os.path.join(self.args.data, split_k)

            if self.args.raw_text and IndexedRawTextDataset.exists(path):
                ds = IndexedRawTextDataset(path, self.dictionary)
                # flatten the per-sentence token lists into one stream
                tokens = [t for l in ds.tokens_list for t in l]
            elif not self.args.raw_text and IndexedInMemoryDataset.exists(path):
                ds = IndexedInMemoryDataset(path, fix_lua_indexing=True)
                tokens = ds.buffer
            else:
                if k > 0:
                    break  # no more shards
                else:
                    raise FileNotFoundError('Dataset not found: {} ({})'.format(split, self.args.data))

            loaded_datasets.append(
                TokenBlockDataset(
                    tokens, ds.sizes, self.args.tokens_per_sample,
                    pad=self.dictionary.pad(), eos=self.dictionary.eos(),
                    break_mode=self.args.sample_break_mode, include_targets=True,
                ))

            print('| {} {} {} examples'.format(self.args.data, split_k, len(loaded_datasets[-1])))

            if not combine:
                break

        if len(loaded_datasets) == 1:
            dataset = loaded_datasets[0]
            sizes = dataset.sizes
        else:
            dataset = ConcatDataset(loaded_datasets)
            sizes = np.concatenate([ds.sizes for ds in loaded_datasets])

        add_eos_for_other_targets = self.args.sample_break_mode is not None and self.args.sample_break_mode != 'none'

        self.datasets[split] = MonolingualDataset(
            dataset, sizes, self.dictionary, self.output_dictionary,
            add_eos_for_other_targets=add_eos_for_other_targets, shuffle=False,
            targets=self.targets,
        )

    @property
    def target_dictionary(self):
        """Return the :class:`~fairseq.data.Dictionary` for the language
        model."""
        return self.output_dictionary
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/tasks/translation.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import itertools
import numpy as np
import os

from torch.utils.data import ConcatDataset

from fairseq import options
from fairseq.data import (
    data_utils, Dictionary, LanguagePairDataset, IndexedInMemoryDataset,
    IndexedRawTextDataset,
)

from . import FairseqTask, register_task


@register_task('translation')
class TranslationTask(FairseqTask):
    """
    Translate from one (source) language to another (target) language.

    Args:
        src_dict (Dictionary): dictionary for the source language
        tgt_dict (Dictionary): dictionary for the target language

    .. note::

        The translation task is compatible with :mod:`train.py <train>`,
        :mod:`generate.py <generate>` and :mod:`interactive.py <interactive>`.

    The translation task provides the following additional command-line
    arguments:

    .. argparse::
        :ref: fairseq.tasks.translation_parser
        :prog:
    """

    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        parser.add_argument('data', nargs='+', help='path(s) to data directorie(s)')
        parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                            help='source language')
        parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                            help='target language')
        parser.add_argument('--raw-text', action='store_true',
                            help='load raw text dataset')
        parser.add_argument('--left-pad-source', default='True', type=str, metavar='BOOL',
                            help='pad the source on the left')
        parser.add_argument('--left-pad-target', default='False', type=str, metavar='BOOL',
                            help='pad the target on the left')
        parser.add_argument('--max-source-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the source sequence')
        parser.add_argument('--max-target-positions', default=1024, type=int, metavar='N',
                            help='max number of tokens in the target sequence')
        parser.add_argument('--upsample-primary', default=1, type=int,
                            help='amount to upsample primary dataset')

    def __init__(self, args, src_dict, tgt_dict):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict

    @classmethod
    def setup_task(cls, args, **kwargs):
        """Setup the task (e.g., load dictionaries).

        Args:
            args (argparse.Namespace): parsed command-line arguments
        """
        # --left-pad-* arrive as the strings 'True'/'False'; convert to bool
        args.left_pad_source = options.eval_bool(args.left_pad_source)
        args.left_pad_target = options.eval_bool(args.left_pad_target)

        # find language pair automatically from filenames in the first data dir
        if args.source_lang is None or args.target_lang is None:
            args.source_lang, args.target_lang = data_utils.infer_language_pair(args.data[0])
        if args.source_lang is None or args.target_lang is None:
            raise Exception('Could not infer language pair, please provide it explicitly')

        # load dictionaries (from the first data directory only)
        src_dict = Dictionary.load(os.path.join(args.data[0], 'dict.{}.txt'.format(args.source_lang)))
        tgt_dict = Dictionary.load(os.path.join(args.data[0], 'dict.{}.txt'.format(args.target_lang)))
        # the special symbols must line up across both dictionaries
        assert src_dict.pad() == tgt_dict.pad()
        assert src_dict.eos() == tgt_dict.eos()
        assert src_dict.unk() == tgt_dict.unk()
        print('| [{}] dictionary: {} types'.format(args.source_lang, len(src_dict)))
        print('| [{}] dictionary: {} types'.format(args.target_lang, len(tgt_dict)))

        return cls(args, src_dict, tgt_dict)

    def load_dataset(self, split, combine=False):
        """Load a given dataset split.

        Args:
            split (str): name of the split (e.g., train, valid, test)
        """

        def split_exists(split, src, tgt, lang, data_path):
            # probe for files named like '<split>.<src>-<tgt>.<lang>'
            filename = os.path.join(data_path, '{}.{}-{}.{}'.format(split, src, tgt, lang))
            if self.args.raw_text and IndexedRawTextDataset.exists(filename):
                return True
            elif not self.args.raw_text and IndexedInMemoryDataset.exists(filename):
                return True
            return False

        def indexed_dataset(path, dictionary):
            if self.args.raw_text:
                return IndexedRawTextDataset(path, dictionary)
            elif IndexedInMemoryDataset.exists(path):
                return IndexedInMemoryDataset(path, fix_lua_indexing=True)
            return None

        src_datasets = []
        tgt_datasets = []

        data_paths = self.args.data

        for data_path in data_paths:
            # with combine=True, also pick up extra shards named e.g. train1, ...
            for k in itertools.count():
                split_k = split + (str(k) if k > 0 else '')

                # infer langcode; files may be named with either direction
                src, tgt = self.args.source_lang, self.args.target_lang
                if split_exists(split_k, src, tgt, src, data_path):
                    prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, src, tgt))
                elif split_exists(split_k, tgt, src, src, data_path):
                    prefix = os.path.join(data_path, '{}.{}-{}.'.format(split_k, tgt, src))
                else:
                    if k > 0:
                        break  # no more shards in this directory
                    else:
                        raise FileNotFoundError('Dataset not found: {} ({})'.format(split, data_path))

                src_datasets.append(indexed_dataset(prefix + src, self.src_dict))
                tgt_datasets.append(indexed_dataset(prefix + tgt, self.tgt_dict))

                print('| {} {} {} examples'.format(data_path, split_k, len(src_datasets[-1])))

                if not combine:
                    break

        assert len(src_datasets) == len(tgt_datasets)

        if len(src_datasets) == 1:
            src_dataset, tgt_dataset = src_datasets[0], tgt_datasets[0]
            src_sizes = src_dataset.sizes
            tgt_sizes = tgt_dataset.sizes
        else:
            if self.args.upsample_primary > 1:
                # repeat the first (primary) dataset to weight it more heavily
                src_datasets.extend([src_datasets[0]] * (self.args.upsample_primary - 1))
                tgt_datasets.extend([tgt_datasets[0]] * (self.args.upsample_primary - 1))
            src_dataset = ConcatDataset(src_datasets)
            tgt_dataset = ConcatDataset(tgt_datasets)
            src_sizes = np.concatenate([ds.sizes for ds in src_datasets])
            tgt_sizes = np.concatenate([ds.sizes for ds in tgt_datasets])

        self.datasets[split] = LanguagePairDataset(
            src_dataset, src_sizes, self.src_dict,
            tgt_dataset, tgt_sizes, self.tgt_dict,
            left_pad_source=self.args.left_pad_source,
            left_pad_target=self.args.left_pad_target,
            max_source_positions=self.args.max_source_positions,
            max_target_positions=self.args.max_target_positions,
        )

    def max_positions(self):
        """Return the max sentence length allowed by the task."""
        return (self.args.max_source_positions, self.args.max_target_positions)

    @property
    def source_dictionary(self):
        """Return the source :class:`~fairseq.data.Dictionary`."""
        return self.src_dict

    @property
    def target_dictionary(self):
        """Return the target :class:`~fairseq.data.Dictionary`."""
        return self.tgt_dict
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/tokenizer.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from collections import Counter
import os, re

import torch

from multiprocessing import Pool


SPACE_NORMALIZER = re.compile(r"\s+")


def tokenize_line(line):
    """Collapse runs of whitespace, trim, and split a line into word tokens."""
    return SPACE_NORMALIZER.sub(" ", line).strip().split()


def safe_readline(f):
    """Read one line, backing up byte-by-byte if positioned mid-character."""
    anchor = f.tell()
    while True:
        try:
            return f.readline()
        except UnicodeDecodeError:
            # step back until we land on a character boundary
            anchor -= 1
            f.seek(anchor)


class Tokenizer:
    """Static helpers for counting words and binarizing text into id tensors."""

    @staticmethod
    def add_file_to_dictionary_single_worker(filename, tokenize, eos_word, worker_id=0, num_workers=1):
        """Count word frequencies over this worker's byte-range of the file."""
        counts = Counter()
        with open(filename, 'r') as f:
            file_size = os.fstat(f.fileno()).st_size
            span = file_size // num_workers
            start = worker_id * span
            stop = start + span
            f.seek(start)
            if start > 0:
                safe_readline(f)  # drop first incomplete line
            line = f.readline()
            while line:
                counts.update(tokenize(line))
                counts.update([eos_word])  # one eos per line
                if f.tell() > stop:
                    break
                line = f.readline()
        return counts

    @staticmethod
    def add_file_to_dictionary(filename, dict, tokenize, num_workers):
        """Populate *dict* with word counts from *filename*, optionally in parallel."""

        def absorb(counted):
            for word, freq in counted.items():
                dict.add_symbol(word, freq)

        if num_workers > 1:
            pool = Pool(processes=num_workers)
            pending = [
                pool.apply_async(
                    Tokenizer.add_file_to_dictionary_single_worker,
                    (filename, tokenize, dict.eos_word, worker_id, num_workers),
                )
                for worker_id in range(num_workers)
            ]
            pool.close()
            pool.join()
            for job in pending:
                absorb(job.get())
        else:
            absorb(Tokenizer.add_file_to_dictionary_single_worker(filename, tokenize, dict.eos_word))

    @staticmethod
    def binarize(filename, dict, consumer, tokenize=tokenize_line, append_eos=True,
                 reverse_order=False, offset=0, end=-1):
        """Tokenize each line of *filename* into an id tensor and feed it to *consumer*.

        Returns a stats dict with sequence/token/unknown counts and the
        Counter of words that mapped to the unknown symbol.
        """
        nseq = 0
        ntok = 0
        replaced = Counter()

        def track_unk(word, idx):
            # record words that mapped to <unk>, excluding the literal unk word
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with open(filename, 'r') as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = Tokenizer.tokenize(
                    line=line,
                    dict=dict,
                    tokenize=tokenize,
                    add_if_not_exist=False,
                    consumer=track_unk,
                    append_eos=append_eos,
                    reverse_order=reverse_order,
                )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {'nseq': nseq, 'nunk': sum(replaced.values()), 'ntok': ntok, 'replaced': replaced}

    @staticmethod
    def find_offsets(filename, num_chunks):
        """Return line-aligned byte offsets splitting *filename* into chunks."""
        with open(filename, 'r') as f:
            total = os.fstat(f.fileno()).st_size
            step = total // num_chunks
            offsets = [0] * (num_chunks + 1)
            for i in range(1, num_chunks):
                f.seek(step * i)
                safe_readline(f)  # advance to the next full line
                offsets[i] = f.tell()
            return offsets

    @staticmethod
    def tokenize(line, dict, tokenize=tokenize_line, add_if_not_exist=True,
                 consumer=None, append_eos=True, reverse_order=False):
        """Map a line of text to an ``IntTensor`` of symbol ids via *dict*."""
        words = tokenize(line)
        if reverse_order:
            words = words[::-1]
        nwords = len(words)
        ids = torch.IntTensor(nwords + 1 if append_eos else nwords)

        for pos, word in enumerate(words):
            idx = dict.add_symbol(word) if add_if_not_exist else dict.index(word)
            if consumer is not None:
                consumer(word, idx)
            ids[pos] = idx
        if append_eos:
            ids[nwords] = dict.eos_index
        return ids
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/trainer.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. """ Train a network across multiple GPUs. """ from collections import defaultdict, OrderedDict import contextlib from itertools import chain import torch from fairseq import distributed_utils, models, optim, utils from fairseq.meters import AverageMeter, StopwatchMeter, TimeMeter from fairseq.optim import lr_scheduler class Trainer(object): """Main class for data parallel training. This class supports synchronous distributed data parallel training, where multiple workers each have a full model replica and gradients are accumulated across workers before each update. We use :class:`~torch.nn.parallel.DistributedDataParallel` to handle communication of the gradients across workers. """ def __init__(self, args, task, model, criterion, dummy_batch): if not torch.cuda.is_available(): raise NotImplementedError('Training on CPU is not supported') self.args = args self.task = task # copy model and criterion to current device self.criterion = criterion.cuda() if args.fp16: self._model = model.half().cuda() else: self._model = model.cuda() # initialize meters self.meters = OrderedDict() self.meters['train_loss'] = AverageMeter() self.meters['train_nll_loss'] = AverageMeter() self.meters['valid_loss'] = AverageMeter() self.meters['valid_nll_loss'] = AverageMeter() self.meters['wps'] = TimeMeter() # words per second self.meters['ups'] = TimeMeter() # updates per second self.meters['wpb'] = AverageMeter() # words per batch self.meters['bsz'] = AverageMeter() # sentences per batch self.meters['gnorm'] = AverageMeter() # gradient norm self.meters['clip'] = AverageMeter() # % of updates clipped self.meters['oom'] = AverageMeter() # out of memory if args.fp16: self.meters['loss_scale'] = AverageMeter() 
# dynamic loss scale self.meters['wall'] = TimeMeter() # wall time in seconds self.meters['train_wall'] = StopwatchMeter() # train wall time in seconds self._dummy_batch = dummy_batch self._num_updates = 0 self._optim_history = None self._optimizer = None self._wrapped_model = None @property def model(self): if self._wrapped_model is None: if self.args.distributed_world_size > 1: self._wrapped_model = models.DistributedFairseqModel( self.args, self._model, ) else: self._wrapped_model = self._model return self._wrapped_model @property def optimizer(self): if self._optimizer is None: self._build_optimizer() return self._optimizer def _build_optimizer(self): if self.args.fp16: if torch.cuda.get_device_capability(0)[0] < 7: print('| WARNING: your device does NOT support faster training with --fp16, ' 'please switch to FP32 which is likely to be faster') params = list(filter(lambda p: p.requires_grad, self.model.parameters())) self._optimizer = optim.FP16Optimizer.build_optimizer(self.args, params) else: if torch.cuda.get_device_capability(0)[0] >= 7: print('| NOTICE: your device may support faster training with --fp16') self._optimizer = optim.build_optimizer(self.args, self.model.parameters()) self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self._optimizer) def save_checkpoint(self, filename, extra_state): """Save all training state in a checkpoint file.""" if distributed_utils.is_master(self.args): # only save one checkpoint extra_state['train_meters'] = self.meters utils.save_state( filename, self.args, self.get_model(), self.criterion, self.optimizer, self.lr_scheduler, self._num_updates, self._optim_history, extra_state, ) def load_checkpoint(self, filename, reset_optimizer=False, reset_lr_scheduler=False, optimizer_overrides=None): """Load all training state from a checkpoint file.""" extra_state, self._optim_history, last_optim_state = \ utils.load_model_state(filename, self.get_model()) if last_optim_state is not None and not reset_optimizer: # 
rebuild optimizer after loading model, since params may have changed self._build_optimizer() # only reload optimizer and lr_scheduler if they match last_optim = self._optim_history[-1] assert last_optim['criterion_name'] == self.criterion.__class__.__name__, \ 'criterion does not match; please reset the optimizer (--reset-optimizer)' assert last_optim['optimizer_name'] == self.optimizer.__class__.__name__, \ 'optimizer does not match; please reset the optimizer (--reset-optimizer)' if not reset_lr_scheduler: self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state']) self.optimizer.load_state_dict(last_optim_state, optimizer_overrides) self._num_updates = last_optim['num_updates'] if extra_state is not None and 'train_meters' in extra_state: self.meters.update(extra_state['train_meters']) del extra_state['train_meters'] # reset TimeMeters, since their start times don't make sense anymore for meter in self.meters.values(): if isinstance(meter, TimeMeter): meter.reset() return extra_state def train_step(self, samples, dummy_batch=False): """Do forward, backward and parameter update.""" # Set seed based on args.seed and the update number so that we get # reproducible results when resuming from checkpoints seed = self.args.seed + self.get_num_updates() torch.manual_seed(seed) torch.cuda.manual_seed(seed) self.model.train() self.zero_grad() if not dummy_batch: self.meters['train_wall'].start() # forward and backward pass logging_outputs, sample_sizes, ooms = [], [], 0 for i, sample in enumerate(samples): sample = self._prepare_sample(sample) if sample is None: # when sample is None, run forward/backward on a dummy batch # and ignore the resulting gradients sample = self._prepare_sample(self._dummy_batch) ignore_grad = True else: ignore_grad = False try: # forward loss, sample_size, logging_output = self.task.get_loss( self.model, self.criterion, sample, ) if ignore_grad: loss *= 0 if self.args.distributed_world_size > 1: # only all-reduce gradients in the last 
backwards pass if i < len(samples) - 1: self.model.need_reduction = False else: self.model.need_reduction = True # backward self.optimizer.backward(loss) if not ignore_grad: logging_outputs.append(logging_output) sample_sizes.append(sample_size) except RuntimeError as e: if 'out of memory' in str(e): print('| WARNING: ran out of memory, skipping batch') ooms += 1 self.zero_grad() else: raise e if dummy_batch: return None # gather logging outputs from all replicas if self.args.distributed_world_size > 1: logging_outputs, sample_sizes, ooms = zip(*distributed_utils.all_gather_list( [logging_outputs, sample_sizes, ooms], )) logging_outputs = list(chain.from_iterable(logging_outputs)) sample_sizes = list(chain.from_iterable(sample_sizes)) ooms = sum(ooms) if ooms == self.args.distributed_world_size: print('| WARNING: OOM in all workers, skipping update') self.zero_grad() return None # aggregate logging outputs and sample sizes logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_outputs) sample_size = self.criterion.__class__.grad_denom(sample_sizes) if not all(k in logging_output for k in ['ntokens', 'nsentences']): raise Exception(( 'Please update the {}.aggregate_logging_outputs() method to ' 'return ntokens and nsentences' ).format(self.criterion.__class__.__name__)) try: # normalize grads by sample size self.optimizer.multiply_grads(self.args.distributed_world_size / float(sample_size)) # clip grads grad_norm = self.optimizer.clip_grad_norm(self.args.clip_norm) # take an optimization step self.optimizer.step() self._num_updates += 1 # update learning rate self.lr_scheduler.step_update(self._num_updates) # update meters ntokens = logging_output.get('ntokens', 0) nsentences = logging_output.get('nsentences', 0) self.meters['wps'].update(ntokens) self.meters['ups'].update(1.) self.meters['wpb'].update(ntokens) self.meters['bsz'].update(nsentences) self.meters['gnorm'].update(grad_norm) self.meters['clip'].update( 1. 
if grad_norm > self.args.clip_norm and self.args.clip_norm > 0 else 0. ) self.meters['oom'].update(ooms) self.meters['train_loss'].update(logging_output.get('loss', 0), sample_size) if 'nll_loss' in logging_output: self.meters['train_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens) except OverflowError as e: print('| WARNING: overflow detected, ' + str(e)) self.zero_grad() logging_output = None if self.args.fp16: self.meters['loss_scale'].reset() self.meters['loss_scale'].update(self.optimizer.scaler.loss_scale) self.meters['train_wall'].stop() return logging_output def valid_step(self, sample, raise_oom=False): """Do forward pass in evaluation mode.""" with torch.no_grad(): self.model.eval() sample = self._prepare_sample(sample) if sample is None: sample = self._prepare_sample(self._dummy_batch) ignore_results = True else: ignore_results = False try: _loss, sample_size, logging_output = self.task.get_loss( self.model, self.criterion, sample, ) except RuntimeError as e: if 'out of memory' in str(e) and not raise_oom: print('| WARNING: ran out of memory, retrying batch') for p in self.model.parameters(): if p.grad is not None: del p.grad # free some memory torch.cuda.empty_cache() return self.valid_step(sample, raise_oom=True) else: raise e if ignore_results: logging_output, sample_size = {}, 0 # gather logging outputs from all replicas if self.args.distributed_world_size > 1: logging_output, sample_size = zip(*distributed_utils.all_gather_list( [logging_output, sample_size], )) logging_output = list(logging_output) sample_size = list(sample_size) else: logging_output = [logging_output] sample_size = [sample_size] # aggregate logging outputs and sample sizes logging_output = self.criterion.__class__.aggregate_logging_outputs(logging_output) sample_size = self.criterion.__class__.grad_denom(sample_size) # update meters for validation ntokens = logging_output.get('ntokens', 0) self.meters['valid_loss'].update(logging_output.get('loss', 0), sample_size) if 
'nll_loss' in logging_output: self.meters['valid_nll_loss'].update(logging_output.get('nll_loss', 0), ntokens) return logging_output def dummy_train_step(self, dummy_batch): """Dummy training step for warming caching allocator.""" self.train_step(dummy_batch, dummy_batch=True) self.zero_grad() def zero_grad(self): self.optimizer.zero_grad() def lr_step(self, epoch, val_loss=None): """Adjust the learning rate based on the validation loss.""" return self.lr_scheduler.step(epoch, val_loss) def lr_step_update(self, num_updates): """Update the learning rate after each update.""" return self.lr_scheduler.step_update(num_updates) def get_lr(self): """Get the current learning rate.""" return self.optimizer.get_lr() def get_model(self): """Get the (non-wrapped) model instance.""" return self._model def get_meter(self, name): """Get a specific meter by name.""" if name not in self.meters: return None return self.meters[name] def get_num_updates(self): """Get the number of parameters updates.""" return self._num_updates def _prepare_sample(self, sample): if sample is None or len(sample) == 0: return None return utils.move_to_cuda(sample)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/fairseq/utils.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. from collections import defaultdict, OrderedDict import logging import os import re import torch import traceback from torch.serialization import default_restore_location def torch_persistent_save(*args, **kwargs): for i in range(3): try: return torch.save(*args, **kwargs) except Exception: if i == 2: logging.error(traceback.format_exc()) def convert_state_dict_type(state_dict, ttype=torch.FloatTensor): if isinstance(state_dict, dict): cpu_dict = OrderedDict() for k, v in state_dict.items(): cpu_dict[k] = convert_state_dict_type(v) return cpu_dict elif isinstance(state_dict, list): return [convert_state_dict_type(v) for v in state_dict] elif torch.is_tensor(state_dict): return state_dict.type(ttype) else: return state_dict def save_state(filename, args, model, criterion, optimizer, lr_scheduler, num_updates, optim_history=None, extra_state=None): if optim_history is None: optim_history = [] if extra_state is None: extra_state = {} state_dict = { 'args': args, 'model': model.state_dict() if model else {}, 'optimizer_history': optim_history + [ { 'criterion_name': criterion.__class__.__name__, 'optimizer_name': optimizer.__class__.__name__, 'lr_scheduler_state': lr_scheduler.state_dict(), 'num_updates': num_updates, } ], 'last_optimizer_state': convert_state_dict_type(optimizer.state_dict()), 'extra_state': extra_state, } torch_persistent_save(state_dict, filename) def load_model_state(filename, model): if not os.path.exists(filename): return None, [], None state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu')) state = _upgrade_state_dict(state) model.upgrade_state_dict(state['model']) # load model parameters try: model.load_state_dict(state['model'], 
strict=True) except Exception: raise Exception('Cannot load model parameters from checkpoint, ' 'please ensure that the architectures match') return state['extra_state'], state['optimizer_history'], state['last_optimizer_state'] def _upgrade_state_dict(state): """Helper for upgrading old model checkpoints.""" # add optimizer_history if 'optimizer_history' not in state: state['optimizer_history'] = [ { 'criterion_name': 'CrossEntropyCriterion', 'best_loss': state['best_loss'], }, ] state['last_optimizer_state'] = state['optimizer'] del state['optimizer'] del state['best_loss'] # move extra_state into sub-dictionary if 'epoch' in state and 'extra_state' not in state: state['extra_state'] = { 'epoch': state['epoch'], 'batch_offset': state['batch_offset'], 'val_loss': state['val_loss'], } del state['epoch'] del state['batch_offset'] del state['val_loss'] # reduce optimizer history's memory usage (only keep the last state) if 'optimizer' in state['optimizer_history'][-1]: state['last_optimizer_state'] = state['optimizer_history'][-1]['optimizer'] for optim_hist in state['optimizer_history']: del optim_hist['optimizer'] # record the optimizer class name if 'optimizer_name' not in state['optimizer_history'][-1]: state['optimizer_history'][-1]['optimizer_name'] = 'FairseqNAG' # move best_loss into lr_scheduler_state if 'lr_scheduler_state' not in state['optimizer_history'][-1]: state['optimizer_history'][-1]['lr_scheduler_state'] = { 'best': state['optimizer_history'][-1]['best_loss'], } del state['optimizer_history'][-1]['best_loss'] # keep track of number of updates if 'num_updates' not in state['optimizer_history'][-1]: state['optimizer_history'][-1]['num_updates'] = 0 # old model checkpoints may not have separate source/target positions if hasattr(state['args'], 'max_positions') and not hasattr(state['args'], 'max_source_positions'): state['args'].max_source_positions = state['args'].max_positions state['args'].max_target_positions = state['args'].max_positions # use 
stateful training data iterator if 'train_iterator' not in state['extra_state']: state['extra_state']['train_iterator'] = { 'epoch': state['extra_state']['epoch'], 'iterations_in_epoch': state['extra_state'].get('batch_offset', 0), } return state def load_ensemble_for_inference(filenames, task, model_arg_overrides=None): """Load an ensemble of models for inference. model_arg_overrides allows you to pass a dictionary model_arg_overrides -- {'arg_name': arg} -- to override model args that were used during model training """ # load model architectures and weights states = [] for filename in filenames: if not os.path.exists(filename): raise IOError('Model file not found: {}'.format(filename)) state = torch.load(filename, map_location=lambda s, l: default_restore_location(s, 'cpu')) state = _upgrade_state_dict(state) states.append(state) ensemble = [] for state in states: args = state['args'] if model_arg_overrides is not None: args = _override_model_args(args, model_arg_overrides) # build model for ensemble model = task.build_model(args) model.upgrade_state_dict(state['model']) model.load_state_dict(state['model'], strict=True) ensemble.append(model) return ensemble, args def _override_model_args(args, model_arg_overrides): # Uses model_arg_overrides {'arg_name': arg} to override model args for arg_name, arg_val in model_arg_overrides.items(): setattr(args, arg_name, arg_val) return args def move_to_cuda(sample): if len(sample) == 0: return {} def _move_to_cuda(maybe_tensor): if torch.is_tensor(maybe_tensor): return maybe_tensor.cuda() elif isinstance(maybe_tensor, dict): return { key: _move_to_cuda(value) for key, value in maybe_tensor.items() } elif isinstance(maybe_tensor, list): return [_move_to_cuda(x) for x in maybe_tensor] else: return maybe_tensor return _move_to_cuda(sample) INCREMENTAL_STATE_INSTANCE_ID = defaultdict(lambda: 0) def _get_full_incremental_state_key(module_instance, key): module_name = module_instance.__class__.__name__ # assign a unique ID to 
each module instance, so that incremental state is # not shared across module instances if not hasattr(module_instance, '_fairseq_instance_id'): INCREMENTAL_STATE_INSTANCE_ID[module_name] += 1 module_instance._fairseq_instance_id = INCREMENTAL_STATE_INSTANCE_ID[module_name] return '{}.{}.{}'.format(module_name, module_instance._fairseq_instance_id, key) def get_incremental_state(module, incremental_state, key): """Helper for getting incremental state for an nn.Module.""" full_key = _get_full_incremental_state_key(module, key) if incremental_state is None or full_key not in incremental_state: return None return incremental_state[full_key] def set_incremental_state(module, incremental_state, key, value): """Helper for setting incremental state for an nn.Module.""" if incremental_state is not None: full_key = _get_full_incremental_state_key(module, key) incremental_state[full_key] = value def load_align_dict(replace_unk): if replace_unk is None: align_dict = None elif isinstance(replace_unk, str): # Load alignment dictionary for unknown word replacement if it was passed as an argument. align_dict = {} with open(replace_unk, 'r') as f: for line in f: cols = line.split() align_dict[cols[0]] = cols[1] else: # No alignment dictionary provided but we still want to perform unknown word replacement by copying the # original source word. align_dict = {} return align_dict def print_embed_overlap(embed_dict, vocab_dict): embed_keys = set(embed_dict.keys()) vocab_keys = set(vocab_dict.symbols) overlap = len(embed_keys & vocab_keys) print("| Found {}/{} types in embedding file.".format(overlap, len(vocab_dict))) def parse_embedding(embed_path): """Parse embedding text file into a dictionary of word and embedding tensors. The first line can have vocabulary size and dimension. The following lines should contain word and embedding separated by spaces. 
Example: 2 5 the -0.0230 -0.0264 0.0287 0.0171 0.1403 at -0.0395 -0.1286 0.0275 0.0254 -0.0932 """ embed_dict = {} with open(embed_path) as f_embed: next(f_embed) # skip header for line in f_embed: pieces = line.rstrip().split(" ") embed_dict[pieces[0]] = torch.Tensor([float(weight) for weight in pieces[1:]]) return embed_dict def load_embedding(embed_dict, vocab, embedding): for idx in range(len(vocab)): token = vocab[idx] if token in embed_dict: embedding.weight.data[idx] = embed_dict[token] return embedding def replace_unk(hypo_str, src_str, alignment, align_dict, unk): from fairseq import tokenizer # Tokens are strings here hypo_tokens = tokenizer.tokenize_line(hypo_str) # TODO: Very rare cases where the replacement is '<eos>' should be handled gracefully src_tokens = tokenizer.tokenize_line(src_str) + ['<eos>'] for i, ht in enumerate(hypo_tokens): if ht == unk: src_token = src_tokens[alignment[i]] # Either take the corresponding value in the aligned dictionary or just copy the original value. hypo_tokens[i] = align_dict.get(src_token, src_token) return ' '.join(hypo_tokens) def post_process_prediction(hypo_tokens, src_str, alignment, align_dict, tgt_dict, remove_bpe): from fairseq import tokenizer hypo_str = tgt_dict.string(hypo_tokens, remove_bpe) if align_dict is not None: hypo_str = replace_unk(hypo_str, src_str, alignment, align_dict, tgt_dict.unk_string()) if align_dict is not None or remove_bpe is not None: # Convert back to tokens for evaluating with unk replacement or without BPE # Note that the dictionary can be modified inside the method. hypo_tokens = tokenizer.Tokenizer.tokenize(hypo_str, tgt_dict, add_if_not_exist=True) return hypo_tokens, hypo_str, alignment def make_positions(tensor, padding_idx, left_pad, onnx_trace=False): """Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. 
Padding symbols are ignored, but it is necessary to specify whether padding is added on the left side (left_pad=True) or right side (left_pad=False). """ if onnx_trace: range_buf = torch._dim_arange(like=tensor, dim=1) + padding_idx + 1 mask = tensor.ne(padding_idx) positions = range_buf.expand_as(tensor) if left_pad: positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1) return positions * mask.long() + positions * (1 - mask.long()) max_pos = padding_idx + 1 + tensor.size(1) if not hasattr(make_positions, 'range_buf'): make_positions.range_buf = tensor.new() make_positions.range_buf = make_positions.range_buf.type_as(tensor) if make_positions.range_buf.numel() < max_pos: torch.arange(padding_idx + 1, max_pos, out=make_positions.range_buf) mask = tensor.ne(padding_idx) positions = make_positions.range_buf[:tensor.size(1)].expand_as(tensor) if left_pad: positions = positions - mask.size(1) + mask.long().sum(dim=1).unsqueeze(1) return tensor.clone().masked_scatter_(mask, positions[mask]) def strip_pad(tensor, pad): return tensor[tensor.ne(pad)] def buffered_arange(max): if not hasattr(buffered_arange, 'buf'): buffered_arange.buf = torch.LongTensor() if max > buffered_arange.buf.numel(): torch.arange(max, out=buffered_arange.buf) return buffered_arange.buf[:max] def convert_padding_direction(src_tokens, padding_idx, right_to_left=False, left_to_right=False): assert right_to_left ^ left_to_right pad_mask = src_tokens.eq(padding_idx) if not pad_mask.any(): # no padding, return early return src_tokens if left_to_right and not pad_mask[:, 0].any(): # already right padded return src_tokens if right_to_left and not pad_mask[:, -1].any(): # already left padded return src_tokens max_len = src_tokens.size(1) range = buffered_arange(max_len).type_as(src_tokens).expand_as(src_tokens) num_pads = pad_mask.long().sum(dim=1, keepdim=True) if right_to_left: index = torch.remainder(range - num_pads, max_len) else: index = torch.remainder(range + num_pads, max_len) 
return src_tokens.gather(1, index) def item(tensor): if hasattr(tensor, 'item'): return tensor.item() if hasattr(tensor, '__getitem__'): return tensor[0] return tensor def clip_grad_norm_(tensor, max_norm): grad_norm = item(torch.norm(tensor)) if grad_norm > max_norm > 0: clip_coef = max_norm / (grad_norm + 1e-6) tensor.mul_(clip_coef) return grad_norm def fill_with_neg_inf(t): """FP16-compatible function that fills a tensor with -inf.""" return t.float().fill_(float('-inf')).type_as(t) def checkpoint_paths(path, pattern=r'checkpoint(\d+)\.pt'): """Retrieves all checkpoints found in `path` directory. Checkpoints are identified by matching filename to the specified pattern. If the pattern contains groups, the result will be sorted by the first group in descending order. """ pt_regexp = re.compile(pattern) files = os.listdir(path) entries = [] for i, f in enumerate(files): m = pt_regexp.fullmatch(f) if m is not None: idx = int(m.group(1)) if len(m.groups()) > 0 else i entries.append((idx, m.group(0))) return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)] def resolve_max_positions(*args): """Resolve max position constraints from multiple sources.""" def nullsafe_min(l): minim = None for item in l: if minim is None: minim = item elif item is not None and item < minim: minim = item return minim max_positions = None for arg in args: if max_positions is None: max_positions = arg elif arg is not None: if isinstance(arg, float) or isinstance(arg, int): max_positions = min(max_positions, arg) else: max_positions = tuple( map(nullsafe_min, zip(max_positions, arg)) ) return max_positions
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/generate.py
Python
#!/usr/bin/env python3 -u # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. """ Translate pre-processed data with a trained model. """ import torch from fairseq import bleu, data, options, progress_bar, tasks, tokenizer, utils from fairseq.meters import StopwatchMeter, TimeMeter from fairseq.sequence_generator import SequenceGenerator from fairseq.sequence_scorer import SequenceScorer def main(args): assert args.path is not None, '--path required for generation!' assert not args.sampling or args.nbest == args.beam, \ '--sampling requires --nbest to be equal to --beam' assert args.replace_unk is None or args.raw_text, \ '--replace-unk requires a raw text dataset (--raw-text)' if args.max_tokens is None and args.max_sentences is None: args.max_tokens = 12000 print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) print('| {} {} {} examples'.format(args.data, args.gen_subset, len(task.dataset(args.gen_subset)))) # Set dictionaries src_dict = task.source_dictionary tgt_dict = task.target_dictionary # Load ensemble print('| loading model(s) from {}'.format(args.path)) models, _ = utils.load_ensemble_for_inference(args.path.split(':'), task, model_arg_overrides=eval(args.model_overrides)) # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( 
dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=8, num_shards=args.num_shards, shard_id=args.shard_id, ).next_epoch_itr(shuffle=False) # Initialize generator gen_timer = StopwatchMeter() if args.score_reference: translator = SequenceScorer(models, task.target_dictionary) else: translator = SequenceGenerator( models, task.target_dictionary, beam_size=args.beam, minlen=args.min_len, stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized), len_penalty=args.lenpen, unk_penalty=args.unkpen, sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature, diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength, ) if use_cuda: translator.cuda() # Generate and compute BLEU score scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk()) num_sentences = 0 has_target = True with progress_bar.build_progress_bar(args, itr) as t: if args.score_reference: translations = translator.score_batched_itr(t, cuda=use_cuda, timer=gen_timer) else: translations = translator.generate_batched_itr( t, maxlen_a=args.max_len_a, maxlen_b=args.max_len_b, cuda=use_cuda, timer=gen_timer, prefix_size=args.prefix_size, ) wps_meter = TimeMeter() for sample_id, src_tokens, target_tokens, hypos in translations: # Process input and ground truth has_target = target_tokens is not None target_tokens = target_tokens.int().cpu() if has_target else None # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id) target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id) else: src_str = src_dict.string(src_tokens, args.remove_bpe) if has_target: target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True) if not args.quiet: print('S-{}\t{}'.format(sample_id, src_str)) if has_target: print('T-{}\t{}'.format(sample_id, target_str)) # Process top predictions for i, hypo in enumerate(hypos[:min(len(hypos), args.nbest)]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None, align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe, ) if not args.quiet: print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str)) print('P-{}\t{}'.format( sample_id, ' '.join(map( lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist(), )) )) if args.print_alignment: print('A-{}\t{}'.format( sample_id, ' '.join(map(lambda x: str(utils.item(x)), alignment)) )) # Score only the top hypothesis if has_target and i == 0: if align_dict is not None or args.remove_bpe is not None: # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tokenizer.Tokenizer.tokenize( target_str, tgt_dict, add_if_not_exist=True) scorer.add(target_tokens, hypo_tokens) wps_meter.update(src_tokens.size(0)) t.log({'wps': round(wps_meter.avg)}) num_sentences += 1 print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. 
/ gen_timer.avg)) if has_target: print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string())) if __name__ == '__main__': parser = options.get_generation_parser() args = options.parse_args_and_arch(parser) main(args)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/interactive.py
Python
#!/usr/bin/env python3 -u # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. """ Translate raw text with a trained model. Batches data on-the-fly. """ from collections import namedtuple import numpy as np import sys import torch from fairseq import data, options, tasks, tokenizer, utils from fairseq.sequence_generator import SequenceGenerator Batch = namedtuple('Batch', 'srcs tokens lengths') Translation = namedtuple('Translation', 'src_str hypos pos_scores alignments') def buffered_read(buffer_size): buffer = [] for src_str in sys.stdin: buffer.append(src_str.strip()) if len(buffer) >= buffer_size: yield buffer buffer = [] if len(buffer) > 0: yield buffer def make_batches(lines, args, task, max_positions): tokens = [ tokenizer.Tokenizer.tokenize(src_str, task.source_dictionary, add_if_not_exist=False).long() for src_str in lines ] lengths = np.array([t.numel() for t in tokens]) itr = task.get_batch_iterator( dataset=data.LanguagePairDataset(tokens, lengths, task.source_dictionary), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=max_positions, ).next_epoch_itr(shuffle=False) for batch in itr: yield Batch( srcs=[lines[i] for i in batch['id']], tokens=batch['net_input']['src_tokens'], lengths=batch['net_input']['src_lengths'], ), batch['id'] def main(args): if args.buffer_size < 1: args.buffer_size = 1 if args.max_tokens is None and args.max_sentences is None: args.max_sentences = 1 assert not args.sampling or args.nbest == args.beam, \ '--sampling requires --nbest to be equal to --beam' assert not args.max_sentences or args.max_sentences <= args.buffer_size, \ '--max-sentences/--batch-size cannot be larger than --buffer-size' print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Setup task, 
e.g., translation task = tasks.setup_task(args) # Load ensemble print('| loading model(s) from {}'.format(args.path)) model_paths = args.path.split(':') models, model_args = utils.load_ensemble_for_inference(model_paths, task, model_arg_overrides=eval(args.model_overrides)) # Set dictionaries tgt_dict = task.target_dictionary # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() # Initialize generator translator = SequenceGenerator( models, tgt_dict, beam_size=args.beam, minlen=args.min_len, stop_early=(not args.no_early_stop), normalize_scores=(not args.unnormalized), len_penalty=args.lenpen, unk_penalty=args.unkpen, sampling=args.sampling, sampling_topk=args.sampling_topk, sampling_temperature=args.sampling_temperature, diverse_beam_groups=args.diverse_beam_groups, diverse_beam_strength=args.diverse_beam_strength, ) if use_cuda: translator.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) def make_result(src_str, hypos): result = Translation( src_str='O\t{}'.format(src_str), hypos=[], pos_scores=[], alignments=[], ) # Process top predictions for hypo in hypos[:min(len(hypos), args.nbest)]: hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'].int().cpu() if hypo['alignment'] is not None else None, align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe, ) result.hypos.append('H\t{}\t{}'.format(hypo['score'], hypo_str)) result.pos_scores.append('P\t{}'.format( ' '.join(map( lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist(), )) )) result.alignments.append( 'A\t{}'.format(' '.join(map(lambda x: str(utils.item(x)), alignment))) if 
args.print_alignment else None ) return result def process_batch(batch): tokens = batch.tokens lengths = batch.lengths if use_cuda: tokens = tokens.cuda() lengths = lengths.cuda() encoder_input = {'src_tokens': tokens, 'src_lengths': lengths} translations = translator.generate( encoder_input, maxlen=int(args.max_len_a * tokens.size(1) + args.max_len_b), ) return [make_result(batch.srcs[i], t) for i, t in enumerate(translations)] max_positions = utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ) if args.buffer_size > 1: print('| Sentence buffer size:', args.buffer_size) print('| Type the input sentence and press return:') for inputs in buffered_read(args.buffer_size): indices = [] results = [] for batch, batch_indices in make_batches(inputs, args, task, max_positions): indices.extend(batch_indices) results += process_batch(batch) for i in np.argsort(indices): result = results[i] print(result.src_str) for hypo, pos_scores, align in zip(result.hypos, result.pos_scores, result.alignments): print(hypo) print(pos_scores) if align is not None: print(align) if __name__ == '__main__': parser = options.get_generation_parser(interactive=True) args = options.parse_args_and_arch(parser) main(args)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/data-preprocessing/prepare-iwslt14.sh
Shell
#!/usr/bin/env bash # # Adapted from https://github.com/facebookresearch/MIXER/blob/master/prepareData.sh echo 'Cloning Moses github repository (for tokenization scripts)...' git clone https://github.com/moses-smt/mosesdecoder.git echo 'Cloning Subword NMT repository (for BPE pre-processing)...' git clone https://github.com/rsennrich/subword-nmt.git SCRIPTS=mosesdecoder/scripts TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl LC=$SCRIPTS/tokenizer/lowercase.perl CLEAN=$SCRIPTS/training/clean-corpus-n.perl BPEROOT=subword-nmt BPE_TOKENS=10000 URL="https://wit3.fbk.eu/archive/2014-01/texts/de/en/de-en.tgz" GZ=de-en.tgz if [ ! -d "$SCRIPTS" ]; then echo "Please set SCRIPTS variable correctly to point to Moses scripts." exit fi src=de tgt=en lang=de-en prep=iwslt14.tokenized.de-en tmp=$prep/tmp orig=orig mkdir -p $orig $tmp $prep echo "Downloading data from ${URL}..." cd $orig wget "$URL" if [ -f $GZ ]; then echo "Data successfully downloaded." else echo "Data not successfully downloaded." exit fi tar zxvf $GZ cd .. echo "pre-processing train data..." for l in $src $tgt; do f=train.tags.$lang.$l tok=train.tags.$lang.tok.$l cat $orig/$lang/$f | \ grep -v '<url>' | \ grep -v '<talkid>' | \ grep -v '<keywords>' | \ sed -e 's/<title>//g' | \ sed -e 's/<\/title>//g' | \ sed -e 's/<description>//g' | \ sed -e 's/<\/description>//g' | \ perl $TOKENIZER -threads 8 -l $l > $tmp/$tok echo "" done perl $CLEAN -ratio 1.5 $tmp/train.tags.$lang.tok $src $tgt $tmp/train.tags.$lang.clean 1 175 for l in $src $tgt; do perl $LC < $tmp/train.tags.$lang.clean.$l > $tmp/train.tags.$lang.$l done echo "pre-processing valid/test data..." for l in $src $tgt; do for o in `ls $orig/$lang/IWSLT14.TED*.$l.xml`; do fname=${o##*/} f=$tmp/${fname%.*} echo $o $f grep '<seg id' $o | \ sed -e 's/<seg id="[0-9]*">\s*//g' | \ sed -e 's/\s*<\/seg>\s*//g' | \ sed -e "s/\’/\'/g" | \ perl $TOKENIZER -threads 8 -l $l | \ perl $LC > $f echo "" done done echo "creating train, valid, test..." 
for l in $src $tgt; do awk '{if (NR%23 == 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/valid.$l awk '{if (NR%23 != 0) print $0; }' $tmp/train.tags.de-en.$l > $tmp/train.$l cat $tmp/IWSLT14.TED.dev2010.de-en.$l \ $tmp/IWSLT14.TEDX.dev2012.de-en.$l \ $tmp/IWSLT14.TED.tst2010.de-en.$l \ $tmp/IWSLT14.TED.tst2011.de-en.$l \ $tmp/IWSLT14.TED.tst2012.de-en.$l \ > $tmp/test.$l done TRAIN=$tmp/train.en-de BPE_CODE=$prep/code rm -f $TRAIN for l in $src $tgt; do cat $tmp/train.$l >> $TRAIN done echo "learn_bpe.py on ${TRAIN}..." python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE for L in $src $tgt; do for f in train.$L valid.$L test.$L; do echo "apply_bpe.py to ${f}..." python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $prep/$f done done
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/data-preprocessing/prepare-wmt14en2de.sh
Shell
#!/usr/bin/env bash
# Adapted from https://github.com/pytorch/fairseq/blob/v0.6.0/examples/translation/prepare-wmt14en2de.sh
#
# Downloads the WMT'14 En-De corpora, tokenizes them with Moses, learns a
# joint BPE with subword-nmt, and writes the final train/valid/test splits
# under $prep for fairseq preprocessing.

echo 'Cloning Moses github repository (for tokenization scripts)...'
git clone https://github.com/moses-smt/mosesdecoder.git

echo 'Cloning Subword NMT repository (for BPE pre-processing)...'
git clone https://github.com/rsennrich/subword-nmt.git

SCRIPTS=mosesdecoder/scripts
TOKENIZER=$SCRIPTS/tokenizer/tokenizer.perl
CLEAN=$SCRIPTS/training/clean-corpus-n.perl
NORM_PUNC=$SCRIPTS/tokenizer/normalize-punctuation.perl
REM_NON_PRINT_CHAR=$SCRIPTS/tokenizer/remove-non-printing-char.perl
BPEROOT=subword-nmt
BPE_TOKENS=32764

URLS=(
    "http://statmt.org/wmt13/training-parallel-europarl-v7.tgz"
    "http://statmt.org/wmt13/training-parallel-commoncrawl.tgz"
    "http://data.statmt.org/wmt17/translation-task/training-parallel-nc-v12.tgz"
    "http://data.statmt.org/wmt17/translation-task/dev.tgz"
    "http://statmt.org/wmt14/test-full.tgz"
)
FILES=(
    "training-parallel-europarl-v7.tgz"
    "training-parallel-commoncrawl.tgz"
    "training-parallel-nc-v12.tgz"
    "dev.tgz"
    "test-full.tgz"
)
CORPORA=(
    "training/europarl-v7.de-en"
    "commoncrawl.de-en"
    "training/news-commentary-v12.de-en"
)

if [ ! -d "$SCRIPTS" ]; then
    echo "Please set SCRIPTS variable correctly to point to Moses scripts."
    # FIX: the original used a bare `exit`, which returns status 0 and hides
    # the failure from callers.
    exit 1
fi

src=en
tgt=de
lang=en-de
prep=wmt14_en_de
tmp=$prep/tmp
orig=orig
dev=dev/newstest2013

mkdir -p $orig $tmp $prep

cd $orig
for ((i=0;i<${#URLS[@]};++i)); do
    file=${FILES[i]}
    if [ -f $file ]; then
        echo "$file already exists, skipping download"
    else
        url=${URLS[i]}
        wget "$url"
        if [ -f $file ]; then
            echo "$url successfully downloaded."
        else
            echo "$url not successfully downloaded."
            # FIX: `exit -1` is not a valid exit status (must be 0-255);
            # use 1 to signal failure portably.
            exit 1
        fi
        if [ ${file: -4} == ".tgz" ]; then
            tar zxvf $file
        elif [ ${file: -4} == ".tar" ]; then
            tar xvf $file
        fi
    fi
done
cd ..

echo "pre-processing train data..."
for l in $src $tgt; do
    # FIX: -f avoids a spurious "No such file" error on the first run, when
    # the file to be rebuilt does not exist yet.
    rm -f $tmp/train.tags.$lang.tok.$l
    for f in "${CORPORA[@]}"; do
        cat $orig/$f.$l | \
            perl $NORM_PUNC $l | \
            perl $REM_NON_PRINT_CHAR | \
            perl $TOKENIZER -threads 8 -a -l $l >> $tmp/train.tags.$lang.tok.$l
    done
done

echo "pre-processing test data..."
for l in $src $tgt; do
    if [ "$l" == "$src" ]; then
        t="src"
    else
        t="ref"
    fi
    # Extract segment text from the SGM files and tokenize it.
    grep '<seg id' $orig/test-full/newstest2014-deen-$t.$l.sgm | \
        sed -e 's/<seg id="[0-9]*">\s*//g' | \
        sed -e 's/\s*<\/seg>\s*//g' | \
        sed -e "s/\’/\'/g" | \
        perl $TOKENIZER -threads 8 -a -l $l > $tmp/test.$l
    echo ""
done

# Drop training pairs whose length is outside [1, 80] tokens.
perl $CLEAN $tmp/train.tags.$lang.tok $src $tgt $tmp/train 1 80

# use newstest2013 for valid
echo "pre-processing valid data..."
for l in $src $tgt; do
    # FIX: -f for the same first-run reason as above.
    rm -f $tmp/valid.$l
    cat $orig/$dev.$l | \
        perl $NORM_PUNC $l | \
        perl $REM_NON_PRINT_CHAR | \
        perl $TOKENIZER -threads 8 -a -l $l >> $tmp/valid.$l
done

# Learn a joint BPE over the concatenated source+target training data.
TRAIN=$tmp/train.de-en
BPE_CODE=$prep/code
rm -f $TRAIN
for l in $src $tgt; do
    cat $tmp/train.$l >> $TRAIN
done

echo "learn_bpe.py on ${TRAIN}..."
python $BPEROOT/learn_bpe.py -s $BPE_TOKENS < $TRAIN > $BPE_CODE

for L in $src $tgt; do
    for f in train.$L valid.$L test.$L; do
        echo "apply_bpe.py to ${f}..."
        python $BPEROOT/apply_bpe.py -c $BPE_CODE < $tmp/$f > $tmp/bpe.$f
    done
done

for L in $src $tgt; do
    cp $tmp/bpe.train.$L $prep/train.$L
    cp $tmp/bpe.valid.$L $prep/valid.$L
done
for L in $src $tgt; do
    cp $tmp/bpe.test.$L $prep/test.$L
done
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/test/test-iwslt14.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH PROBLEM=iwslt14_de_en ARCH=transformer_iwslt_de_en_macaron_v2 BEAM_SIZE=5 LPEN=1.0 DATA_PATH=data-bin/iwslt14.tokenized.de-en.joined/ MOSES_PATH=macaron-scripts/data-preprocessing/mosesdecoder OUTPUT_PATH=log/$PROBLEM/$ARCH TRANS_PATH=$OUTPUT_PATH/trans CKPT=$1 CKPT_ID=$(echo $CKPT | sed 's/checkpoint//g' | sed 's/\.pt//g' | sed 's/^_//g') RESULT_PATH=$TRANS_PATH/$CKPT_ID/ mkdir -p $RESULT_PATH python generate.py \ $DATA_PATH \ --path $OUTPUT_PATH/$CKPT \ --batch-size 128 \ --beam $BEAM_SIZE \ --lenpen $LPEN \ --remove-bpe \ --log-format simple \ --source-lang de \ --target-lang en \ > $RESULT_PATH/res.txt echo -n $CKPT_ID "" tail -n 1 $RESULT_PATH/res.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/test/test-wmt14-base.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH PROBLEM=wmt14_en_de ARCH=transformer_wmt_en_de_macaron_v2 SRC=en TGT=de BEAM_SIZE=4 LPEN=0.6 DATA_PATH=data-bin/wmt14_en_de_joined_dict TEST_DATA_PATH=$DATA_PATH/raw-test MOSES_PATH=macaron-scripts/data-preprocessing/mosesdecoder OUTPUT_PATH=log/$PROBLEM/$ARCH TRANS_PATH=$OUTPUT_PATH/trans TEST_TAG=wmt14 CKPT=$1 CKPT_ID=$(echo $CKPT | sed 's/checkpoint//g' | sed 's/\.pt//g' | sed 's/^_//g') RESULT_PATH=$TRANS_PATH/$CKPT_ID/ mkdir -p $RESULT_PATH python interactive.py \ $DATA_PATH \ --path $OUTPUT_PATH/$CKPT \ --batch-size 128 \ --beam $BEAM_SIZE \ --lenpen $LPEN \ --remove-bpe \ --log-format simple \ --buffer-size 12800 \ --source-lang en \ --target-lang de \ < $TEST_DATA_PATH/en-de.en.bpe \ > $RESULT_PATH/res.txt cat $RESULT_PATH/res.txt | awk -F '\t' '/^H\t/ {print $3}' > $RESULT_PATH/hyp.txt cat $RESULT_PATH/hyp.txt | perl $MOSES_PATH/scripts/tokenizer/detokenizer.perl -q -threads 8 -a -l $TGT > $RESULT_PATH/hyp.detok.txt cat $RESULT_PATH/hyp.detok.txt | perl $MOSES_PATH/scripts/tokenizer/tokenizer.perl -l $TGT > $RESULT_PATH/hyp.tok.txt cat $RESULT_PATH/hyp.tok.txt | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $RESULT_PATH/hyp.tok.atat.txt perl $MOSES_PATH/scripts/generic/multi-bleu.perl $TEST_DATA_PATH/en-de.de.tok.atat < $RESULT_PATH/hyp.tok.atat.txt > $RESULT_PATH/bleu.txt # cat $RESULT_PATH/hyp.detok.txt | sacrebleu -t $TEST_TAG -l $SRC-$TGT --width 2 > $RESULT_PATH/bleu.txt echo -n $CKPT_ID "" cat $RESULT_PATH/bleu.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/test/test-wmt14-big.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH PROBLEM=wmt14_en_de ARCH=transformer_wmt_en_de_big_t2t_macaron SRC=en TGT=de BEAM_SIZE=4 LPEN=0.6 DATA_PATH=data-bin/wmt14_en_de_joined_dict TEST_DATA_PATH=$DATA_PATH/raw-test MOSES_PATH=macaron-scripts/data-preprocessing/mosesdecoder OUTPUT_PATH=log/$PROBLEM/$ARCH TRANS_PATH=$OUTPUT_PATH/trans TEST_TAG=wmt14 CKPT=$1 CKPT_ID=$(echo $CKPT | sed 's/checkpoint//g' | sed 's/\.pt//g' | sed 's/^_//g') RESULT_PATH=$TRANS_PATH/$CKPT_ID/ mkdir -p $RESULT_PATH python interactive.py \ $DATA_PATH \ --path $OUTPUT_PATH/$CKPT \ --batch-size 128 \ --beam $BEAM_SIZE \ --lenpen $LPEN \ --remove-bpe \ --log-format simple \ --buffer-size 12800 \ --source-lang en \ --target-lang de \ < $TEST_DATA_PATH/en-de.en.bpe \ > $RESULT_PATH/res.txt cat $RESULT_PATH/res.txt | awk -F '\t' '/^H\t/ {print $3}' > $RESULT_PATH/hyp.txt cat $RESULT_PATH/hyp.txt | perl $MOSES_PATH/scripts/tokenizer/detokenizer.perl -q -threads 8 -a -l $TGT > $RESULT_PATH/hyp.detok.txt cat $RESULT_PATH/hyp.detok.txt | perl $MOSES_PATH/scripts/tokenizer/tokenizer.perl -l $TGT > $RESULT_PATH/hyp.tok.txt cat $RESULT_PATH/hyp.tok.txt | perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' > $RESULT_PATH/hyp.tok.atat.txt perl $MOSES_PATH/scripts/generic/multi-bleu.perl $TEST_DATA_PATH/en-de.de.tok.atat < $RESULT_PATH/hyp.tok.atat.txt > $RESULT_PATH/bleu.txt # cat $RESULT_PATH/hyp.detok.txt | sacrebleu -t $TEST_TAG -l $SRC-$TGT --width 2 > $RESULT_PATH/bleu.txt echo -n $CKPT_ID "" cat $RESULT_PATH/bleu.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/train/train-iwslt14.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH model=transformer PROBLEM=iwslt14_de_en ARCH=transformer_iwslt_de_en_macaron_v2 DATA_PATH=data-bin/iwslt14.tokenized.de-en.joined/ OUTPUT_PATH=log/$PROBLEM/$ARCH mkdir -p $OUTPUT_PATH python train.py $DATA_PATH \ --seed 1 \ --arch $ARCH --share-all-embeddings \ --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ --dropout 0.3 \ --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 8000 \ --lr 0.0015 --min-lr 1e-09 \ --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --weight-decay 0.0001 \ --max-tokens 4096 --save-dir $OUTPUT_PATH \ --update-freq 1 --no-progress-bar --log-interval 50 \ --ddp-backend no_c10d \ --save-interval-updates 10000 --keep-interval-updates 20 \ | tee -a $OUTPUT_PATH/train_log.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/train/train-wmt14-base.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH model=transformer PROBLEM=wmt14_en_de ARCH=transformer_wmt_en_de_macaron_v2 DATA_PATH=data-bin/wmt14_en_de_joined_dict/ OUTPUT_PATH=log/$PROBLEM/$ARCH mkdir -p $OUTPUT_PATH # Assume training on 4 P40 GPUs. Change the --max-tokens and --update-freq to match your hardware settings. python train.py $DATA_PATH \ --seed 1 \ --arch $ARCH --share-all-embeddings \ --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 8000 \ --lr 0.0015 --min-lr 1e-09 \ --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --weight-decay 0.0 \ --max-tokens 8192 --save-dir $OUTPUT_PATH \ --update-freq 1 --no-progress-bar --log-interval 50 \ --ddp-backend no_c10d \ --save-interval-updates 10000 --keep-interval-updates 20 \ | tee -a $OUTPUT_PATH/train_log.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/train/train-wmt14-big-distributed.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH model=transformer PROBLEM=wmt14_en_de ARCH=transformer_wmt_en_de_big_t2t_macaron DATA_PATH=data-bin/wmt14_en_de_joined_dict/ OUTPUT_PATH=log/$PROBLEM/$ARCH mkdir -p $OUTPUT_PATH # Example usage with 8 * 4 = 32 P40 GPUs. Change the --max-tokens and --update-freq to match your hardware settings. MASTER_HOST="0.0.0.0" # Replace it with your master's IP python distributed_train.py $DATA_PATH \ --distributed-init-method tcp://$MASTER_HOST:23456 \ --distributed-world-size $OMPI_COMM_WORLD_SIZE \ --distributed-rank $OMPI_COMM_WORLD_RANK \ --device-id $OMPI_COMM_WORLD_LOCAL_RANK \ --distributed-backend nccl \ --seed 1 \ --arch $ARCH --share-all-embeddings \ --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 8000 \ --lr 0.003 --min-lr 1e-09 \ --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --weight-decay 0.0 \ --max-tokens 4096 --save-dir $OUTPUT_PATH \ --update-freq 16 --no-progress-bar --log-interval 50 \ --ddp-backend no_c10d \ --save-interval-updates 10000 --keep-interval-updates 20 \ | tee -a $OUTPUT_PATH/train_log.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/macaron-scripts/train/train-wmt14-big.sh
Shell
#!/usr/bin/env bash CODE_PATH=. cd $CODE_PATH export PYTHONPATH=$CODE_PATH:$PYTHONPATH model=transformer PROBLEM=wmt14_en_de ARCH=transformer_wmt_en_de_big_t2t_macaron DATA_PATH=data-bin/wmt14_en_de_joined_dict/ OUTPUT_PATH=log/$PROBLEM/$ARCH mkdir -p $OUTPUT_PATH # Assume training on 4 P40 GPUs. Change the --max-tokens and --update-freq to match your hardware settings. python train.py $DATA_PATH \ --seed 1 \ --arch $ARCH --share-all-embeddings \ --optimizer adam --adam-betas '(0.9, 0.98)' --clip-norm 0.0 \ --lr-scheduler inverse_sqrt --warmup-init-lr 1e-07 --warmup-updates 8000 \ --lr 0.003 --min-lr 1e-09 \ --criterion label_smoothed_cross_entropy --label-smoothing 0.1 --weight-decay 0.0 \ --max-tokens 4096 --save-dir $OUTPUT_PATH \ --update-freq 16 --no-progress-bar --log-interval 50 \ --ddp-backend no_c10d \ --save-interval-updates 10000 --keep-interval-updates 20 \ | tee -a $OUTPUT_PATH/train_log.txt
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/multiprocessing_train.py
Python
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""Single-node multi-GPU training: spawn one training process per GPU."""

import os
import random
import signal
import torch

from fairseq import distributed_utils, options
from train import main as single_process_main


def main(args):
    """Configure single-node distributed settings and spawn one worker per GPU."""
    # Set distributed training parameters for a single node.
    args.distributed_world_size = torch.cuda.device_count()
    port = random.randint(10000, 20000)
    args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
    args.distributed_init_host = 'localhost'
    args.distributed_port = port + 1

    mp = torch.multiprocessing.get_context('spawn')

    # Create a thread to listen for errors in the child processes.
    error_queue = mp.SimpleQueue()
    error_handler = ErrorHandler(error_queue)

    # Train with multiprocessing.
    procs = []
    for rank in range(args.distributed_world_size):
        args.distributed_rank = rank
        args.device_id = rank
        procs.append(mp.Process(target=run, args=(args, error_queue, ), daemon=True))
        procs[rank].start()
        error_handler.add_child(procs[rank].pid)

    for proc in procs:
        proc.join()


def run(args, error_queue):
    """Worker entry point: initialize distributed state and run training."""
    try:
        args.distributed_rank = distributed_utils.distributed_init(args)
        single_process_main(args)
    except KeyboardInterrupt:
        pass  # killed by parent, do nothing
    except Exception:
        # propagate exception to parent process, keeping original traceback
        import traceback
        error_queue.put((args.distributed_rank, traceback.format_exc()))


class ErrorHandler(object):
    """A class that listens for exceptions in children processes and
    propagates the tracebacks to the parent process."""

    def __init__(self, error_queue):
        import signal
        import threading
        self.error_queue = error_queue
        self.children_pids = []
        # Daemon thread that blocks on the error queue.
        self.error_thread = threading.Thread(target=self.error_listener, daemon=True)
        self.error_thread.start()
        signal.signal(signal.SIGUSR1, self.signal_handler)

    def add_child(self, pid):
        """Register a child pid so it can be interrupted on failure."""
        self.children_pids.append(pid)

    def error_listener(self):
        # Wait for a child to report a traceback, put it back for the
        # signal handler, and wake the main thread (Python runs signal
        # handlers in the main thread only).
        (rank, original_trace) = self.error_queue.get()
        self.error_queue.put((rank, original_trace))
        os.kill(os.getpid(), signal.SIGUSR1)

    def signal_handler(self, signalnum, stackframe):
        for pid in self.children_pids:
            os.kill(pid, signal.SIGINT)  # kill children processes
        (rank, original_trace) = self.error_queue.get()
        msg = "\n\n-- Tracebacks above this line can probably be ignored --\n\n"
        msg += original_trace
        raise Exception(msg)


if __name__ == '__main__':
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)
    main(args)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/preprocess.py
Python
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Data pre-processing: build vocabularies and binarize training data.
"""

import argparse
import os
import shutil
from collections import Counter
from itertools import zip_longest
from multiprocessing import Pool, Manager, Process

from fairseq.data import indexed_dataset, dictionary
from fairseq.tokenizer import Tokenizer, tokenize_line


def get_parser():
    """Build the argument parser for the pre-processing CLI."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--source-lang', default=None, metavar='SRC',
                        help='source language')
    parser.add_argument('-t', '--target-lang', default=None, metavar='TARGET',
                        help='target language')
    parser.add_argument('--trainpref', metavar='FP', default=None,
                        help='train file prefix')
    parser.add_argument('--validpref', metavar='FP', default=None,
                        help='comma separated, valid file prefixes')
    parser.add_argument('--testpref', metavar='FP', default=None,
                        help='comma separated, test file prefixes')
    parser.add_argument('--destdir', metavar='DIR', default='data-bin',
                        help='destination dir')
    parser.add_argument('--thresholdtgt', metavar='N', default=0, type=int,
                        help='map words appearing less than threshold times to unknown')
    parser.add_argument('--thresholdsrc', metavar='N', default=0, type=int,
                        help='map words appearing less than threshold times to unknown')
    parser.add_argument('--tgtdict', metavar='FP',
                        help='reuse given target dictionary')
    parser.add_argument('--srcdict', metavar='FP',
                        help='reuse given source dictionary')
    parser.add_argument('--nwordstgt', metavar='N', default=-1, type=int,
                        help='number of target words to retain')
    parser.add_argument('--nwordssrc', metavar='N', default=-1, type=int,
                        help='number of source words to retain')
    parser.add_argument('--alignfile', metavar='ALIGN', default=None,
                        help='an alignment file (optional)')
    parser.add_argument('--output-format', metavar='FORMAT', default='binary',
                        choices=['binary', 'raw'],
                        help='output format (optional)')
    parser.add_argument('--joined-dictionary', action='store_true',
                        help='Generate joined dictionary')
    parser.add_argument('--only-source', action='store_true',
                        help='Only process the source language')
    parser.add_argument('--padding-factor', metavar='N', default=8, type=int,
                        help='Pad dictionary size to be multiple of N')
    parser.add_argument('--workers', metavar='N', default=1, type=int,
                        help='number of parallel workers')
    return parser


def main(args):
    """Build (or load) dictionaries, then binarize every requested split."""
    print(args)
    os.makedirs(args.destdir, exist_ok=True)
    target = not args.only_source

    def build_dictionary(filenames):
        # Accumulate token counts from every file into one Dictionary.
        d = dictionary.Dictionary()
        for filename in filenames:
            Tokenizer.add_file_to_dictionary(filename, d, tokenize_line, args.workers)
        return d

    def train_path(lang):
        return '{}{}'.format(args.trainpref, ('.' + lang) if lang else '')

    def file_name(prefix, lang):
        fname = prefix
        if lang is not None:
            fname += f'.{lang}'
        return fname

    def dest_path(prefix, lang):
        return os.path.join(args.destdir, file_name(prefix, lang))

    def dict_path(lang):
        return dest_path('dict', lang) + '.txt'

    if args.joined_dictionary:
        assert not args.srcdict, 'cannot combine --srcdict and --joined-dictionary'
        assert not args.tgtdict, 'cannot combine --tgtdict and --joined-dictionary'
        src_dict = build_dictionary(set([
            train_path(lang) for lang in [args.source_lang, args.target_lang]
        ]))
        tgt_dict = src_dict
    else:
        if args.srcdict:
            src_dict = dictionary.Dictionary.load(args.srcdict)
        else:
            assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
            src_dict = build_dictionary([train_path(args.source_lang)])
        if target:
            if args.tgtdict:
                tgt_dict = dictionary.Dictionary.load(args.tgtdict)
            else:
                assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
                tgt_dict = build_dictionary([train_path(args.target_lang)])

    src_dict.finalize(
        threshold=args.thresholdsrc,
        nwords=args.nwordssrc,
        padding_factor=args.padding_factor,
    )
    src_dict.save(dict_path(args.source_lang))
    if target:
        if not args.joined_dictionary:
            tgt_dict.finalize(
                threshold=args.thresholdtgt,
                nwords=args.nwordstgt,
                padding_factor=args.padding_factor,
            )
        tgt_dict.save(dict_path(args.target_lang))

    def make_binary_dataset(input_prefix, output_prefix, lang, num_workers):
        vocab = dictionary.Dictionary.load(dict_path(lang))
        print('| [{}] Dictionary: {} types'.format(lang, len(vocab) - 1))
        n_seq_tok = [0, 0]
        replaced = Counter()

        def merge_result(worker_result):
            replaced.update(worker_result['replaced'])
            n_seq_tok[0] += worker_result['nseq']
            n_seq_tok[1] += worker_result['ntok']

        input_file = '{}{}'.format(input_prefix, ('.' + lang) if lang is not None else '')
        offsets = Tokenizer.find_offsets(input_file, num_workers)
        pool = None
        if num_workers > 1:
            # Workers 1..num_workers-1 binarize their byte range into
            # temporary files; this process handles the first chunk below
            # and merges everything afterwards.
            pool = Pool(processes=num_workers - 1)
            for worker_id in range(1, num_workers):
                prefix = "{}{}".format(output_prefix, worker_id)
                pool.apply_async(
                    binarize,
                    (args, input_file, vocab, prefix, lang,
                     offsets[worker_id], offsets[worker_id + 1]),
                    callback=merge_result,
                )
            pool.close()

        ds = indexed_dataset.IndexedDatasetBuilder(
            dataset_dest_file(args, output_prefix, lang, 'bin'))
        merge_result(Tokenizer.binarize(
            input_file, vocab, lambda t: ds.add_item(t),
            offset=0, end=offsets[1]))
        if num_workers > 1:
            pool.join()
            for worker_id in range(1, num_workers):
                prefix = "{}{}".format(output_prefix, worker_id)
                temp_file_path = dataset_dest_prefix(args, prefix, lang)
                ds.merge_file_(temp_file_path)
                os.remove(indexed_dataset.data_file_path(temp_file_path))
                os.remove(indexed_dataset.index_file_path(temp_file_path))

        ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))

        print('| [{}] {}: {} sents, {} tokens, {:.3}% replaced by {}'.format(
            lang, input_file, n_seq_tok[0], n_seq_tok[1],
            100 * sum(replaced.values()) / n_seq_tok[1], vocab.unk_word))

    def make_dataset(input_prefix, output_prefix, lang, num_workers=1):
        if args.output_format == 'binary':
            make_binary_dataset(input_prefix, output_prefix, lang, num_workers)
        elif args.output_format == 'raw':
            # Copy original text file to destination folder
            output_text_file = dest_path(
                output_prefix + '.{}-{}'.format(args.source_lang, args.target_lang),
                lang,
            )
            shutil.copyfile(file_name(input_prefix, lang), output_text_file)

    def make_all(lang):
        if args.trainpref:
            make_dataset(args.trainpref, 'train', lang, num_workers=args.workers)
        if args.validpref:
            for k, validpref in enumerate(args.validpref.split(',')):
                outprefix = 'valid{}'.format(k) if k > 0 else 'valid'
                make_dataset(validpref, outprefix, lang)
        if args.testpref:
            for k, testpref in enumerate(args.testpref.split(',')):
                outprefix = 'test{}'.format(k) if k > 0 else 'test'
                make_dataset(testpref, outprefix, lang)

    make_all(args.source_lang)
    if target:
        make_all(args.target_lang)

    print('| Wrote preprocessed data to {}'.format(args.destdir))

    if args.alignfile:
        # Map each source token to the target token it is most frequently
        # aligned with, and write the result as a text table.
        assert args.trainpref, "--trainpref must be set if --alignfile is specified"
        src_file_name = train_path(args.source_lang)
        tgt_file_name = train_path(args.target_lang)
        src_dict = dictionary.Dictionary.load(dict_path(args.source_lang))
        tgt_dict = dictionary.Dictionary.load(dict_path(args.target_lang))
        freq_map = {}
        with open(args.alignfile, 'r') as align_file, \
                open(src_file_name, 'r') as src_file, \
                open(tgt_file_name, 'r') as tgt_file:
            for a, s, t in zip_longest(align_file, src_file, tgt_file):
                si = Tokenizer.tokenize(s, src_dict, add_if_not_exist=False)
                ti = Tokenizer.tokenize(t, tgt_dict, add_if_not_exist=False)
                ai = list(map(lambda x: tuple(x.split('-')), a.split()))
                for sai, tai in ai:
                    srcidx = si[int(sai)]
                    tgtidx = ti[int(tai)]
                    if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
                        assert srcidx != src_dict.pad()
                        assert srcidx != src_dict.eos()
                        assert tgtidx != tgt_dict.pad()
                        assert tgtidx != tgt_dict.eos()
                        if srcidx not in freq_map:
                            freq_map[srcidx] = {}
                        if tgtidx not in freq_map[srcidx]:
                            freq_map[srcidx][tgtidx] = 1
                        else:
                            freq_map[srcidx][tgtidx] += 1

        align_dict = {}
        for srcidx in freq_map.keys():
            align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)

        with open(os.path.join(
                args.destdir,
                'alignment.{}-{}.txt'.format(args.source_lang, args.target_lang),
        ), 'w') as f:
            for k, v in align_dict.items():
                print('{} {}'.format(src_dict[k], tgt_dict[v]), file=f)


def binarize(args, filename, dict, output_prefix, lang, offset, end):
    """Worker entry point: binarize one byte range of ``filename``."""
    ds = indexed_dataset.IndexedDatasetBuilder(
        dataset_dest_file(args, output_prefix, lang, 'bin'))

    def consumer(tensor):
        ds.add_item(tensor)

    res = Tokenizer.binarize(filename, dict, consumer, offset=offset, end=end)
    ds.finalize(dataset_dest_file(args, output_prefix, lang, 'idx'))
    return res


def dataset_dest_prefix(args, output_prefix, lang):
    """Destination path prefix, e.g. ``destdir/train.en-de.en``."""
    base = f'{args.destdir}/{output_prefix}'
    lang_part = f'.{args.source_lang}-{args.target_lang}.{lang}' if lang is not None else ''
    return f'{base}{lang_part}'


def dataset_dest_file(args, output_prefix, lang, extension):
    """Destination file path with the given extension ('bin' or 'idx')."""
    base = dataset_dest_prefix(args, output_prefix, lang)
    return f'{base}.{extension}'


if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    main(args)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/score.py
Python
#!/usr/bin/env python3 # Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. """ BLEU scoring of generated translations against reference translations. """ import argparse import os import sys from fairseq import bleu, tokenizer from fairseq.data import dictionary def get_parser(): parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.') parser.add_argument('-s', '--sys', default='-', help='system output') parser.add_argument('-r', '--ref', required=True, help='references') parser.add_argument('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order') parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring') return parser def main(): parser = get_parser() args = parser.parse_args() print(args) assert args.sys == '-' or os.path.exists(args.sys), \ "System output file {} does not exist".format(args.sys) assert os.path.exists(args.ref), \ "Reference file {} does not exist".format(args.ref) dict = dictionary.Dictionary() def readlines(fd): for line in fd.readlines(): if args.ignore_case: yield line.lower() yield line def score(fdsys): with open(args.ref) as fdref: scorer = bleu.Scorer(dict.pad(), dict.eos(), dict.unk()) for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)): sys_tok = tokenizer.Tokenizer.tokenize(sys_tok, dict) ref_tok = tokenizer.Tokenizer.tokenize(ref_tok, dict) scorer.add(ref_tok, sys_tok) print(scorer.result_string(args.order)) if args.sys == '-': score(sys.stdin) else: with open(args.sys, 'r') as f: score(f) if __name__ == '__main__': main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/scripts/average_checkpoints.py
Python
#!/usr/bin/env python3
"""Average the parameters of several checkpoints into a new checkpoint."""

import argparse
import collections
import torch
import os
import re


def average_checkpoints(inputs):
    """Loads checkpoints from inputs and returns a model with averaged weights.

    Args:
      inputs: An iterable of string paths of checkpoints to load from.

    Returns:
      A dict of string keys mapping to various values. The 'model' key
      from the returned dict should correspond to an OrderedDict mapping
      string parameter names to torch Tensors.
    """
    params_dict = collections.OrderedDict()
    params_keys = None
    new_state = None
    for f in inputs:
        state = torch.load(
            f,
            map_location=(
                lambda s, _: torch.serialization.default_restore_location(s, 'cpu')
            ),
        )
        # Copies over the settings from the first checkpoint
        if new_state is None:
            new_state = state

        model_params = state['model']

        model_params_keys = list(model_params.keys())
        if params_keys is None:
            params_keys = model_params_keys
        elif params_keys != model_params_keys:
            raise KeyError(
                'For checkpoint {}, expected list of params: {}, '
                'but found: {}'.format(f, params_keys, model_params_keys)
            )

        for k in params_keys:
            if k not in params_dict:
                params_dict[k] = []
            p = model_params[k]
            if isinstance(p, torch.HalfTensor):
                # Accumulate in full precision to limit rounding error.
                p = p.float()
            params_dict[k].append(p)

    averaged_params = collections.OrderedDict()
    # v should be a list of torch Tensor.
    for k, v in params_dict.items():
        summed_v = None
        for x in v:
            summed_v = summed_v + x if summed_v is not None else x
        averaged_params[k] = summed_v / len(v)
    new_state['model'] = averaged_params
    return new_state


def last_n_checkpoints(paths, n, update_based):
    """Return the paths of the n most recent checkpoints in paths[0].

    Args:
      paths: single-element list containing the checkpoint directory.
      n: number of checkpoint paths to return.
      update_based: if True, match 'checkpoint_<epoch>_<update>.pt' and sort
        by update number; otherwise match 'checkpoint<epoch>.pt'.

    Raises:
      Exception: if fewer than n matching checkpoints are found.
    """
    assert len(paths) == 1
    path = paths[0]
    if update_based:
        pt_regexp = re.compile(r'checkpoint_\d+_(\d+)\.pt')
    else:
        pt_regexp = re.compile(r'checkpoint(\d+)\.pt')
    files = os.listdir(path)

    entries = []
    for f in files:
        m = pt_regexp.fullmatch(f)
        if m is not None:
            entries.append((int(m.group(1)), m.group(0)))
    if len(entries) < n:
        # BUG FIX: the original passed the format string and its arguments
        # as separate Exception() arguments, so the '{}' placeholders were
        # never substituted into the message.
        raise Exception('Found {} checkpoint files but need at least {}'.format(
            len(entries), n))
    return [os.path.join(path, x[1]) for x in sorted(entries, reverse=True)[:n]]


def main():
    """CLI entry point: resolve input checkpoints, average them, save output."""
    parser = argparse.ArgumentParser(
        description='Tool to average the params of input checkpoints to '
                    'produce a new checkpoint',
    )
    parser.add_argument(
        '--inputs',
        required=True,
        nargs='+',
        help='Input checkpoint file paths.',
    )
    parser.add_argument(
        '--output',
        required=True,
        metavar='FILE',
        help='Write the new checkpoint containing the averaged weights to this '
             'path.',
    )
    num_group = parser.add_mutually_exclusive_group()
    num_group.add_argument(
        '--num-epoch-checkpoints',
        type=int,
        help='if set, will try to find checkpoints with names checkpoint_xx.pt in the path specified by input, '
             'and average last this many of them.',
    )
    num_group.add_argument(
        '--num-update-checkpoints',
        type=int,
        help='if set, will try to find checkpoints with names checkpoint_ee_xx.pt in the path specified by input, '
             'and average last this many of them.',
    )
    args = parser.parse_args()
    print(args)

    num = None
    is_update_based = False
    if args.num_update_checkpoints is not None:
        num = args.num_update_checkpoints
        is_update_based = True
    elif args.num_epoch_checkpoints is not None:
        num = args.num_epoch_checkpoints

    if num is not None:
        args.inputs = last_n_checkpoints(args.inputs, num, is_update_based)
        print('averaging checkpoints: ', args.inputs)

    new_state = average_checkpoints(args.inputs)
    torch.save(new_state, args.output)
    print('Finished writing averaged checkpoint to {}.'.format(args.output))


if __name__ == '__main__':
    main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/scripts/build_sym_alignment.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#
"""
Use this script in order to build symmetric alignments for your translation
dataset.
This script depends on fast_align and mosesdecoder tools. You will need to
build those before running the script.

fast_align:
    github: http://github.com/clab/fast_align
    instructions: follow the instructions in README.md
mosesdecoder:
    github: http://github.com/moses-smt/mosesdecoder
    instructions: http://www.statmt.org/moses/?n=Development.GetStarted

The script produces the following files under --output_dir:
    text.joined - concatenation of lines from the source_file and the
       target_file.
    align.forward - forward pass of fast_align.
    align.backward - backward pass of fast_align.
    aligned.sym_heuristic - symmetrized alignment.
"""

import argparse
import os
from itertools import zip_longest


def main():
    """Run fast_align in both directions and symmetrize the alignments."""
    parser = argparse.ArgumentParser(description='symmetric alignment builer')
    parser.add_argument('--fast_align_dir',
                        help='path to fast_align build directory')
    parser.add_argument('--mosesdecoder_dir',
                        help='path to mosesdecoder root directory')
    parser.add_argument('--sym_heuristic',
                        help='heuristic to use for symmetrization',
                        default='grow-diag-final-and')
    parser.add_argument('--source_file',
                        help='path to a file with sentences '
                             'in the source language')
    parser.add_argument('--target_file',
                        help='path to a file with sentences '
                             'in the target language')
    parser.add_argument('--output_dir',
                        help='output directory')
    args = parser.parse_args()

    fast_align_bin = os.path.join(args.fast_align_dir, 'fast_align')
    symal_bin = os.path.join(args.mosesdecoder_dir, 'bin', 'symal')
    sym_fast_align_bin = os.path.join(
        args.mosesdecoder_dir, 'scripts', 'ems',
        'support', 'symmetrize-fast-align.perl')

    # create joined file in the "source ||| target" format fast_align reads
    joined_file = os.path.join(args.output_dir, 'text.joined')
    with open(args.source_file, 'r') as src, open(args.target_file, 'r') as tgt:
        with open(joined_file, 'w') as joined:
            for s, t in zip_longest(src, tgt):
                print('{} ||| {}'.format(s.strip(), t.strip()), file=joined)

    # run forward alignment
    # (FIX: removed a stray duplicate assignment of bwd_align_file that
    # appeared here, before the forward pass; the variable is defined below,
    # immediately before its first use.)
    fwd_align_file = os.path.join(args.output_dir, 'align.forward')
    fwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v > {FWD}'.format(
        FASTALIGN=fast_align_bin,
        JOINED=joined_file,
        FWD=fwd_align_file)
    assert os.system(fwd_fast_align_cmd) == 0

    # run backward alignment
    bwd_align_file = os.path.join(args.output_dir, 'align.backward')
    bwd_fast_align_cmd = '{FASTALIGN} -i {JOINED} -d -o -v -r > {BWD}'.format(
        FASTALIGN=fast_align_bin,
        JOINED=joined_file,
        BWD=bwd_align_file)
    assert os.system(bwd_fast_align_cmd) == 0

    # run symmetrization
    sym_out_file = os.path.join(args.output_dir, 'aligned')
    sym_cmd = '{SYMFASTALIGN} {FWD} {BWD} {SRC} {TGT} {OUT} {HEURISTIC} {SYMAL}'.format(
        SYMFASTALIGN=sym_fast_align_bin,
        FWD=fwd_align_file,
        BWD=bwd_align_file,
        SRC=args.source_file,
        TGT=args.target_file,
        OUT=sym_out_file,
        HEURISTIC=args.sym_heuristic,
        SYMAL=symal_bin)
    assert os.system(sym_cmd) == 0


if __name__ == '__main__':
    main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/scripts/convert_dictionary.lua
Lua
-- Copyright (c) 2017-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the license found in the LICENSE file in
-- the root directory of this source tree. An additional grant of patent rights
-- can be found in the PATENTS file in the same directory.
--
-- Usage: convert_dictionary.lua <dict.th7>
--
-- Converts a torch-serialized fairseq dictionary (.th7) into a plain-text
-- file of "<symbol> <frequency>" lines, written next to the working dir.

require 'fairseq'
require 'torch'
require 'paths'

if #arg < 1 then
  print('usage: convert_dictionary.lua <dict.th7>')
  os.exit(1)
end
if not paths.filep(arg[1]) then
  -- fixed typo in the user-facing error message ('exit' -> 'exist')
  print('error: file does not exist: ' .. arg[1])
  os.exit(1)
end

dict = torch.load(arg[1])
dst = paths.basename(arg[1]):gsub('.th7', '.txt')
assert(dst:match('.txt$'))

f = io.open(dst, 'w')
-- entries past dict.cutoff are out-of-vocabulary and are not exported
for idx, symbol in ipairs(dict.index_to_symbol) do
  if idx > dict.cutoff then
    break
  end
  f:write(symbol)
  f:write(' ')
  f:write(dict.index_to_freq[idx])
  f:write('\n')
end
f:close()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/scripts/convert_model.lua
Lua
-- Copyright (c) 2017-present, Facebook, Inc.
-- All rights reserved.
--
-- This source code is licensed under the license found in the LICENSE file in
-- the root directory of this source tree. An additional grant of patent rights
-- can be found in the PATENTS file in the same directory.
--
-- Usage: convert_model.lua <model_epoch1.th7>
--
-- Exports the parameters of a Lua/torch fairseq fconv model into a flat
-- table keyed with PyTorch-style parameter names and saves it as
-- state_dict.t7.  NOTE: the extraction order below is load-bearing --
-- push_state/table.remove consume the shared `fcs` lists destructively.

require 'torch'
local fairseq = require 'fairseq'

model = torch.load(arg[1])

-- Return the nn.WeightNorm wrapper that owns `module`, so its (v, g)
-- factors can be exported instead of the fused weight.  Returns nil if
-- `module` is not weight-normalized.
function find_weight_norm(container, module)
  for _, wn in ipairs(container:listModules()) do
    if torch.type(wn) == 'nn.WeightNorm' and wn.modules[1] == module then
      return wn
    end
  end
end

-- Store the float-converted parameters of `module` into `dict` under the
-- PyTorch-style prefix `key` (e.g. 'fc1' -> 'fc1.weight_v', 'fc1.bias').
function push_state(dict, key, module)
  if torch.type(module) == 'nn.Linear' then
    local wn = find_weight_norm(model.module, module)
    assert(wn)
    dict[key .. '.weight_v'] = wn.v:float()
    dict[key .. '.weight_g'] = wn.g:float()
  elseif torch.type(module) == 'nn.TemporalConvolutionTBC' then
    local wn = find_weight_norm(model.module, module)
    assert(wn)
    -- TBC conv weights are stored transposed relative to PyTorch
    local v = wn.v:float():view(wn.viewOut):transpose(2, 3)
    dict[key .. '.weight_v'] = v
    dict[key .. '.weight_g'] = wn.g:float():view(module.weight:size(3), 1, 1)
  else
    dict[key .. '.weight'] = module.weight:float()
  end
  if module.bias then
    dict[key .. '.bias'] = module.bias:float()
  end
end

encoder_dict = {}
decoder_dict = {}
combined_dict = {}

-- Walk the encoder and pull out embeddings, the fc1/fc2 projections, the
-- convolutions, and any residual-projection Linears between conv layers.
function encoder_state(encoder)
  luts = encoder:findModules('nn.LookupTable')
  push_state(encoder_dict, 'embed_tokens', luts[1])
  push_state(encoder_dict, 'embed_positions', luts[2])

  fcs = encoder:findModules('nn.Linear')
  assert(#fcs >= 2)
  local nInputPlane = fcs[1].weight:size(1)
  push_state(encoder_dict, 'fc1', table.remove(fcs, 1))
  push_state(encoder_dict, 'fc2', table.remove(fcs, #fcs))

  for i, module in ipairs(encoder:findModules('nn.TemporalConvolutionTBC')) do
    push_state(encoder_dict, 'convolutions.' .. tostring(i - 1), module)
    -- a Linear projection exists only where the channel count changes
    if nInputPlane ~= module.weight:size(3) / 2 then
      push_state(encoder_dict, 'projections.' .. tostring(i - 1),
        table.remove(fcs, 1))
    end
    nInputPlane = module.weight:size(3) / 2
  end
  -- every Linear must have been consumed by now
  assert(#fcs == 0)
end

-- Walk the decoder: embeddings, fc1/fc2/fc3, per-layer projections,
-- attention in/out projections, and the convolutions themselves.
function decoder_state(decoder)
  luts = decoder:findModules('nn.LookupTable')
  push_state(decoder_dict, 'embed_tokens', luts[1])
  push_state(decoder_dict, 'embed_positions', luts[2])

  fcs = decoder:findModules('nn.Linear')
  local nInputPlane = fcs[1].weight:size(1)
  push_state(decoder_dict, 'fc1', table.remove(fcs, 1))
  push_state(decoder_dict, 'fc2', fcs[#fcs - 1])
  push_state(decoder_dict, 'fc3', fcs[#fcs])

  table.remove(fcs, #fcs)
  table.remove(fcs, #fcs)

  for i, module in ipairs(decoder:findModules('nn.TemporalConvolutionTBC')) do
    if nInputPlane ~= module.weight:size(3) / 2 then
      push_state(decoder_dict, 'projections.' .. tostring(i - 1),
        table.remove(fcs, 1))
    end
    nInputPlane = module.weight:size(3) / 2

    local prefix = 'attention.' .. tostring(i - 1)
    push_state(decoder_dict, prefix .. '.in_projection', table.remove(fcs, 1))
    push_state(decoder_dict, prefix .. '.out_projection', table.remove(fcs, 1))
    push_state(decoder_dict, 'convolutions.' .. tostring(i - 1), module)
  end
  assert(#fcs == 0)
end


-- modules[2]/modules[3] are the encoder/decoder submodules of the
-- serialized container (modules[1] presumably wraps inputs -- verify
-- against the Lua fairseq model definition).
_encoder = model.module.modules[2]
_decoder = model.module.modules[3]

encoder_state(_encoder)
decoder_state(_decoder)

-- merge under 'encoder.' / 'decoder.' prefixes for the PyTorch state dict
for k, v in pairs(encoder_dict) do
  combined_dict['encoder.' .. k] = v
end
for k, v in pairs(decoder_dict) do
  combined_dict['decoder.' .. k] = v
end

torch.save('state_dict.t7', combined_dict)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/scripts/read_binarized.py
Python
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
#

import argparse

from fairseq.data import dictionary
from fairseq.data import IndexedDataset


def get_parser():
    """Build the argument parser for this script."""
    parser = argparse.ArgumentParser(
        description='writes text from binarized file to stdout')
    parser.add_argument('--dict', metavar='FP', required=True,
                        help='dictionary containing known words')
    parser.add_argument('--input', metavar='FP', required=True,
                        help='binarized file to read')
    return parser


def main(args):
    """Print every sentence of the binarized dataset as detokenized text.

    Args:
        args: parsed namespace with ``dict`` (dictionary path) and
            ``input`` (binarized dataset path) attributes.
    """
    # renamed from `dict` to avoid shadowing the builtin
    vocab = dictionary.Dictionary.load(args.dict)
    ds = IndexedDataset(args.input, fix_lua_indexing=True)
    for tensor_line in ds:
        print(vocab.string(tensor_line))


if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    main(args)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/setup.py
Python
#!/usr/bin/env python3
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

from setuptools import setup, find_packages, Extension
import sys


if sys.version_info < (3,):
    sys.exit('Sorry, Python3 is required for fairseq.')

with open('README.md') as f:
    readme = f.read()

# renamed from `license` to avoid shadowing the builtin of the same name
with open('LICENSE') as f:
    license_content = f.read()

with open('requirements.txt') as f:
    reqs = f.read()

# C++ extension implementing fast BLEU scoring
bleu = Extension(
    'fairseq.libbleu',
    sources=[
        'fairseq/clib/libbleu/libbleu.cpp',
        'fairseq/clib/libbleu/module.cpp',
    ],
    extra_compile_args=['-std=c++11'],
)

setup(
    name='fairseq',
    version='0.6.0',
    description='Facebook AI Research Sequence-to-Sequence Toolkit',
    long_description=readme,
    license=license_content,
    install_requires=reqs.strip().split('\n'),
    packages=find_packages(),
    ext_modules=[bleu],
    test_suite='tests',
)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_average_checkpoints.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import collections import os import tempfile import unittest import numpy as np import torch from scripts.average_checkpoints import average_checkpoints class TestAverageCheckpoints(unittest.TestCase): def test_average_checkpoints(self): params_0 = collections.OrderedDict( [ ('a', torch.DoubleTensor([100.0])), ('b', torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ('c', torch.IntTensor([7, 8, 9])), ] ) params_1 = collections.OrderedDict( [ ('a', torch.DoubleTensor([1.0])), ('b', torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ('c', torch.IntTensor([2, 2, 2])), ] ) params_avg = collections.OrderedDict( [ ('a', torch.DoubleTensor([50.5])), ('b', torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), # We expect truncation for integer division ('c', torch.IntTensor([4, 5, 5])), ] ) fd_0, path_0 = tempfile.mkstemp() fd_1, path_1 = tempfile.mkstemp() torch.save(collections.OrderedDict([('model', params_0)]), path_0) torch.save(collections.OrderedDict([('model', params_1)]), path_1) output = average_checkpoints([path_0, path_1])['model'] os.close(fd_0) os.remove(path_0) os.close(fd_1) os.remove(path_1) for (k_expected, v_expected), (k_out, v_out) in zip( params_avg.items(), output.items()): self.assertEqual( k_expected, k_out, 'Key mismatch - expected {} but found {}. ' '(Expected list of keys: {} vs actual list of keys: {})'.format( k_expected, k_out, params_avg.keys(), output.keys() ) ) np.testing.assert_allclose( v_expected.numpy(), v_out.numpy(), err_msg='Tensor value mismatch for key {}'.format(k_expected) ) if __name__ == '__main__': unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_backtranslation_dataset.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import argparse
import unittest

import tests.utils as test_utils
import torch
from fairseq.data.backtranslation_dataset import BacktranslationDataset


class TestBacktranslationDataset(unittest.TestCase):
    """Tests for fairseq.data.backtranslation_dataset.BacktranslationDataset."""

    def setUp(self):
        # shared fixture: a toy dictionary, two token ids, a batch of source
        # tokens/lengths, and a deterministic test model
        self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model = (
            test_utils.sequence_generator_setup()
        )

        backtranslation_args = argparse.Namespace()
        """ Same as defaults from fairseq/options.py """
        backtranslation_args.backtranslation_unkpen = 0
        backtranslation_args.backtranslation_sampling = False
        backtranslation_args.backtranslation_max_len_a = 0
        backtranslation_args.backtranslation_max_len_b = 200
        backtranslation_args.backtranslation_beam = 2

        self.backtranslation_args = backtranslation_args

        dummy_src_samples = self.src_tokens

        self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples)

    def test_backtranslation_dataset(self):
        """A collated batch contains the back-translated sources and the
        original targets expected from the fixture model."""
        backtranslation_dataset = BacktranslationDataset(
            args=self.backtranslation_args,
            tgt_dataset=self.tgt_dataset,
            tgt_dict=self.tgt_dict,
            backtranslation_model=self.model,
        )
        dataloader = torch.utils.data.DataLoader(
            backtranslation_dataset,
            batch_size=2,
            collate_fn=backtranslation_dataset.collater,
        )
        backtranslation_batch_result = next(iter(dataloader))

        eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2

        # Note that we sort by src_lengths and add left padding, so actually
        # ids will look like: [1, 0]
        expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]])
        expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
        generated_src = backtranslation_batch_result["net_input"]["src_tokens"]
        tgt_tokens = backtranslation_batch_result["target"]

        self.assertTensorEqual(expected_src, generated_src)
        self.assertTensorEqual(expected_tgt, tgt_tokens)

    def assertTensorEqual(self, t1, t2):
        # element-wise equality with a size pre-check
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == "__main__":
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_binaries.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import contextlib
from io import StringIO
import os
import random
import sys
import tempfile
import unittest

import torch

from fairseq import options

import preprocess
import train
import generate
import interactive
import eval_lm


class TestTranslation(unittest.TestCase):
    """End-to-end CLI tests: preprocess -> train one epoch -> generate."""

    def test_fconv(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir)

    def test_raw(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_raw') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir, ['--output-format', 'raw'])
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--raw-text'])
                generate_main(data_dir, ['--raw-text'])

    def test_fp16(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fp16') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--fp16'])
                generate_main(data_dir)

    def test_update_freq(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_update_freq') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en', ['--update-freq', '3'])
                generate_main(data_dir)

    def test_max_positions(self):
        """Training/generation must fail on over-long examples unless
        --skip-invalid-size-inputs-valid-test is given."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_max_positions') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                with self.assertRaises(Exception) as context:
                    train_translation_model(
                        data_dir, 'fconv_iwslt_de_en',
                        ['--max-target-positions', '5'],
                    )
                self.assertTrue(
                    'skip this example with --skip-invalid-size-inputs-valid-test' \
                    in str(context.exception)
                )
                train_translation_model(
                    data_dir, 'fconv_iwslt_de_en',
                    ['--max-target-positions', '5', '--skip-invalid-size-inputs-valid-test'],
                )
                with self.assertRaises(Exception) as context:
                    generate_main(data_dir)
                generate_main(data_dir, ['--skip-invalid-size-inputs-valid-test'])

    def test_generation(self):
        """Exercise sampling / top-k sampling / prefix-constrained generation."""
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_sampling') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'fconv_iwslt_de_en')
                generate_main(data_dir, [
                    '--sampling',
                    '--sampling-temperature', '2',
                    '--beam', '2',
                    '--nbest', '2',
                ])
                generate_main(data_dir, [
                    '--sampling',
                    '--sampling-topk', '3',
                    '--beam', '2',
                    '--nbest', '2',
                ])
                generate_main(data_dir, ['--prefix-size', '2'])

    def test_lstm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm_wiseman_iwslt_de_en', [
                    '--encoder-layers', '2',
                    '--decoder-layers', '2',
                ])
                generate_main(data_dir)

    def test_lstm_bidirectional(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_lstm_bidirectional') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'lstm', [
                    '--encoder-layers', '2',
                    '--encoder-bidirectional',
                    '--encoder-hidden-size', '256',
                    '--decoder-layers', '2',
                ])
                generate_main(data_dir)

    def test_transformer(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_transformer') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                train_translation_model(data_dir, 'transformer_iwslt_de_en')
                generate_main(data_dir)


class TestStories(unittest.TestCase):
    """Tests for the fconv self-attention story-generation models."""

    def test_fconv_self_att_wp(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_self_att_wp') as data_dir:
                create_dummy_data(data_dir)
                preprocess_translation_data(data_dir)
                config = [
                    '--encoder-layers', '[(512, 3)] * 2',
                    '--decoder-layers', '[(512, 3)] * 2',
                    '--decoder-attention', 'True',
                    '--encoder-attention', 'False',
                    '--gated-attention', 'True',
                    '--self-attention', 'True',
                    '--project-input', 'True',
                ]
                train_translation_model(data_dir, 'fconv_self_att_wp', config)
                generate_main(data_dir)

                # fusion model: reuse the first checkpoint as a pretrained model
                os.rename(os.path.join(data_dir, 'checkpoint_last.pt'), os.path.join(data_dir, 'pretrained.pt'))
                config.extend([
                    '--pretrained', 'True',
                    '--pretrained-checkpoint', os.path.join(data_dir, 'pretrained.pt'),
                    '--save-dir', os.path.join(data_dir, 'fusion_model'),
                ])
                train_translation_model(data_dir, 'fconv_self_att_wp', config)


class TestLanguageModeling(unittest.TestCase):
    """End-to-end CLI test for the language-modeling task."""

    def test_fconv_lm(self):
        with contextlib.redirect_stdout(StringIO()):
            with tempfile.TemporaryDirectory('test_fconv_lm') as data_dir:
                create_dummy_data(data_dir)
                preprocess_lm_data(data_dir)
                train_language_model(data_dir, 'fconv_lm')
                eval_lm_main(data_dir)


def create_dummy_data(data_dir, num_examples=1000, maxlen=20):
    """Write random train/valid/test '.in'/'.out' files of lowercase words.

    Each line is 1..maxlen space-separated characters drawn from a-z
    (ASCII 97 + [0, 26)).
    """

    def _create_dummy_data(filename):
        data = torch.rand(num_examples * maxlen)
        data = 97 + torch.floor(26 * data).int()
        with open(os.path.join(data_dir, filename), 'w') as h:
            offset = 0
            for _ in range(num_examples):
                ex_len = random.randint(1, maxlen)
                ex_str = ' '.join(map(chr, data[offset:offset+ex_len]))
                print(ex_str, file=h)
                offset += ex_len

    _create_dummy_data('train.in')
    _create_dummy_data('train.out')
    _create_dummy_data('valid.in')
    _create_dummy_data('valid.out')
    _create_dummy_data('test.in')
    _create_dummy_data('test.out')


def preprocess_translation_data(data_dir, extra_flags=None):
    """Binarize the dummy in->out parallel data in-place in data_dir."""
    preprocess_parser = preprocess.get_parser()
    preprocess_args = preprocess_parser.parse_args(
        [
            '--source-lang', 'in',
            '--target-lang', 'out',
            '--trainpref', os.path.join(data_dir, 'train'),
            '--validpref', os.path.join(data_dir, 'valid'),
            '--testpref', os.path.join(data_dir, 'test'),
            '--thresholdtgt', '0',
            '--thresholdsrc', '0',
            '--destdir', data_dir,
        ] + (extra_flags or []),
    )
    preprocess.main(preprocess_args)


def train_translation_model(data_dir, arch, extra_flags=None):
    """Train `arch` for one epoch on the binarized data in data_dir."""
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'translation',
            data_dir,
            '--save-dir', data_dir,
            '--arch', arch,
            '--optimizer', 'nag',
            '--lr', '0.05',
            '--max-tokens', '500',
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--source-lang', 'in',
            '--target-lang', 'out',
        ] + (extra_flags or []),
    )
    train.main(train_args)


def generate_main(data_dir, extra_flags=None):
    """Run generation on the last checkpoint, both batched and interactive."""
    generate_parser = options.get_generation_parser()
    generate_args = options.parse_args_and_arch(
        generate_parser,
        [
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--beam', '3',
            '--batch-size', '64',
            '--max-len-b', '5',
            '--gen-subset', 'valid',
            '--no-progress-bar',
            '--print-alignment',
        ] + (extra_flags or []),
    )

    # evaluate model in batch mode
    generate.main(generate_args)

    # evaluate model interactively
    generate_args.buffer_size = 0
    generate_args.max_sentences = None
    orig_stdin = sys.stdin
    sys.stdin = StringIO('h e l l o\n')
    interactive.main(generate_args)
    sys.stdin = orig_stdin


def preprocess_lm_data(data_dir):
    """Binarize the monolingual '.out' data for language modeling."""
    preprocess_parser = preprocess.get_parser()
    preprocess_args = preprocess_parser.parse_args([
        '--only-source',
        '--trainpref', os.path.join(data_dir, 'train.out'),
        '--validpref', os.path.join(data_dir, 'valid.out'),
        '--testpref', os.path.join(data_dir, 'test.out'),
        '--destdir', data_dir,
    ])
    preprocess.main(preprocess_args)


def train_language_model(data_dir, arch):
    """Train a small fconv language model for one epoch."""
    train_parser = options.get_training_parser()
    train_args = options.parse_args_and_arch(
        train_parser,
        [
            '--task', 'language_modeling',
            data_dir,
            '--arch', arch,
            '--optimizer', 'nag',
            '--lr', '1.0',
            '--criterion', 'adaptive_loss',
            '--adaptive-softmax-cutoff', '5,10,15',
            '--decoder-layers', '[(850, 3)] * 2 + [(1024,4)]',
            '--decoder-embed-dim', '280',
            '--max-tokens', '500',
            '--tokens-per-sample', '500',
            '--save-dir', data_dir,
            '--max-epoch', '1',
            '--no-progress-bar',
            '--distributed-world-size', '1',
            '--ddp-backend', 'no_c10d',
        ],
    )
    train.main(train_args)


def eval_lm_main(data_dir):
    """Score the validation data with the last LM checkpoint."""
    eval_lm_parser = options.get_eval_lm_parser()
    eval_lm_args = options.parse_args_and_arch(
        eval_lm_parser,
        [
            data_dir,
            '--path', os.path.join(data_dir, 'checkpoint_last.pt'),
            '--no-progress-bar',
        ],
    )
    eval_lm.main(eval_lm_args)


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_character_token_embedder.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch import unittest from fairseq.data import Dictionary from fairseq.modules import CharacterTokenEmbedder class TestCharacterTokenEmbedder(unittest.TestCase): def test_character_token_embedder(self): vocab = Dictionary() vocab.add_symbol('hello') vocab.add_symbol('there') embedder = CharacterTokenEmbedder(vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2) test_sents = [['hello', 'unk', 'there'], ['there'], ['hello', 'there']] max_len = max(len(s) for s in test_sents) input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad()) for i in range(len(test_sents)): input[i][0] = vocab.eos() for j in range(len(test_sents[i])): input[i][j + 1] = vocab.index(test_sents[i][j]) input[i][j + 2] = vocab.eos() embs = embedder(input) assert embs.size() == (len(test_sents), max_len + 2, 5) self.assertAlmostEqual(embs[0][0], embs[1][0]) self.assertAlmostEqual(embs[0][0], embs[0][-1]) self.assertAlmostEqual(embs[0][1], embs[2][1]) self.assertAlmostEqual(embs[0][3], embs[1][1]) embs.sum().backward() assert embedder.char_embeddings.weight.grad is not None def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == '__main__': unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_convtbc.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import torch import unittest from fairseq.modules import ConvTBC import torch.nn as nn class TestConvTBC(unittest.TestCase): def test_convtbc(self): # ksz, in_channels, out_channels conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1) # out_channels, in_channels, ksz conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1) conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2)) conv_tbc.bias.data.copy_(conv1d.bias.data) input_tbc = torch.randn(7, 2, 4, requires_grad=True) input1d = input_tbc.data.transpose(0, 1).transpose(1, 2) input1d.requires_grad = True output_tbc = conv_tbc(input_tbc) output1d = conv1d(input1d) self.assertAlmostEqual(output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data) grad_tbc = torch.randn(output_tbc.size()) grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous() output_tbc.backward(grad_tbc) output1d.backward(grad1d) self.assertAlmostEqual(conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data) self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data) self.assertAlmostEqual(input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) if __name__ == '__main__': unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_dictionary.py
Python
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import tempfile import unittest import torch from fairseq.data import Dictionary from fairseq.tokenizer import Tokenizer class TestDictionary(unittest.TestCase): def test_finalize(self): txt = [ 'A B C D', 'B C D', 'C D', 'D', ] ref_ids1 = list(map(torch.IntTensor, [ [4, 5, 6, 7, 2], [5, 6, 7, 2], [6, 7, 2], [7, 2], ])) ref_ids2 = list(map(torch.IntTensor, [ [7, 6, 5, 4, 2], [6, 5, 4, 2], [5, 4, 2], [4, 2], ])) # build dictionary d = Dictionary() for line in txt: Tokenizer.tokenize(line, d, add_if_not_exist=True) def get_ids(dictionary): ids = [] for line in txt: ids.append(Tokenizer.tokenize(line, dictionary, add_if_not_exist=False)) return ids def assertMatch(ids, ref_ids): for toks, ref_toks in zip(ids, ref_ids): self.assertEqual(toks.size(), ref_toks.size()) self.assertEqual(0, (toks != ref_toks).sum().item()) ids = get_ids(d) assertMatch(ids, ref_ids1) # check finalized dictionary d.finalize() finalized_ids = get_ids(d) assertMatch(finalized_ids, ref_ids2) # write to disk and reload with tempfile.NamedTemporaryFile(mode='w') as tmp_dict: d.save(tmp_dict.name) d = Dictionary.load(tmp_dict.name) reload_ids = get_ids(d) assertMatch(reload_ids, ref_ids2) assertMatch(finalized_ids, reload_ids) if __name__ == '__main__': unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_iterators.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import unittest

from fairseq.data import iterators


class TestIterators(unittest.TestCase):
    """Unit tests for fairseq.data.iterators."""

    def test_counting_iterator(self):
        """CountingIterator yields elements in order and skip() advances
        the cursor without yielding the skipped elements."""
        data = list(range(10))
        it = iterators.CountingIterator(data)
        self.assertTrue(it.has_next())
        self.assertEqual(next(it), 0)
        self.assertEqual(next(it), 1)
        it.skip(3)
        self.assertEqual(next(it), 5)
        it.skip(3)
        self.assertEqual(next(it), 9)
        # all 10 elements consumed or skipped -> exhausted
        self.assertFalse(it.has_next())


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_label_smoothing.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import argparse
import copy
import unittest

import torch

from fairseq.criterions.cross_entropy import CrossEntropyCriterion
from fairseq.criterions.label_smoothed_cross_entropy import LabelSmoothedCrossEntropyCriterion

import tests.utils as test_utils


class TestLabelSmoothing(unittest.TestCase):
    """Tests for LabelSmoothedCrossEntropyCriterion against plain CE."""

    def setUp(self):
        # build dictionary
        self.d = test_utils.dummy_dictionary(3)
        vocab = len(self.d)
        self.assertEqual(vocab, 4 + 3)  # 4 special + 3 tokens
        self.assertEqual(self.d.pad(), 1)
        self.assertEqual(self.d.eos(), 2)
        self.assertEqual(self.d.unk(), 3)
        pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6  # noqa: F841

        # build dataset
        self.data = [
            # the first batch item has padding
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, eos])},
            {'source': torch.LongTensor([w1, eos]), 'target': torch.LongTensor([w1, w1, eos])},
        ]
        self.sample = next(test_utils.dummy_dataloader(self.data))

        # build model with fixed output probabilities per target position
        self.args = argparse.Namespace()
        self.args.sentence_avg = False
        self.args.probs = torch.FloatTensor([
            # pad   eos  unk   w1   w2   w3
            [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05],
            [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10],
            [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15],
        ]).unsqueeze(0).expand(2, 3, 7)  # add batch dimension
        self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d)
        self.model = self.task.build_model(self.args)

    def test_nll_loss(self):
        """The smoothed criterion logs the same nll_loss as plain CE."""
        self.args.label_smoothing = 0.1
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertLess(abs(nll_loss - nll_logging_output['loss']), 1e-6)
        self.assertLess(abs(nll_loss - smooth_logging_output['nll_loss']), 1e-6)

    def test_padding(self):
        """Loss over a padded batch equals the sum of per-item losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample)

        def get_one_no_padding(idx):
            # create a new sample with just a single batch item so that there's
            # no padding
            sample1 = next(test_utils.dummy_dataloader([self.data[idx]]))
            args1 = copy.copy(self.args)
            args1.probs = args1.probs[idx, :, :].unsqueeze(0)
            model1 = self.task.build_model(args1)
            loss1, _, _ = crit(model1, sample1)
            return loss1

        loss1 = get_one_no_padding(0)
        loss2 = get_one_no_padding(1)
        self.assertAlmostEqual(loss, loss1 + loss2)

    def test_reduction(self):
        """reduce=True equals the sum of the unreduced per-token losses."""
        self.args.label_smoothing = 0.1
        crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        loss, _, logging_output = crit(self.model, self.sample, reduce=True)
        unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False)
        self.assertAlmostEqual(loss, unreduced_loss.sum())

    def test_zero_eps(self):
        """With label_smoothing=0 the smoothed loss collapses to plain CE."""
        self.args.label_smoothing = 0.0
        nll_crit = CrossEntropyCriterion(self.args, self.task)
        smooth_crit = LabelSmoothedCrossEntropyCriterion(self.args, self.task)
        nll_loss, nll_sample_size, nll_logging_output = nll_crit(self.model, self.sample)
        smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit(self.model, self.sample)
        self.assertAlmostEqual(nll_loss, smooth_loss)

    def assertAlmostEqual(self, t1, t2):
        # tensor-aware near-equality
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-6)


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_reproducibility.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import contextlib
from io import StringIO
import json
import os
import tempfile
import unittest

import torch

from fairseq import options

from . import test_binaries


class TestReproducibility(unittest.TestCase):
    """Training resumed from a checkpoint must reproduce the same metrics as
    an uninterrupted run."""

    def _test_reproducibility(self, name, extra_flags=None):
        if extra_flags is None:
            extra_flags = []

        with tempfile.TemporaryDirectory(name) as data_dir:
            with contextlib.redirect_stdout(StringIO()):
                test_binaries.create_dummy_data(data_dir)
                test_binaries.preprocess_translation_data(data_dir)

            # train epochs 1 and 2 together
            stdout = StringIO()
            with contextlib.redirect_stdout(stdout):
                test_binaries.train_translation_model(
                    data_dir, 'fconv_iwslt_de_en', [
                        '--dropout', '0.0',
                        '--log-format', 'json',
                        '--log-interval', '1',
                        '--max-epoch', '3',
                    ] + extra_flags,
                )
            stdout = stdout.getvalue()
            # last train/valid json log lines of the run
            train_log, valid_log = map(json.loads, stdout.split('\n')[-4:-2])

            # train epoch 2, resuming from previous checkpoint 1
            os.rename(
                os.path.join(data_dir, 'checkpoint1.pt'),
                os.path.join(data_dir, 'checkpoint_last.pt'),
            )
            stdout = StringIO()
            with contextlib.redirect_stdout(stdout):
                test_binaries.train_translation_model(
                    data_dir, 'fconv_iwslt_de_en', [
                        '--dropout', '0.0',
                        '--log-format', 'json',
                        '--log-interval', '1',
                        '--max-epoch', '3',
                    ] + extra_flags,
                )
            stdout = stdout.getvalue()
            train_res_log, valid_res_log = map(json.loads, stdout.split('\n')[-4:-2])

            # compare metrics rounded to 3 decimal places
            def cast(s):
                return round(float(s), 3)

            for k in ['loss', 'ppl', 'num_updates', 'gnorm']:
                self.assertEqual(cast(train_log[k]), cast(train_res_log[k]))
            for k in ['valid_loss', 'valid_ppl', 'num_updates', 'best']:
                self.assertEqual(cast(valid_log[k]), cast(valid_res_log[k]))

    def test_reproducibility(self):
        self._test_reproducibility('test_reproducibility')

    def test_reproducibility_fp16(self):
        self._test_reproducibility('test_reproducibility_fp16', [
            '--fp16',
            '--fp16-init-scale', '4096',
        ])


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_sequence_generator.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import argparse
import unittest

import torch

from fairseq.sequence_generator import SequenceGenerator

import tests.utils as test_utils


class TestSequenceGenerator(unittest.TestCase):
    """Beam-search decoding tests against the two-sentence scripted model
    built by test_utils.sequence_generator_setup()."""

    def setUp(self):
        self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model = (
            test_utils.sequence_generator_setup()
        )
        self.encoder_input = {
            'src_tokens': src_tokens,
            'src_lengths': src_lengths,
        }

    def test_with_normalization(self):
        generator = SequenceGenerator([self.model], self.tgt_dict)
        hypos = generator.generate(self.encoder_input, beam_size=2)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6])

    def test_without_normalization(self):
        # Sentence 1: unchanged from the normalized case
        # Sentence 2: beams swap order
        generator = SequenceGenerator([self.model], self.tgt_dict, normalize_scores=False)
        hypos = generator.generate(self.encoder_input, beam_size=2)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False)

    def test_with_lenpen_favoring_short_hypos(self):
        lenpen = 0.6
        generator = SequenceGenerator([self.model], self.tgt_dict, len_penalty=lenpen)
        hypos = generator.generate(self.encoder_input, beam_size=2)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)

    def test_with_lenpen_favoring_long_hypos(self):
        lenpen = 5.0
        generator = SequenceGenerator([self.model], self.tgt_dict, len_penalty=lenpen)
        hypos = generator.generate(self.encoder_input, beam_size=2)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen)
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen)
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen)
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen)

    def test_maxlen(self):
        generator = SequenceGenerator([self.model], self.tgt_dict, maxlen=2)
        hypos = generator.generate(self.encoder_input, beam_size=2)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w2, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01])

    def test_no_stop_early(self):
        generator = SequenceGenerator([self.model], self.tgt_dict, stop_early=False)
        hypos = generator.generate(self.encoder_input, beam_size=2)
        eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos])
        self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w2, w2, w2, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.3, 0.9, 0.99, 0.4, 1.0])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0])

    def assertHypoTokens(self, hypo, tokens):
        # compare the hypothesis token sequence against the expected tokens
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        # `pos_probs` are the expected per-step emission probabilities; the
        # hypothesis score is their summed log, optionally length-normalized
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel()**lenpen
        self.assertLess(abs(score - hypo['score']), 1e-6)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


class TestDiverseBeamSearch(unittest.TestCase):
    """Diverse beam search (two groups) over a hand-scripted decoder."""

    def setUp(self):
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        self.eos = d.eos()
        self.w1 = 4
        self.w2 = 5

        # construct source data
        self.src_tokens = torch.LongTensor([
            [self.w1, self.w2, self.eos],
            [self.w1, self.w2, self.eos],
        ])
        self.src_lengths = torch.LongTensor([2, 2])

        args = argparse.Namespace()
        unk = 0.
        # scripted per-step output distributions; rows are (sentence, beam)
        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos      w1   w2
                # sentence 1:
                [0.0, unk, 0.9, 0.1],  # beam 1
                [0.0, unk, 0.9, 0.1],  # beam 2
                # sentence 2:
                [0.0, unk, 0.7, 0.3],
                [0.0, unk, 0.7, 0.3],
            ]),
            # step 1:
            torch.FloatTensor([
                # eos      w1   w2
                # sentence 1:
                [0.0, unk, 0.6, 0.4],
                [0.0, unk, 0.6, 0.4],
                # sentence 2:
                [0.25, unk, 0.35, 0.4],
                [0.25, unk, 0.35, 0.4],
            ]),
            # step 2:
            torch.FloatTensor([
                # eos      w1   w2
                # sentence 1:
                [1.0, unk, 0.0, 0.0],
                [1.0, unk, 0.0, 0.0],
                # sentence 2:
                [0.9, unk, 0.1, 0.0],
                [0.9, unk, 0.1, 0.0],
            ]),
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        self.model = task.build_model(args)
        self.tgt_dict = task.target_dictionary

    def test_diverse_beam_search(self):
        generator = SequenceGenerator(
            [self.model], self.tgt_dict,
            beam_size=2, diverse_beam_groups=2, diverse_beam_strength=0.,
        )
        encoder_input = {'src_tokens': self.src_tokens, 'src_lengths': self.src_lengths}
        hypos = generator.generate(encoder_input)
        eos, w1, w2 = self.eos, self.w1, self.w2
        # with strength 0 both groups follow the same greedy path
        # sentence 1, beam 1
        self.assertHypoTokens(hypos[0][0], [w1, w1, eos])
        self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0])
        # sentence 1, beam 2
        self.assertHypoTokens(hypos[0][1], [w1, w1, eos])
        self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0])
        # sentence 2, beam 1
        self.assertHypoTokens(hypos[1][0], [w1, w2, eos])
        self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9])
        # sentence 2, beam 2
        self.assertHypoTokens(hypos[1][1], [w1, w2, eos])
        self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9])

    def assertHypoTokens(self, hypo, tokens):
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel()**lenpen
        self.assertLess(abs(score - hypo['score']), 1e-6)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_sequence_scorer.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import argparse
import unittest

import torch

from fairseq.sequence_scorer import SequenceScorer

import tests.utils as test_utils


class TestSequenceScorer(unittest.TestCase):

    def test_sequence_scorer(self):
        """Score three reference targets with a scripted decoder and verify
        the per-position probabilities the scorer reports."""
        # construct dummy dictionary
        d = test_utils.dummy_dictionary(vocab_size=2)
        self.assertEqual(d.pad(), 1)
        self.assertEqual(d.eos(), 2)
        self.assertEqual(d.unk(), 3)
        eos = d.eos()
        w1 = 4
        w2 = 5

        # construct dataloader
        data = [
            {
                'source': torch.LongTensor([w1, w2, eos]),
                'target': torch.LongTensor([w1, w2, w1, eos]),
            },
            {
                'source': torch.LongTensor([w2, eos]),
                'target': torch.LongTensor([w2, w1, eos]),
            },
            {
                'source': torch.LongTensor([w2, eos]),
                'target': torch.LongTensor([w2, eos]),
            },
        ]
        data_itr = test_utils.dummy_dataloader(data)

        # specify expected output probabilities
        args = argparse.Namespace()
        unk = 0.
        args.beam_probs = [
            # step 0:
            torch.FloatTensor([
                # eos      w1   w2
                [0.0, unk, 0.6, 0.4],  # sentence 1
                [0.0, unk, 0.4, 0.6],  # sentence 2
                [0.0, unk, 0.7, 0.3],  # sentence 3
            ]),
            # step 1:
            torch.FloatTensor([
                # eos      w1   w2
                [0.0, unk, 0.2, 0.7],  # sentence 1
                [0.0, unk, 0.8, 0.2],  # sentence 2
                [0.7, unk, 0.1, 0.2],  # sentence 3
            ]),
            # step 2:
            torch.FloatTensor([
                # eos       w1    w2
                [0.10, unk, 0.50, 0.4],  # sentence 1
                [0.15, unk, 0.15, 0.7],  # sentence 2
                [0.00, unk, 0.00, 0.0],  # sentence 3
            ]),
            # step 3:
            torch.FloatTensor([
                # eos      w1    w2
                [0.9, unk, 0.05, 0.05],  # sentence 1
                [0.0, unk, 0.00, 0.0],  # sentence 2
                [0.0, unk, 0.00, 0.0],  # sentence 3
            ]),
        ]
        # probability of each reference token at its step, per sentence
        expected_scores = [
            [0.6, 0.7, 0.5, 0.9],  # sentence 1
            [0.6, 0.8, 0.15],  # sentence 2
            [0.3, 0.7],  # sentence 3
        ]

        task = test_utils.TestTranslationTask.setup_task(args, d, d)
        model = task.build_model(args)
        scorer = SequenceScorer([model], task.target_dictionary)
        for id, _src, _ref, hypos in scorer.score_batched_itr(data_itr):
            self.assertHypoTokens(hypos[0], data[id]['target'])
            self.assertHypoScore(hypos[0], expected_scores[id])

    def assertHypoTokens(self, hypo, tokens):
        self.assertTensorEqual(hypo['tokens'], torch.LongTensor(tokens))

    def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.):
        # hypothesis score is the (optionally length-normalized) sum of logs
        pos_scores = torch.FloatTensor(pos_probs).log()
        self.assertAlmostEqual(hypo['positional_scores'], pos_scores)
        self.assertEqual(pos_scores.numel(), hypo['tokens'].numel())
        score = pos_scores.sum()
        if normalized:
            score /= pos_scores.numel()**lenpen
        self.assertLess(abs(score - hypo['score']), 1e-6)

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)

    def assertTensorEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_train.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import contextlib
from io import StringIO
import unittest
from unittest.mock import MagicMock, patch

import torch

from fairseq import data

import train


def mock_trainer(epoch, num_updates, iterations_in_epoch):
    """Trainer mock whose loaded checkpoint reports the given iterator state."""
    trainer = MagicMock()
    trainer.load_checkpoint.return_value = {
        'train_iterator': {
            'epoch': epoch,
            'iterations_in_epoch': iterations_in_epoch,
            'shuffle': False,
        },
    }
    trainer.get_num_updates.return_value = num_updates
    return trainer


def mock_dict():
    """Dictionary mock with the conventional pad/eos/unk indices."""
    d = MagicMock()
    d.pad.return_value = 1
    d.eos.return_value = 2
    d.unk.return_value = 3
    return d


def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch):
    # One token per batch, with token value == batch index, so the tests can
    # read the first src token to see where the iterator resumed.
    tokens = torch.LongTensor(list(range(epoch_size)))
    tokens_ds = data.TokenBlockDataset(tokens, sizes=[len(tokens)], block_size=1, pad=0, eos=1, include_targets=False)
    trainer = mock_trainer(epoch, num_updates, iterations_in_epoch)
    dataset = data.LanguagePairDataset(tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False)
    epoch_itr = data.EpochBatchIterator(
        dataset=dataset,
        collate_fn=dataset.collater,
        batch_sampler=[[i] for i in range(epoch_size)],
    )
    return trainer, epoch_itr


class TestLoadCheckpoint(unittest.TestCase):
    """Tests for train.load_checkpoint's replay of the epoch iterator."""

    def setUp(self):
        self.args_mock = MagicMock()
        self.args_mock.optimizer_overrides = '{}'
        # stub out filesystem access so no real checkpoint file is needed
        self.patches = {
            'os.makedirs': MagicMock(),
            'os.path.join': MagicMock(),
            'os.path.isfile': MagicMock(return_value=True),
        }
        self.applied_patches = [patch(p, d) for p, d in self.patches.items()]
        [p.start() for p in self.applied_patches]

    def test_load_partial_checkpoint(self):
        # checkpoint taken mid-epoch: iteration resumes at batch 50 of epoch 2
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50)
            train.load_checkpoint(self.args_mock, trainer, epoch_itr)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)

            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 2)
            self.assertEqual(epoch_itr.iterations_in_epoch, 50)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 50)
            self.assertEqual(epoch_itr.iterations_in_epoch, 51)

    def test_load_full_checkpoint(self):
        # checkpoint taken at end of epoch: training continues at epoch 3
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150)
            train.load_checkpoint(self.args_mock, trainer, epoch_itr)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 3)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)

    def test_load_no_checkpoint(self):
        # no checkpoint on disk: training starts fresh at epoch 1
        with contextlib.redirect_stdout(StringIO()):
            trainer, epoch_itr = get_trainer_and_epoch_itr(0, 150, 0, 0)
            self.patches['os.path.isfile'].return_value = False
            train.load_checkpoint(self.args_mock, trainer, epoch_itr)
            itr = epoch_itr.next_epoch_itr(shuffle=False)
            self.assertEqual(epoch_itr.epoch, 1)
            self.assertEqual(epoch_itr.iterations_in_epoch, 0)
            self.assertEqual(next(itr)['net_input']['src_tokens'][0].item(), 0)

    def tearDown(self):
        patch.stopall()


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/test_utils.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import unittest

import torch

from fairseq import utils


class TestUtils(unittest.TestCase):
    """Unit tests for padding-direction conversion and position numbering."""

    def test_convert_padding_direction(self):
        pad_idx = 1
        left_padded = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [1, 7, 8, 9, 10],
            [1, 1, 1, 11, 12],
        ])
        right_padded = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [7, 8, 9, 10, 1],
            [11, 12, 1, 1, 1],
        ])

        # converting left-padded input left-to-right must give the
        # right-padded tensor, and vice versa
        converted_to_right = utils.convert_padding_direction(
            left_padded,
            pad_idx,
            left_to_right=True,
        )
        self.assertAlmostEqual(right_padded, converted_to_right)

        converted_to_left = utils.convert_padding_direction(
            right_padded,
            pad_idx,
            right_to_left=True,
        )
        self.assertAlmostEqual(left_padded, converted_to_left)

    def test_make_positions(self):
        pad_idx = 1
        # 9 stands in for any non-pad token; expected positions start at
        # pad_idx + 1 and pad slots keep the pad index
        padded_left = torch.LongTensor([
            [9, 9, 9, 9, 9],
            [1, 9, 9, 9, 9],
            [1, 1, 1, 9, 9],
        ])
        expected_left = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [1, 2, 3, 4, 5],
            [1, 1, 1, 2, 3],
        ])
        padded_right = torch.LongTensor([
            [9, 9, 9, 9, 9],
            [9, 9, 9, 9, 1],
            [9, 9, 1, 1, 1],
        ])
        expected_right = torch.LongTensor([
            [2, 3, 4, 5, 6],
            [2, 3, 4, 5, 1],
            [2, 3, 1, 1, 1],
        ])

        self.assertAlmostEqual(
            expected_left,
            utils.make_positions(padded_left, pad_idx, left_pad=True),
        )
        self.assertAlmostEqual(
            expected_right,
            utils.make_positions(padded_right, pad_idx, left_pad=False),
        )

    def assertAlmostEqual(self, t1, t2):
        # same shape, element-wise difference below 1e-4
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4)


if __name__ == '__main__':
    unittest.main()
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/tests/utils.py
Python
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.

import argparse

import torch

from fairseq import utils
from fairseq.data import Dictionary
from fairseq.data.language_pair_dataset import collate
from fairseq.models import (
    FairseqEncoder,
    FairseqIncrementalDecoder,
    FairseqModel,
)
from fairseq.tasks import FairseqTask


def dummy_dictionary(vocab_size, prefix='token_'):
    """Build a Dictionary with `vocab_size` synthetic symbols."""
    d = Dictionary()
    for i in range(vocab_size):
        token = prefix + str(i)
        d.add_symbol(token)
    d.finalize(padding_factor=1)  # don't add extra padding symbols
    return d


def dummy_dataloader(
    samples,
    padding_idx=1,
    eos_idx=2,
    batch_size=None,
):
    """Wrap `samples` (dicts with 'source'/'target' tensors) in a DataLoader
    iterator, assigning sequential ids to samples that lack one."""
    if batch_size is None:
        batch_size = len(samples)

    # add any missing data to samples
    for i, sample in enumerate(samples):
        if 'id' not in sample:
            sample['id'] = i

    # create dataloader
    dataset = TestDataset(samples)
    dataloader = torch.utils.data.DataLoader(
        dataset,
        batch_size=batch_size,
        collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)),
    )
    return iter(dataloader)


def sequence_generator_setup():
    """Build the two-sentence scripted model used by the generator tests.

    Returns (tgt_dict, w1, w2, src_tokens, src_lengths, model); the inline
    comments trace which beam-search hypotheses each row produces.
    """
    # construct dummy dictionary
    d = dummy_dictionary(vocab_size=2)

    eos = d.eos()
    w1 = 4
    w2 = 5

    # construct source data
    src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]])
    src_lengths = torch.LongTensor([2, 2])

    args = argparse.Namespace()
    unk = 0.
    args.beam_probs = [
        # step 0:
        torch.FloatTensor([
            # eos      w1   w2
            # sentence 1:
            [0.0, unk, 0.9, 0.1],  # beam 1
            [0.0, unk, 0.9, 0.1],  # beam 2
            # sentence 2:
            [0.0, unk, 0.7, 0.3],
            [0.0, unk, 0.7, 0.3],
        ]),
        # step 1:
        torch.FloatTensor([
            # eos       w1    w2     prefix
            # sentence 1:
            [1.0, unk, 0.0, 0.0],  # w1: 0.9  (emit: w1 <eos>: 0.9*1.0)
            [0.0, unk, 0.9, 0.1],  # w2: 0.1
            # sentence 2:
            [0.25, unk, 0.35, 0.4],  # w1: 0.7  (don't emit: w1 <eos>: 0.7*0.25)
            [0.00, unk, 0.10, 0.9],  # w2: 0.3
        ]),
        # step 2:
        torch.FloatTensor([
            # eos       w1   w2      prefix
            # sentence 1:
            [0.0, unk, 0.1, 0.9],  # w2 w1: 0.1*0.9
            [0.6, unk, 0.2, 0.2],  # w2 w2: 0.1*0.1  (emit: w2 w2 <eos>: 0.1*0.1*0.6)
            # sentence 2:
            [0.60, unk, 0.4, 0.00],  # w1 w2: 0.7*0.4  (emit: w1 w2 <eos>: 0.7*0.4*0.6)
            [0.01, unk, 0.0, 0.99],  # w2 w2: 0.3*0.9
        ]),
        # step 3:
        torch.FloatTensor([
            # eos      w1   w2      prefix
            # sentence 1:
            [1.0, unk, 0.0, 0.0],  # w2 w1 w2: 0.1*0.9*0.9  (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0)
            [1.0, unk, 0.0, 0.0],  # w2 w1 w1: 0.1*0.9*0.1  (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0)
            # sentence 2:
            [0.1, unk, 0.5, 0.4],  # w2 w2 w2: 0.3*0.9*0.99  (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1)
            [1.0, unk, 0.0, 0.0],  # w1 w2 w1: 0.7*0.4*0.4  (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0)
        ]),
    ]

    task = TestTranslationTask.setup_task(args, d, d)
    model = task.build_model(args)
    tgt_dict = task.target_dictionary

    return tgt_dict, w1, w2, src_tokens, src_lengths, model


class TestDataset(torch.utils.data.Dataset):
    """Thin Dataset wrapper over an in-memory list of samples."""

    def __init__(self, data):
        super().__init__()
        self.data = data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)


class TestTranslationTask(FairseqTask):
    """Minimal translation task exposing fixed dictionaries and a TestModel."""

    def __init__(self, args, src_dict, tgt_dict, model):
        super().__init__(args)
        self.src_dict = src_dict
        self.tgt_dict = tgt_dict
        self.model = model

    @classmethod
    def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None):
        return cls(args, src_dict, tgt_dict, model)

    def build_model(self, args):
        return TestModel.build_model(args, self)

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.tgt_dict


class TestModel(FairseqModel):
    """Encoder/decoder pair made of TestEncoder and TestIncrementalDecoder."""

    def __init__(self, encoder, decoder):
        super().__init__(encoder, decoder)

    @classmethod
    def build_model(cls, args, task):
        encoder = TestEncoder(args, task.source_dictionary)
        decoder = TestIncrementalDecoder(args, task.target_dictionary)
        return cls(encoder, decoder)


class TestEncoder(FairseqEncoder):
    """Identity encoder: forwards the source tokens unchanged."""

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        self.args = args

    def forward(self, src_tokens, src_lengths):
        return src_tokens

    def reorder_encoder_out(self, encoder_out, new_order):
        return encoder_out.index_select(0, new_order)


class TestIncrementalDecoder(FairseqIncrementalDecoder):
    """Decoder that emits pre-scripted probabilities (args.beam_probs or
    args.probs) instead of learned distributions."""

    def __init__(self, args, dictionary):
        super().__init__(dictionary)
        assert hasattr(args, 'beam_probs') or hasattr(args, 'probs')
        args.max_decoder_positions = getattr(args, 'max_decoder_positions', 100)
        self.args = args

    def forward(self, prev_output_tokens, encoder_out, incremental_state=None):
        # in incremental mode only the newest token of each beam is consumed
        if incremental_state is not None:
            prev_output_tokens = prev_output_tokens[:, -1:]
        bbsz = prev_output_tokens.size(0)
        vocab = len(self.dictionary)
        src_len = encoder_out.size(1)
        tgt_len = prev_output_tokens.size(1)

        # determine number of steps
        if incremental_state is not None:
            # cache step number
            step = utils.get_incremental_state(self, incremental_state, 'step')
            if step is None:
                step = 0
            utils.set_incremental_state(self, incremental_state, 'step', step + 1)
            steps = [step]
        else:
            steps = list(range(tgt_len))

        # define output in terms of raw probs
        if hasattr(self.args, 'probs'):
            assert self.args.probs.dim() == 3, \
                'expected probs to have size bsz*steps*vocab'
            probs = self.args.probs.index_select(1, torch.LongTensor(steps))
        else:
            probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_()
            for i, step in enumerate(steps):
                # args.beam_probs gives the probability for every vocab element,
                # starting with eos, then unknown, and then the rest of the vocab
                if step < len(self.args.beam_probs):
                    probs[:, i, self.dictionary.eos():] = self.args.beam_probs[step]
                else:
                    # past the scripted steps, always emit eos
                    probs[:, i, self.dictionary.eos()] = 1.0

        # random attention
        attn = torch.rand(bbsz, tgt_len, src_len)

        return probs, attn

    def get_normalized_probs(self, net_output, log_probs, _):
        # the decoder returns probabilities directly
        probs = net_output[0]
        if log_probs:
            return probs.log()
        else:
            return probs

    def max_positions(self):
        return self.args.max_decoder_positions
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
translation/train.py
Python
#!/usr/bin/env python3 -u
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
"""
Train a new model on one or across multiple GPUs.
"""

import collections
import itertools
import os
import math

import torch

from fairseq import distributed_utils, options, progress_bar, tasks, utils
from fairseq.data import iterators
from fairseq.trainer import Trainer
from fairseq.meters import AverageMeter, StopwatchMeter


def main(args):
    """Single-process training entry point: build the task, model, trainer and
    iterator, then run the train/validate/checkpoint loop."""
    if args.max_tokens is None:
        args.max_tokens = 6000
    print(args)

    if not torch.cuda.is_available():
        raise NotImplementedError('Training on CPU is not supported')
    torch.cuda.set_device(args.device_id)
    torch.manual_seed(args.seed)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load dataset splits
    load_dataset_splits(task, ['train', 'valid'])

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    print('| num. model params: {}'.format(sum(p.numel() for p in model.parameters())))

    # Make a dummy batch to (i) warm the caching allocator and (ii) as a
    # placeholder DistributedDataParallel when there's an uneven number of
    # batches per worker.
    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        model.max_positions(),
    )
    dummy_batch = task.dataset('train').get_dummy_batch(args.max_tokens, max_positions)

    # Build trainer
    trainer = Trainer(args, task, model, criterion, dummy_batch)
    print('| training on {} GPUs'.format(args.distributed_world_size))
    print('| max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Initialize dataloader
    epoch_itr = task.get_batch_iterator(
        dataset=task.dataset(args.train_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=True,
        required_batch_size_multiple=8,
        seed=args.seed,
        num_shards=args.distributed_world_size,
        shard_id=args.distributed_rank,
    )

    # Load the latest checkpoint if one is available
    if not load_checkpoint(args, trainer, epoch_itr):
        trainer.dummy_train_step([dummy_batch])

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    lr = trainer.get_lr()
    train_meter = StopwatchMeter()
    train_meter.start()
    valid_losses = [None]
    valid_subsets = args.valid_subset.split(',')
    while lr > args.min_lr and epoch_itr.epoch < max_epoch and trainer.get_num_updates() < max_update:
        # train for one epoch
        train(args, trainer, task, epoch_itr)

        if epoch_itr.epoch % args.validate_interval == 0:
            valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        # save checkpoint
        if epoch_itr.epoch % args.save_interval == 0:
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])
    train_meter.stop()
    print('| done training in {:.1f} seconds'.format(train_meter.sum))


def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch, logging stats and saving interval
    checkpoints as configured."""
    # Update parameters every N batches (per-epoch schedule, last entry
    # applies to all later epochs)
    if epoch_itr.epoch <= len(args.update_freq):
        update_freq = args.update_freq[epoch_itr.epoch - 1]
    else:
        update_freq = args.update_freq[-1]

    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr()
    itr = iterators.GroupedIterator(itr, update_freq)
    progress = progress_bar.build_progress_bar(
        args, itr, epoch_itr.epoch, no_progress_bar='simple',
    )

    extra_meters = collections.defaultdict(lambda: AverageMeter())
    first_valid = args.valid_subset.split(',')[0]
    max_update = args.max_update or math.inf
    for i, samples in enumerate(progress, start=epoch_itr.iterations_in_epoch):
        log_output = trainer.train_step(samples)
        if log_output is None:
            continue

        # log mid-epoch stats
        stats = get_training_stats(trainer)
        for k, v in log_output.items():
            if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                continue  # these are already logged above
            if 'loss' in k:
                # weight loss-like meters by the sample size
                extra_meters[k].update(v, log_output['sample_size'])
            else:
                extra_meters[k].update(v)
            stats[k] = extra_meters[k].avg
        progress.log(stats)

        # ignore the first mini-batch in words-per-second calculation
        if i == 0:
            trainer.get_meter('wps').reset()

        num_updates = trainer.get_num_updates()
        if args.save_interval_updates > 0 and num_updates % args.save_interval_updates == 0 and num_updates > 0:
            valid_losses = validate(args, trainer, task, epoch_itr, [first_valid])
            save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

        if num_updates >= max_update:
            break

    # log end-of-epoch stats
    stats = get_training_stats(trainer)
    for k, meter in extra_meters.items():
        stats[k] = meter.avg
    progress.print(stats)

    # reset training meters
    for k in [
        'train_loss', 'train_nll_loss', 'wps', 'ups', 'wpb', 'bsz', 'gnorm', 'clip',
    ]:
        meter = trainer.get_meter(k)
        if meter is not None:
            meter.reset()


def get_training_stats(trainer):
    """Collect an ordered dict of training statistics from the trainer's meters."""
    stats = collections.OrderedDict()
    stats['loss'] = '{:.3f}'.format(trainer.get_meter('train_loss').avg)
    if trainer.get_meter('train_nll_loss').count > 0:
        nll_loss = trainer.get_meter('train_nll_loss').avg
        stats['nll_loss'] = '{:.3f}'.format(nll_loss)
    else:
        # fall back to the training loss when no NLL loss was recorded
        nll_loss = trainer.get_meter('train_loss').avg
    stats['ppl'] = get_perplexity(nll_loss)
    stats['wps'] = round(trainer.get_meter('wps').avg)
    stats['ups'] = '{:.1f}'.format(trainer.get_meter('ups').avg)
    stats['wpb'] = round(trainer.get_meter('wpb').avg)
    stats['bsz'] = round(trainer.get_meter('bsz').avg)
    stats['num_updates'] = trainer.get_num_updates()
    stats['lr'] = trainer.get_lr()
    stats['gnorm'] = '{:.3f}'.format(trainer.get_meter('gnorm').avg)
    stats['clip'] = '{:.0%}'.format(trainer.get_meter('clip').avg)
    stats['oom'] = trainer.get_meter('oom').avg
    if trainer.get_meter('loss_scale') is not None:
        stats['loss_scale'] = '{:.3f}'.format(trainer.get_meter('loss_scale').avg)
    stats['wall'] = round(trainer.get_meter('wall').elapsed_time)
    stats['train_wall'] = round(trainer.get_meter('train_wall').sum)
    return stats


def validate(args, trainer, task, epoch_itr, subsets):
    """Evaluate the model on the validation set(s) and return the losses."""
    valid_losses = []
    for subset in subsets:
        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=task.dataset(subset),
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences_valid,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                trainer.get_model().max_positions(),
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=8,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.build_progress_bar(
            args, itr, epoch_itr.epoch,
            prefix='valid on \'{}\' subset'.format(subset),
            no_progress_bar='simple'
        )

        # reset validation loss meters
        for k in ['valid_loss', 'valid_nll_loss']:
            meter = trainer.get_meter(k)
            if meter is not None:
                meter.reset()
        extra_meters = collections.defaultdict(lambda: AverageMeter())

        for sample in progress:
            log_output = trainer.valid_step(sample)

            for k, v in log_output.items():
                if k in ['loss', 'nll_loss', 'ntokens', 'nsentences', 'sample_size']:
                    continue
                extra_meters[k].update(v)

        # log validation stats
        stats = get_valid_stats(trainer)
        for k, meter in extra_meters.items():
            stats[k] = meter.avg
        progress.print(stats)

        valid_losses.append(stats['valid_loss'])
    return valid_losses


def get_valid_stats(trainer):
    """Collect an ordered dict of validation statistics."""
    stats = collections.OrderedDict()
    stats['valid_loss'] = trainer.get_meter('valid_loss').avg
    if trainer.get_meter('valid_nll_loss').count > 0:
        nll_loss = trainer.get_meter('valid_nll_loss').avg
        stats['valid_nll_loss'] = nll_loss
    else:
        nll_loss = trainer.get_meter('valid_loss').avg
    stats['valid_ppl'] = get_perplexity(nll_loss)
    stats['num_updates'] = trainer.get_num_updates()
    if hasattr(save_checkpoint, 'best'):
        stats['best'] = min(save_checkpoint.best, stats['valid_loss'])
    return stats


def get_perplexity(loss):
    """Return the base-2 perplexity of `loss` as a string, or inf on overflow."""
    try:
        return '{:.2f}'.format(math.pow(2, loss))
    except OverflowError:
        return float('inf')


def save_checkpoint(args, trainer, epoch_itr, val_loss):
    """Write epoch/update/best/last checkpoints according to args.

    Tracks the best validation loss seen so far on the function object
    (save_checkpoint.best).
    """
    if args.no_save or not distributed_utils.is_master(args):
        return
    epoch = epoch_itr.epoch
    end_of_epoch = epoch_itr.end_of_epoch()
    updates = trainer.get_num_updates()

    checkpoint_conds = collections.OrderedDict()
    checkpoint_conds['checkpoint{}.pt'.format(epoch)] = (
        end_of_epoch and not args.no_epoch_checkpoints and
        epoch % args.save_interval == 0
    )
    checkpoint_conds['checkpoint_{}_{}.pt'.format(epoch, updates)] = (
        not end_of_epoch and args.save_interval_updates > 0 and
        updates % args.save_interval_updates == 0
    )
    checkpoint_conds['checkpoint_best.pt'] = (
        val_loss is not None and
        (not hasattr(save_checkpoint, 'best') or val_loss < save_checkpoint.best)
    )
    checkpoint_conds['checkpoint_last.pt'] = True  # keep this last so that it's a symlink

    if val_loss is not None:
        prev_best = getattr(save_checkpoint, 'best', val_loss)
        save_checkpoint.best = min(val_loss, prev_best)
    extra_state = {
        'train_iterator': epoch_itr.state_dict(),
        'val_loss': val_loss,
    }
    # FIX: only record 'best' when one exists; previously this read
    # save_checkpoint.best unconditionally and raised AttributeError on the
    # first interval save (val_loss is None before any validation has run).
    # load_checkpoint already guards with `if 'best' in extra_state`.
    if hasattr(save_checkpoint, 'best'):
        extra_state['best'] = save_checkpoint.best

    checkpoints = [os.path.join(args.save_dir, fn) for fn, cond in checkpoint_conds.items() if cond]
    if len(checkpoints) > 0:
        for cp in checkpoints:
            trainer.save_checkpoint(cp, extra_state)

    if not end_of_epoch and args.keep_interval_updates > 0:
        # remove old checkpoints; checkpoints are sorted in descending order
        checkpoints = utils.checkpoint_paths(args.save_dir, pattern=r'checkpoint_\d+_(\d+)\.pt')
        for old_chk in checkpoints[args.keep_interval_updates:]:
            os.remove(old_chk)


def load_checkpoint(args, trainer, epoch_itr):
    """Load a checkpoint and replay dataloader to match.

    Returns True if a checkpoint file was found, else False.
    """
    os.makedirs(args.save_dir, exist_ok=True)
    checkpoint_path = os.path.join(args.save_dir, args.restore_file)
    if os.path.isfile(checkpoint_path):
        # NOTE(review): eval() on a CLI-provided string executes arbitrary
        # code; acceptable for a trusted command line, but consider
        # ast.literal_eval if the overrides are ever sourced elsewhere.
        extra_state = trainer.load_checkpoint(
            checkpoint_path, args.reset_optimizer, args.reset_lr_scheduler,
            eval(args.optimizer_overrides),
        )
        if extra_state is not None:
            # replay train iterator to match checkpoint
            epoch_itr.load_state_dict(extra_state['train_iterator'])

            print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(
                checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))

            trainer.lr_step(epoch_itr.epoch)
            trainer.lr_step_update(trainer.get_num_updates())
            if 'best' in extra_state:
                save_checkpoint.best = extra_state['best']
        return True
    return False


def load_dataset_splits(task, splits):
    """Load the requested splits; non-train splits may be sharded
    (valid, valid1, valid2, ...) and are loaded until one is missing."""
    for split in splits:
        if split == 'train':
            task.load_dataset(split, combine=True)
        else:
            for k in itertools.count():
                split_k = split + (str(k) if k > 0 else '')
                try:
                    task.load_dataset(split_k, combine=False)
                except FileNotFoundError:
                    if k > 0:
                        break
                    # FIX: bare `raise` (was `raise e`) preserves the original
                    # traceback when even the base split is missing
                    raise


if __name__ == '__main__':
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser)

    if args.distributed_port > 0 or args.distributed_init_method is not None:
        from distributed_train import main as distributed_main

        distributed_main(args)
    elif args.distributed_world_size > 1:
        from multiprocessing_train import main as multiprocessing_main

        multiprocessing_main(args)
    else:
        main(args)
zhuohan123/macaron-net
147
Codes for "Understanding and Improving Transformer From a Multi-Particle Dynamic System Point of View"
Python
zhuohan123
Zhuohan Li
vLLM / Meta
src/AssertableHtmlServiceProvider.php
PHP
<?php declare(strict_types=1); namespace Ziadoz\AssertableHtml; use Illuminate\Support\ServiceProvider; use Illuminate\Support\Traits\Macroable; use Illuminate\Testing\TestComponent; use Illuminate\Testing\TestResponse; use Illuminate\Testing\TestView; use Ziadoz\AssertableHtml\Mixins\TestComponentMixins; use Ziadoz\AssertableHtml\Mixins\TestResponseMixins; use Ziadoz\AssertableHtml\Mixins\TestViewMixins; class AssertableHtmlServiceProvider extends ServiceProvider { public function boot(): void { if ($this->app->runningUnitTests()) { TestResponse::mixin(new TestResponseMixins); TestView::mixin(new TestViewMixins); // This functionality is only available in the point release after I added it to Laravel. // @see: https://github.com/laravel/framework/pull/54359 if (in_array(Macroable::class, class_uses(TestComponent::class) ?? [])) { TestComponent::mixin(new TestComponentMixins); } } } }
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Asserts/AssertsAttributesList.php
PHP
<?php declare(strict_types=1); namespace Ziadoz\AssertableHtml\Concerns\Asserts; use PHPUnit\Framework\Assert as PHPUnit; use Ziadoz\AssertableHtml\Support\Whitespace; trait AssertsAttributesList { /* |-------------------------------------------------------------------------- | Assert Attributes |-------------------------------------------------------------------------- */ /** * Assert the attribute list passes the given callback. * * @param callable(static $attributes): bool $callback */ public function assertAttributes(callable $callback, ?string $message = null): static { PHPUnit::assertTrue( $callback($this), $message ?? "The attribute list doesn't pass the given callback.", ); return $this; } /** * Assert the given attribute in the attribute list passes the given callback. * * @param callable(?string $value): bool $callback */ public function assertAttribute(string $attribute, callable $callback, ?string $message = null): static { PHPUnit::assertTrue( $callback($this->attributes[$attribute] ?? null), $message ?? sprintf( "The attribute [%s] doesn't pass the given callback.", $attribute, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Array |-------------------------------------------------------------------------- */ /** Assert the given associative array of attributes equals the attribute list. */ public function assertEqualsArray(array $attributes, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertSame( $this->prepareArray($attributes), $this->prepareArray($this->toArray(), $normaliseWhitespace), $message ?? "The attributes list doesn't equal the given array.", ); return $this; } /** Prepare the attributes array by sorting and then normalising the whitespace. */ private function prepareArray(array $attributes, bool $normaliseWhitespace = false): array { ksort($attributes); return $normaliseWhitespace ? 
array_map(fn (string $value): string => Whitespace::normalise($value), $attributes) : $attributes; } /* |-------------------------------------------------------------------------- | Assert Empty |-------------------------------------------------------------------------- */ /** Assert the attribute list is empty. */ public function assertEmpty(?string $message = null): static { PHPUnit::assertTrue( $this->empty(), $message ?? "The attribute list isn't empty.", ); return $this; } /** Assert the attribute list is not empty. */ public function assertNotEmpty(?string $message = null): static { PHPUnit::assertFalse( $this->empty(), $message ?? 'The attribute list is empty.', ); return $this; } /* |-------------------------------------------------------------------------- | Assert Present / Missing |-------------------------------------------------------------------------- */ /** Assert the given attribute is present in the attribute list. */ public function assertPresent(string $attribute, ?string $message = null): static { PHPUnit::assertArrayHasKey( $attribute, $this->attributes, $message ?? sprintf( "The attribute list doesn't contain the [%s] attribute.", $attribute, ), ); return $this; } /** Assert the given attribute is missing in the attribute list. */ public function assertMissing(string $attribute, ?string $message = null): static { PHPUnit::assertArrayNotHasKey( $attribute, $this->attributes, $message ?? sprintf( 'The attribute list contains the [%s] attribute.', $attribute, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Equals |-------------------------------------------------------------------------- */ /** Assert the given attribute equals the given value in the attribute list. 
*/ public function assertEquals(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertSame( $value, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( "The attribute [%s] doesn't equal the given value [%s].", $attribute, $value, ), ); return $this; } /** Assert the given attribute doesn't equal the given value in the attribute list. */ public function assertDoesntEqual(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertNotSame( $value, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( 'The attribute [%s] equals the given value [%s].', $attribute, $value, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Starts / Ends With |-------------------------------------------------------------------------- */ /** Assert the given attribute starts with the given prefix in the attribute list. */ public function assertStartsWith(string $attribute, string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertStringStartsWith( $prefix, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( "The attribute [%s] doesn't start with the given prefix [%s].", $attribute, $prefix, ), ); return $this; } /** Assert the given attribute doesn't start with the given prefix in the attribute list. */ public function assertDoesntStartWith(string $attribute, string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertStringStartsNotWith( $prefix, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( 'The attribute [%s] starts with the given prefix [%s].', $attribute, $prefix, ), ); return $this; } /** Assert the given attribute ends with the given suffix in the attribute list. 
*/ public function assertEndsWith(string $attribute, string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertStringEndsWith( $suffix, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( "The attribute [%s] doesn't end with the given suffix [%s].", $attribute, $suffix, ), ); return $this; } /** Assert the given attribute doesn't end with the given suffix in the attribute list. */ public function assertDoesntEndWith(string $attribute, string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertStringEndsNotWith( $suffix, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( 'The attribute [%s] ends with the given suffix [%s].', $attribute, $suffix, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Contains |-------------------------------------------------------------------------- */ /** Assert the given attribute contains the given value in the attribute list. */ public function assertContains(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertStringContainsString( $value, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( "The attribute [%s] doesn't contains the given value [%s].", $attribute, $value, ), ); return $this; } /** Assert the given attribute doesn't contain the given value in the attribute list. */ public function assertDoesntContain(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertStringNotContainsString( $value, $this->value($attribute, $normaliseWhitespace), $message ?? sprintf( 'The attribute [%s] contains the given value [%s].', $attribute, $value, ), ); return $this; } }
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Asserts/AssertsClassesList.php
PHP
<?php declare(strict_types=1); namespace Ziadoz\AssertableHtml\Concerns\Asserts; use PHPUnit\Framework\Assert as PHPUnit; trait AssertsClassesList { /* |-------------------------------------------------------------------------- | Assert Class |-------------------------------------------------------------------------- */ /** * Assert the class list passes the given callback. * * @param callable(static $classes): bool $callback */ public function assertClasses(callable $callback, ?string $message = null): static { PHPUnit::assertTrue( $callback($this), $message ?? "The class list doesn't pass the given callback.", ); return $this; } /* |-------------------------------------------------------------------------- | Assert Empty |-------------------------------------------------------------------------- */ /** Assert the class list is empty. */ public function assertEmpty(?string $message = null): static { PHPUnit::assertTrue( $this->empty(), $message ?? "The class list isn't empty.", ); return $this; } /** Assert the class list isn't empty. */ public function assertNotEmpty(?string $message = null): static { PHPUnit::assertFalse( $this->empty(), $message ?? 'The class list is empty.', ); return $this; } /* |-------------------------------------------------------------------------- | Assert Contains |-------------------------------------------------------------------------- */ /** Assert the class list contains the given class. */ public function assertContains(string $class, ?string $message = null): static { PHPUnit::assertTrue( $this->contains($class), $message ?? sprintf( "The class list doesn't contain the given class [%s].", $class, ), ); return $this; } /** Assert the element's class doesn't contain the given class. */ public function assertDoesntContain(string $class, ?string $message = null): static { PHPUnit::assertFalse( $this->contains($class), $message ?? 
sprintf( 'The class list contains the given class [%s].', $class, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Any / All |-------------------------------------------------------------------------- */ /** Assert the class list contains any of the given classes. */ public function assertContainsAny(array $classes, ?string $message = null): static { PHPUnit::assertTrue( $this->any($classes), $message ?? sprintf( "The class list doesn't contain any of the classes [%s]", implode(' ', $classes), ), ); return $this; } /** Assert the class list doesn't contain any of the given classes. */ public function assertDoesntContainAny(array $classes, ?string $message = null): static { PHPUnit::assertFalse( $this->any($classes), $message ?? sprintf( 'The class list contains some of the classes [%s]', implode(' ', $classes), ), ); return $this; } /** Assert the class list contains all the given classes. */ public function assertContainsAll(array $classes, ?string $message = null): static { PHPUnit::assertTrue( $this->all($classes), $message ?? sprintf( "The class list doesn't contain all the classes [%s]", implode(' ', $classes), ), ); return $this; } /** Assert the class list doesn't contain all the given classes. */ public function assertDoesntContainAll(array $classes, ?string $message = null): static { PHPUnit::assertFalse( $this->all($classes), $message ?? sprintf( 'The class list contains all the classes [%s]', implode(' ', $classes), ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Value |-------------------------------------------------------------------------- */ /** Assert the class list value equals the given value. */ public function assertValueEquals(string $value, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertSame( $value, $this->value($normaliseWhitespace), $message ?? 
"The class list doesn't equal the given value.", ); return $this; } /** Assert the class list value doesn't equal the given value. */ public function assertValueDoesntEqual(string $value, bool $normaliseWhitespace = false, ?string $message = null): static { PHPUnit::assertNotSame( $value, $this->value($normaliseWhitespace), $message ?? 'The class list equals the given value.', ); return $this; } }
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Asserts/AssertsDocument.php
PHP
<?php declare(strict_types=1); namespace Ziadoz\AssertableHtml\Concerns\Asserts; use PHPUnit\Framework\Assert as PHPUnit; trait AssertsDocument { /* |-------------------------------------------------------------------------- | Assert Title |-------------------------------------------------------------------------- */ /** Assert the page title equals the given value. */ public function assertTitleEquals(string $title, ?string $message = null): self { PHPUnit::assertSame( $title, $this->title, $message ?? "The page title doesn't equal the given title.", ); return $this; } /** Assert the page title doesn't equal the given value. */ public function assertTitleDoesntEqual(string $title, ?string $message = null): self { PHPUnit::assertNotSame( $title, $this->title, $message ?? 'The page title equals the given title.', ); return $this; } }
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Asserts/AssertsElement.php
PHP
<?php declare(strict_types=1); namespace Ziadoz\AssertableHtml\Concerns\Asserts; use PHPUnit\Framework\Assert as PHPUnit; use Ziadoz\AssertableHtml\Dom\AssertableAttributesList; use Ziadoz\AssertableHtml\Dom\AssertableClassesList; use Ziadoz\AssertableHtml\Dom\AssertableElement; use Ziadoz\AssertableHtml\Dom\AssertableText; trait AssertsElement { /* |-------------------------------------------------------------------------- | Assert Tag |-------------------------------------------------------------------------- */ /** Assert the element's tag matches the given tag. */ public function assertTagEquals(string $tag, ?string $message = null): static { PHPUnit::assertSame( $expected = strtolower($tag), strtolower($this->element->tagName), $message ?? sprintf( "The element [%s] tag doesn't equal the given tag [%s].", $this->identifier(), $expected, ), ); return $this; } /** Assert the element's tag matches the given tag. */ public function assertTagDoesntEqual(string $tag, ?string $message = null): static { PHPUnit::assertNotSame( $expected = strtolower($tag), strtolower($this->element->tagName), $message ?? sprintf( 'The element [%s] tag equals the given tag [%s].', $this->identifier(), $expected, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Element |-------------------------------------------------------------------------- */ /** * Assert the element passes the given callback. * * @param callable(AssertableElement $element): bool $callback */ public function assertElement(callable $callback, ?string $message = null): static { PHPUnit::assertTrue( $callback($this), $message ?? 
sprintf( "The element [%s] doesn't pass the given callback.", $this->identifier(), ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Matches Selector |-------------------------------------------------------------------------- */ /** Assert the element matches the given selector. */ public function assertMatchesSelector(string $selector, ?string $message = null): static { PHPUnit::assertTrue( $this->element->matches($selector), $message ?? sprintf( "The element [%s] doesn't match the given selector [%s].", $this->identifier(), $selector, ), ); return $this; } /** Assert the element doesn't match the given selector. */ public function assertDoesntMatchSelector(string $selector, ?string $message = null): static { PHPUnit::assertFalse( $this->element->matches($selector), $message ?? sprintf( 'The element [%s] matches the given selector [%s].', $this->identifier(), $selector, ), ); return $this; } /* |-------------------------------------------------------------------------- | Assert Elements Count |-------------------------------------------------------------------------- */ /** Assert the element the expected number of elements matching the given selector. */ public function assertNumberOfElements(string $selector, string $comparison, int $count, ?string $message = null): static { return match ($comparison) { '=' => $this->assertElementsCount($selector, $count, $message), '!=' => $this->assertElementsNotCount($selector, $count, $message), '>' => $this->assertElementsCountGreaterThan($selector, $count, $message), '>=' => $this->assertElementsCountGreaterThanOrEqual($selector, $count, $message), '<' => $this->assertElementsCountLessThan($selector, $count, $message), '<=' => $this->assertElementsCountLessThanOrEqual($selector, $count, $message), }; } /** Assert the element contains the exact number of elements matching the given selector. 
*/ public function assertElementsCount(string $selector, int $count, ?string $message = null): static { $this->querySelectorAll($selector)->assertCount($count, $message ?? sprintf( "The element [%s] doesn't have exactly [%d] elements matching the given selector [%s].", $this->identifier(), $count, $selector, )); return $this; } /** Assert the element doesn't contain the exact number of elements matching the given selector. */ public function assertElementsNotCount(string $selector, int $count, ?string $message = null): static { $this->querySelectorAll($selector)->assertNotCount($count, $message ?? sprintf( 'The element [%s] has exactly [%d] elements matching the given selector [%s].', $this->identifier(), $count, $selector, )); return $this; } /** Assert the element contains greater than the number of elements matching the given selector. */ public function assertElementsCountGreaterThan(string $selector, int $count, ?string $message = null): static { $this->querySelectorAll($selector)->assertCountGreaterThan($count, $message ?? sprintf( "The element [%s] doesn't have greater than [%d] elements matching the given selector [%s].", $this->identifier(), $count, $selector, )); return $this; } /** Assert the element contains greater than or equal the number of elements matching the given selector. */ public function assertElementsCountGreaterThanOrEqual(string $selector, int $count, ?string $message = null): static { $this->querySelectorAll($selector)->assertCountGreaterThanOrEqual($count, $message ?? sprintf( "The element [%s] doesn't have greater than or equal to [%d] elements matching the given selector [%s].", $this->identifier(), $count, $selector, )); return $this; } /** Assert the element contains less than the number of elements matching the given selector. */ public function assertElementsCountLessThan(string $selector, int $count, ?string $message = null): static { $this->querySelectorAll($selector)->assertCountLessThan($count, $message ?? 
sprintf( "The element [%s] doesn't have less than [%d] elements matching the given selector [%s].", $this->identifier(), $count, $selector, )); return $this; } /** Assert the element contains less than or equal the number of elements matching the given selector. */ public function assertElementsCountLessThanOrEqual(string $selector, int $count, ?string $message = null): static { $this->querySelectorAll($selector)->assertCountLessThanOrEqual($count, $message ?? sprintf( "The element [%s] doesn't have less than or equal to [%d] elements matching the given selector [%s].", $this->identifier(), $count, $selector, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Elements Present/Missing |-------------------------------------------------------------------------- */ /** Assert the element contains one or more elements matching the given selector. */ public function assertElementsPresent(string $selector, ?string $message = null): static { $this->assertElementsCountGreaterThan($selector, 0, $message ?? sprintf( "The element [%s] doesn't have one or more elements matching the given selector [%s].", $this->identifier(), $selector, )); return $this; } /** Assert the element contains no elements matching the given selector. */ public function assertElementsMissing(string $selector, ?string $message = null): static { $this->assertElementsCount($selector, 0, $message ?? sprintf( 'The element [%s] has one or more elements matching the given selector [%s].', $this->identifier(), $selector, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Text |-------------------------------------------------------------------------- */ /** * Assert the element's text passes the given callback. * * @param callable(AssertableText $text): bool $callback */ public function assertText(callable $callback, ?string $message = null): static { $this->text->assertText($callback, $message ?? 
sprintf( "The element [%s] text doesn't pass the given callback.", $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Text Equals |-------------------------------------------------------------------------- */ /** Assert the element's text equals the given text. */ public function assertTextEquals(string $text, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertEquals($text, $normaliseWhitespace, $message ?? sprintf( "The element [%s] text doesn't equal the given text.", $this->identifier(), )); return $this; } /** Assert the element's text doesn't equal the given text. */ public function assertTextDoesntEqual(string $text, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertDoesntEqual($text, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] text equals the given text.', $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Text Starts / Ends With |-------------------------------------------------------------------------- */ /** Assert the element's text starts with the given text. */ public function assertTextStartsWith(string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertStartsWith($prefix, $normaliseWhitespace, $message ?? sprintf( "The element [%s] text doesn't start with the given prefix.", $this->identifier(), )); return $this; } /** Assert the element's text starts doesn't start with the given text. */ public function assertTextDoesntStartWith(string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertDoesntStartWith($prefix, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] text starts with the given prefix.', $this->identifier(), )); return $this; } /** Assert the element's text ends with the given text. 
*/ public function assertTextEndsWith(string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertEndsWith($suffix, $normaliseWhitespace, $message ?? sprintf( "The element [%s] text doesn't end with the given suffix.", $this->identifier(), )); return $this; } /** Assert the element's text doesn't end with the given text. */ public function assertTextDoesntEndWith(string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertDoesntEndWith($suffix, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] text ends with the given suffix.', $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Text Contains |-------------------------------------------------------------------------- */ /** Assert the element's text contains the given text. */ public function assertTextContains(string $text, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertContains($text, $normaliseWhitespace, $message ?? sprintf( "The element [%s] text doesn't contain the given text.", $this->identifier(), )); return $this; } /** Alias for assertTextContains() */ public function assertSeeIn(string $text, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertTextContains($text, $normaliseWhitespace, $message); return $this; } /** Assert the element's text doesn't contain the given text. */ public function assertTextDoesntContain(string $text, bool $normaliseWhitespace = false, ?string $message = null): static { $this->text->assertDoesntContain($text, $normaliseWhitespace, $message ?? 
sprintf( 'The element [%s] text contains the given text.', $this->identifier(), )); return $this; } /** Alias for assertTextDoesntContain() */ public function assertDontSeeIn(string $text, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertTextDoesntContain($text, $normaliseWhitespace, $message); return $this; } /* |-------------------------------------------------------------------------- | Assert ID Present/Missing |-------------------------------------------------------------------------- */ /** Assert the element has an ID. */ public function assertIdPresent(): static { $this->attributes->assertPresent('id', sprintf( 'The element [%s] is missing the id attribute.', $this->identifier(), )); return $this; } /** Assert the element is missing an ID. */ public function assertIdMissing(): static { $this->attributes->assertMissing('id', sprintf( 'The element [%s] has the id attribute.', $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert ID Equals |-------------------------------------------------------------------------- */ /** Assert the element's ID equals the given value. */ public function assertIdEquals(string $value, ?string $message = null): static { $this->attributes->assertEquals('id', $value, false, $message ?? sprintf( "The element [%s] id doesn't equal the given value [%s].", $this->identifier(), $value, )); return $this; } /** Assert the element's ID doesn't equal the given value. */ public function assertIdDoesntEqual(string $value, ?string $message = null): static { $this->attributes->assertDoesntEqual('id', $value, false, $message ?? 
sprintf( 'The element [%s] id equals the given value [%s].', $this->identifier(), $value, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Class |-------------------------------------------------------------------------- */ /** * Assert the element's class passes the given callback. * * @param callable(AssertableClassesList $classes): bool $callback */ public function assertClass(callable $callback, ?string $message = null): static { $this->classes->assertClasses($callback, $message ?? sprintf( "The element [%s] class doesn't pass the given callback.", $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Class Empty |-------------------------------------------------------------------------- */ /** Assert the element's class list is empty. */ public function assertClassEmpty(?string $message = null): static { $this->classes->assertEmpty($message ?? sprintf( "The element [%s] class list isn't empty.", $this->identifier(), )); return $this; } /** Assert the element's class list isn't empty. */ public function assertClassNotEmpty(?string $message = null): static { $this->classes->assertNotEmpty($message ?? sprintf( 'The element [%s] class list is empty.', $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Class Present/Missing |-------------------------------------------------------------------------- */ /** Assert the element has a class. */ public function assertClassPresent(): static { $this->attributes->assertPresent('class', sprintf( 'The element [%s] is missing the class attribute.', $this->identifier(), )); return $this; } /** Assert the element is missing a class. 
*/ public function assertClassMissing(): static { $this->attributes->assertMissing('class', sprintf( 'The element [%s] has the class attribute.', $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Class Equals |-------------------------------------------------------------------------- */ /** Assert the element's class equals the given class. */ public function assertClassEquals(string $class, bool $normaliseWhitespace = false, ?string $message = null): static { $this->classes->assertValueEquals($class, $normaliseWhitespace, $message ?? sprintf( "The element [%s] class doesn't equal the given class [%s].", $this->identifier(), $class, )); return $this; } /** Assert the element's class doesn't equal the given class. */ public function assertClassDoesntEqual(string $class, bool $normaliseWhitespace = false, ?string $message = null): static { $this->classes->assertValueDoesntEqual($class, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] class equals the given class [%s].', $this->identifier(), $class, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Class Contains |-------------------------------------------------------------------------- */ /** Assert the element's class contains the given class. */ public function assertClassContains(string $class, ?string $message = null): static { $this->classes->assertContains($class, $message ?? sprintf( "The element [%s] class doesn't contain the given class [%s].", $this->identifier(), $class, )); return $this; } /** Assert the element's class doesn't contain the given class. */ public function assertClassDoesntContain(string $class, ?string $message = null): static { $this->classes->assertDoesntContain($class, $message ?? 
sprintf( 'The element [%s] class contains the given class [%s].', $this->identifier(), $class, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Class Contains Any / All |-------------------------------------------------------------------------- */ /** Assert the element's class contains any of the given classes. */ public function assertClassContainsAny(array $classes, ?string $message = null): static { $this->classes->assertContainsAny($classes, $message ?? sprintf( "The element [%s] class doesn't contain any of the given classes [%s].", $this->identifier(), implode(' ', $classes), )); return $this; } /** Assert the element's class doesn't contain any of the given classes. */ public function assertClassDoesntContainAny(array $classes, ?string $message = null): static { $this->classes->assertDoesntContainAny($classes, $message ?? sprintf( 'The element [%s] class contains some of the given classes [%s].', $this->identifier(), implode(' ', $classes), )); return $this; } /** Assert the element's class contains all the given classes. */ public function assertClassContainsAll(array $classes, ?string $message = null): static { $this->classes->assertContainsAll($classes, $message ?? sprintf( "The element [%s] class doesn't contain all the given classes [%s].", $this->identifier(), implode(' ', $classes), )); return $this; } /** Assert the element's class doesn't contain all the given classes. */ public function assertClassDoesntContainAll(array $classes, ?string $message = null): static { $this->classes->assertDoesntContainAll($classes, $message ?? 
sprintf( 'The element [%s] class contains all the given classes [%s].', $this->identifier(), implode(' ', $classes), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute |-------------------------------------------------------------------------- */ /** * Assert the element's attributes pass the given callback. * * @param callable(AssertableAttributesList $value): bool $callback */ public function assertAttributes(callable $callback, ?string $message = null): static { $this->attributes->assertAttributes($callback, $message ?? sprintf( "The element [%s] attributes don't pass the given callback.", $this->identifier(), )); return $this; } /** * Assert the element's attribute passes the given callback. * * @param callable(?string $value): bool $callback */ public function assertAttribute(string $attribute, callable $callback, ?string $message = null): static { $this->attributes->assertAttribute($attribute, $callback, $message ?? sprintf( "The element [%s] attribute [%s] doesn't pass the given callback.", $this->identifier(), $attribute, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Array |-------------------------------------------------------------------------- */ /** Assert the given associative array of attributes equals the attribute list. */ public function assertAttributesEqualArray(array $attributes, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertEqualsArray($attributes, $normaliseWhitespace, $message ?? sprintf( "The element [%s] attributes don't equal the given array.", $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attributes Empty |-------------------------------------------------------------------------- */ /** Assert the element's attributes list is empty. 
*/ public function assertAttributesEmpty(?string $message = null): static { $this->attributes->assertEmpty($message ?? sprintf( "The element [%s] attribute list isn't empty.", $this->identifier(), )); return $this; } /** Assert the element's attribute list isn't empty. */ public function assertAttributesNotEmpty(?string $message = null): static { $this->attributes->assertNotEmpty($message ?? sprintf( 'The element [%s] attribute list is empty.', $this->identifier(), )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Present/Missing |-------------------------------------------------------------------------- */ /** Assert the element has the given attribute. */ public function assertAttributePresent(string $attribute, ?string $message = null): static { $this->attributes->assertPresent($attribute, $message ?? sprintf( 'The element [%s] is missing the given attribute [%s].', $this->identifier(), $attribute, )); return $this; } /** Assert the element is missing the given attribute. */ public function assertAttributeMissing(string $attribute, ?string $message = null): static { $this->attributes->assertMissing($attribute, $message ?? sprintf( 'The element [%s] has the given attribute [%s].', $this->identifier(), $attribute, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Equals |-------------------------------------------------------------------------- */ /** Assert the given element's attribute equals the given value. */ public function assertAttributeEquals(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertEquals($attribute, $value, $normaliseWhitespace, $message ?? 
sprintf( "The element [%s] attribute [%s] doesn't equal the given value [%s].", $this->identifier(), $attribute, $value, )); return $this; } /** Assert the given element's attribute doesn't equal the given value. */ public function assertAttributeDoesntEqual(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertDoesntEqual($attribute, $value, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] attribute [%s] equals the given value [%s].', $this->identifier(), $attribute, $value, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Contains |-------------------------------------------------------------------------- */ /** Assert the given element's attribute contains the given value. */ public function assertAttributeContains(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertContains($attribute, $value, $normaliseWhitespace, $message ?? sprintf( "The element [%s] attribute [%s] doesn't contain the given value [%s].", $this->identifier(), $attribute, $value, )); return $this; } /** Assert the given element's class doesn't contain the given value. */ public function assertAttributeDoesntContain(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertDoesntContain($attribute, $value, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] attribute [%s] contains the given value [%s].', $this->identifier(), $attribute, $value, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Attribute Starts / Ends With |-------------------------------------------------------------------------- */ /** Assert the attribute starts with the given prefix. 
*/ public function assertAttributeStartsWith(string $attribute, string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertStartsWith($attribute, $prefix, $normaliseWhitespace, $message ?? sprintf( "The element [%s] attribute [%s] doesn't start with the given prefix [%s].", $this->identifier(), $attribute, $prefix, )); return $this; } /** Assert the attribute doesn't start with the given prefix. */ public function assertAttributeDoesntStartWith(string $attribute, string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertDoesntStartWith($attribute, $prefix, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] attribute [%s] starts with the given prefix [%s].', $this->identifier(), $attribute, $prefix, )); return $this; } /** Assert the attribute ends with the given prefix. */ public function assertAttributeEndsWith(string $attribute, string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertEndsWith($attribute, $suffix, $normaliseWhitespace, $message ?? sprintf( "The element [%s] attribute [%s] doesn't end with the given suffix [%s].", $this->identifier(), $attribute, $suffix, )); return $this; } /** Assert the attribute doesn't start with the given prefix. */ public function assertAttributeDoesntEndWith(string $attribute, string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static { $this->attributes->assertDoesntEndWith($attribute, $suffix, $normaliseWhitespace, $message ?? sprintf( 'The element [%s] attribute [%s] ends with the given suffix [%s].', $this->identifier(), $attribute, $suffix, )); return $this; } /* |-------------------------------------------------------------------------- | Assert Data Attribute |-------------------------------------------------------------------------- */ /** * Assert the element's data attribute passes the given callback. 
* * @param callable(?string $value): bool $callback */ public function assertDataAttribute(string $attribute, callable $callback, ?string $message = null): static { $this->assertAttribute($this->prefixDataAttribute($attribute), $callback, $message); return $this; } /** Assert the element has the given data attribute. */ public function assertDataAttributePresent(string $attribute, ?string $message = null): static { $this->assertAttributePresent($this->prefixDataAttribute($attribute), $message); return $this; } /** Assert the element is missing the given data attribute. */ public function assertDataAttributeMissing(string $attribute, ?string $message = null): static { $this->assertAttributeMissing($this->prefixDataAttribute($attribute), $message); return $this; } /** Assert the given element's data attribute equals the given value. */ public function assertDataAttributeEquals(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeEquals($this->prefixDataAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /** Assert the given element's attribute doesn't equal the given value. */ public function assertDataAttributeDoesntEqual(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeDoesntEqual($this->prefixDataAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /** Assert the given element's data attribute contains the given value. */ public function assertDataAttributeContains(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeContains($this->prefixDataAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /** Assert the given element's data attribute doesn't contain the given value. 
*/ public function assertDataAttributeDoesntContain(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeDoesntContain($this->prefixDataAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /* |-------------------------------------------------------------------------- | Assert Aria Attribute |-------------------------------------------------------------------------- */ /** * Assert the element's data attribute passes the given callback. * * @param callable(?string $value): bool $callback */ public function assertAriaAttribute(string $attribute, callable $callback, ?string $message = null): static { $this->assertAttribute($this->prefixAriaAttribute($attribute), $callback, $message); return $this; } /** Assert the element has the given data attribute. */ public function assertAriaAttributePresent(string $attribute, ?string $message = null): static { $this->assertAttributePresent($this->prefixAriaAttribute($attribute), $message); return $this; } /** Assert the element is missing the given data attribute. */ public function assertAriaAttributeMissing(string $attribute, ?string $message = null): static { $this->assertAttributeMissing($this->prefixAriaAttribute($attribute), $message); return $this; } /** Assert the given element's data attribute equals the given value. */ public function assertAriaAttributeEquals(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeEquals($this->prefixAriaAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /** Assert the given element's attribute doesn't equal the given value. 
*/ public function assertAriaAttributeDoesntEqual(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeDoesntEqual($this->prefixAriaAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /** Assert the given element's data attribute contains the given value. */ public function assertAriaAttributeContains(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeContains($this->prefixAriaAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /** Assert the given element's data attribute doesn't contain the given value. */ public function assertAriaAttributeDoesntContain(string $attribute, string $value, bool $normaliseWhitespace = false, ?string $message = null): static { $this->assertAttributeDoesntContain($this->prefixAriaAttribute($attribute), $value, $normaliseWhitespace, $message); return $this; } /* |-------------------------------------------------------------------------- | Internal |-------------------------------------------------------------------------- */ /** Prefix the given attribute name with "data-" if applicable. */ protected function prefixDataAttribute(string $attribute): string { return (! str_starts_with($attribute, 'data-') ? 'data-' : '') . $attribute; } /** Prefix the given attribute name with "aria-" if applicable. */ protected function prefixAriaAttribute(string $attribute): string { return (! str_starts_with($attribute, 'aria-') ? 'aria-' : '') . $attribute; } }
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Asserts/AssertsElementsList.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns\Asserts;

use OutOfBoundsException;
use PHPUnit\Framework\Assert as PHPUnit;

/** Assertions for lists of assertable elements: callback checks, emptiness, counts, any/all matching. */
trait AssertsElementsList
{
    /*
    |--------------------------------------------------------------------------
    | Assert Elements
    |--------------------------------------------------------------------------
    */

    /**
     * Assert the element list passes the given callback.
     *
     * @param callable(static $element): bool $callback
     */
    public function assertElements(callable $callback, ?string $message = null): static
    {
        PHPUnit::assertTrue(
            $callback($this),
            $message ?? "The element list doesn't pass the given callback.",
        );

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Empty
    |--------------------------------------------------------------------------
    */

    /** Assert the element list is empty. */
    public function assertEmpty(?string $message = null): static
    {
        PHPUnit::assertTrue(
            $this->empty(),
            $message ?? "The element list isn't empty.",
        );

        return $this;
    }

    /** Assert the element list isn't empty. */
    public function assertNotEmpty(?string $message = null): static
    {
        PHPUnit::assertFalse(
            $this->empty(),
            $message ?? 'The element list is empty.',
        );

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Count
    |--------------------------------------------------------------------------
    */

    /**
     * Assert the element list contains the expected number of elements using the given comparison operator.
     *
     * @throws \InvalidArgumentException When $comparison isn't one of: =, !=, >, >=, <, <=.
     */
    public function assertNumberOfElements(string $comparison, int $count, ?string $message = null): static
    {
        return match ($comparison) {
            '='  => $this->assertCount($count, $message),
            '!=' => $this->assertNotCount($count, $message),
            '>'  => $this->assertCountGreaterThan($count, $message),
            '>=' => $this->assertCountGreaterThanOrEqual($count, $message),
            '<'  => $this->assertCountLessThan($count, $message),
            '<=' => $this->assertCountLessThanOrEqual($count, $message),
            // Fail with a clear message instead of an opaque \UnhandledMatchError.
            default => throw new \InvalidArgumentException(sprintf(
                'Invalid comparison operator [%s]. Allowed operators: =, !=, >, >=, <, <=.',
                $comparison,
            )),
        };
    }

    /** Assert the element list contains the given number of elements. */
    public function assertCount(int $count, ?string $message = null): static
    {
        $this->countNotNegative($count);

        PHPUnit::assertCount($count, $this, $message ?? sprintf(
            "The element list doesn't have exactly [%d] elements.",
            $count,
        ));

        return $this;
    }

    /** Assert the element list doesn't contain the given number of elements. */
    public function assertNotCount(int $count, ?string $message = null): static
    {
        $this->countNotNegative($count);

        PHPUnit::assertNotCount($count, $this, $message ?? sprintf(
            'The element list has exactly [%d] elements.',
            $count,
        ));

        return $this;
    }

    /** Assert the element list contains greater than the given number of elements. */
    public function assertCountGreaterThan(int $count, ?string $message = null): static
    {
        $this->countNotNegative($count);

        PHPUnit::assertGreaterThan($count, count($this), $message ?? sprintf(
            "The element list doesn't have greater than [%d] elements.",
            $count,
        ));

        return $this;
    }

    /** Assert the element list contains greater than or equal the given number of elements. */
    public function assertCountGreaterThanOrEqual(int $count, ?string $message = null): static
    {
        $this->countNotNegative($count);

        PHPUnit::assertGreaterThanOrEqual($count, count($this), $message ?? sprintf(
            "The element list doesn't have greater than or equal to [%d] elements.",
            $count,
        ));

        return $this;
    }

    /** Assert the element list contains less than the given number of elements. */
    public function assertCountLessThan(int $count, ?string $message = null): static
    {
        $this->countNotNegative($count);

        PHPUnit::assertLessThan($count, count($this), $message ?? sprintf(
            "The element list doesn't have less than [%d] elements.",
            $count,
        ));

        return $this;
    }

    /** Assert the element contains less than or equal the given number of elements. */
    public function assertCountLessThanOrEqual(int $count, ?string $message = null): static
    {
        $this->countNotNegative($count);

        PHPUnit::assertLessThanOrEqual($count, count($this), $message ?? sprintf(
            "The element list doesn't have less than or equal to [%d] elements.",
            $count,
        ));

        return $this;
    }

    /** Ensure the given count value is not negative, which makes no sense for an element count. */
    private function countNotNegative(int $count): void
    {
        if ($count < 0) {
            throw new OutOfBoundsException('Expected count of elements cannot be less than zero.');
        }
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Any / All
    |--------------------------------------------------------------------------
    */

    /**
     * Assert any element in the element list passes the given callback.
     *
     * NOTE(review): relies on array_any() (PHP 8.4+); a null $callback will fail inside array_any() — confirm
     * whether the nullable parameter type is intentional.
     */
    public function assertAny(?callable $callback, ?string $message = null): static
    {
        PHPUnit::assertTrue(
            array_any(iterator_to_array($this), $callback),
            $message ?? 'No elements in the list match the given callback.',
        );

        return $this;
    }

    /**
     * Assert all elements in the element list pass the given callback.
     *
     * NOTE(review): relies on array_all() (PHP 8.4+); a null $callback will fail inside array_all() — confirm
     * whether the nullable parameter type is intentional.
     */
    public function assertAll(?callable $callback, ?string $message = null): static
    {
        PHPUnit::assertTrue(
            array_all(iterator_to_array($this), $callback),
            $message ?? 'Not every element in the list matches the given callback.',
        );

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Asserts/AssertsText.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns\Asserts;

use PHPUnit\Framework\Assert as PHPUnit;

/** Assertions for text values: callback checks, equality, prefix/suffix, and containment. */
trait AssertsText
{
    /*
    |--------------------------------------------------------------------------
    | Assert Text
    |--------------------------------------------------------------------------
    */

    /**
     * Assert the text passes the given callback.
     *
     * @param callable(string $text): bool $callback
     */
    public function assertText(callable $callback, ?string $message = null): static
    {
        PHPUnit::assertTrue(
            $callback($this),
            $message ?? "The text doesn't pass the given callback.",
        );

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Text Equals
    |--------------------------------------------------------------------------
    */

    /** Assert the text equals the given text. */
    public function assertEquals(string $text, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertSame(
            $text,
            $this->value($normaliseWhitespace),
            $message ?? "The text doesn't equal the given text.",
        );

        return $this;
    }

    /** Assert the text doesn't equal the given text. */
    public function assertDoesntEqual(string $text, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertNotSame(
            $text,
            $this->value($normaliseWhitespace),
            $message ?? 'The text equals the given text.',
        );

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Text Starts / Ends With
    |--------------------------------------------------------------------------
    */

    /** Assert the text starts with the given text. */
    public function assertStartsWith(string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertStringStartsWith(
            $prefix,
            $this->value($normaliseWhitespace),
            $message ?? "The text doesn't start with the given prefix text.",
        );

        return $this;
    }

    /** Assert the text doesn't start with the given text. */
    public function assertDoesntStartWith(string $prefix, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertStringStartsNotWith(
            $prefix,
            $this->value($normaliseWhitespace),
            $message ?? 'The text starts with the given prefix text.',
        );

        return $this;
    }

    /** Assert the text ends with the given text. */
    public function assertEndsWith(string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertStringEndsWith(
            $suffix,
            $this->value($normaliseWhitespace),
            $message ?? "The text doesn't end with the given suffix text.",
        );

        return $this;
    }

    /** Assert the text doesn't end with the given text. */
    public function assertDoesntEndWith(string $suffix, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertStringEndsNotWith(
            $suffix,
            $this->value($normaliseWhitespace),
            $message ?? 'The text ends with the given suffix text.',
        );

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Text Contains
    |--------------------------------------------------------------------------
    */

    /** Assert the text contains the given text. */
    public function assertContains(string $text, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertStringContainsString(
            $text,
            $this->value($normaliseWhitespace),
            $message ?? "The text doesn't contain the given text.",
        );

        return $this;
    }

    /** Alias for assertContains(). */
    public function assertSeeIn(string $text, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        $this->assertContains($text, $normaliseWhitespace, $message);

        return $this;
    }

    /** Assert the text doesn't contain the given text. */
    public function assertDoesntContain(string $text, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        PHPUnit::assertStringNotContainsString(
            $text,
            $this->value($normaliseWhitespace),
            // Fixed: previous message contained a stray sprintf placeholder ("The element [%s] text...")
            // that was never formatted, leaking a literal "%s" into failure output and differing from
            // every other default message in this trait.
            $message ?? 'The text contains the given text.',
        );

        return $this;
    }

    /** Alias for assertDoesntContain(). */
    public function assertDontSeeIn(string $text, bool $normaliseWhitespace = false, ?string $message = null): static
    {
        $this->assertDoesntContain($text, $normaliseWhitespace, $message);

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/AssertsMany.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns;

use PHPUnit\Framework\AssertionFailedError;

/** Allows grouping several PHPUnit assertions behind a single failure message. */
trait AssertsMany
{
    /**
     * Run several PHPUnit assertions inside a callback, collapsing any failure into one exception.
     *
     * The callback is bound to the current assertable instance, so assertions inside it may use $this.
     * If any assertion fails, the failure is re-thrown with the given message (or the original one),
     * keeping the underlying failure available as the previous exception.
     */
    public function assertMany(callable $callback, ?string $message = null): static
    {
        try {
            $callback(...)->call($this);
        } catch (AssertionFailedError $failure) {
            $wrapped = new AssertionFailedError(message: $message ?? $failure->getMessage(), previous: $failure);

            throw $wrapped;
        }

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/IdentifiesElement.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns;

use Dom\Element;
use Ziadoz\AssertableHtml\Support\Whitespace;

/** Builds a short, human-readable identifier for a DOM element, used in assertion messages. */
trait IdentifiesElement
{
    /** Return a simple identifying selector for the element (e.g. p#foo.bar.baz). */
    public function identifier(): string
    {
        $fragments = array_filter([
            $this->formatTag(),
            $this->formatId(),
            $this->formatClasses(),
        ]);

        return implode('', $fragments);
    }

    /** Return the element's tag name in lowercase. */
    private function formatTag(): string
    {
        return mb_strtolower($this->element->tagName);
    }

    /** Return the element's ID as a selector fragment (e.g. #foo), or an empty string when absent. */
    private function formatId(): string
    {
        $id = trim($this->element->id);

        if ($id === '') {
            return '';
        }

        return '#' . Whitespace::normalise($id);
    }

    /** Return the element's classes as selector fragments (e.g. .bar.baz), if any. */
    private function formatClasses(): string
    {
        $formatted = '';

        foreach ($this->element->classList as $class) {
            $formatted .= '.' . $class;
        }

        return $formatted;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Targetable.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns;

use Ziadoz\AssertableHtml\Dom\AssertableDocument;
use Ziadoz\AssertableHtml\Dom\AssertableElement;
use Ziadoz\AssertableHtml\Dom\AssertableElementsList;

/** Scoping helpers for narrowing assertions to one element, many elements, or the wider document. */
trait Targetable
{
    /**
     * Scope the first assertable element within the current assertable document or element matching the given selector.
     *
     * @param callable(AssertableElement $assertable): void $callback
     */
    public function one(string $selector, callable $callback): static
    {
        $element = $this->querySelector($selector);

        $callback($element);

        return $this;
    }

    /**
     * Scope the matching assertable elements within the current assertable document or element matching the given selector.
     *
     * @param callable(AssertableElementsList $assertable): void $callback
     */
    public function many(string $selector, callable $callback): static
    {
        $elements = $this->querySelectorAll($selector);

        $callback($elements);

        return $this;
    }

    /**
     * Scope the first assertable element elsewhere in the assertable document matching the given selector.
     *
     * When called on an element, the owning document's HTML is re-parsed into a fresh assertable document
     * so the selector can match anywhere in the page, not just inside the current element.
     *
     * @param callable(AssertableElement $assertable): void $callback
     */
    public function elsewhere(string $selector, callable $callback): static
    {
        if ($this instanceof AssertableDocument) {
            $document = $this;
        } else {
            $document = AssertableDocument::createFromString($this->element->ownerDocument->saveHtml());
        }

        $callback($document->querySelector($selector));

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Whenable.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns;

/** Conditional execution helper for fluent assertion chains. */
trait Whenable
{
    /**
     * Run a callback if the given condition is true, otherwise run an optional default callback.
     *
     * A callable condition is first evaluated with the current assertable; the assertable itself
     * is always returned so the chain continues regardless of which branch ran.
     *
     * @param callable(static $assertable): bool|bool $condition
     * @param callable(static $assertable): callable $callback
     * @param ?callable(static $assertable): ?callable $default
     */
    public function when(callable|bool $condition, callable $callback, ?callable $default = null): static
    {
        if (is_callable($condition)) {
            $condition = $condition($this);
        }

        if ($condition) {
            $callback($this);

            return $this;
        }

        if ($default !== null) {
            $default($this);
        }

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Concerns/Withable.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Concerns;

/** Readability helper for scoping a block of assertions to the current assertable. */
trait Withable
{
    /**
     * Scope a callback with the current assertable, mostly for readability purposes.
     *
     * The callback receives the assertable itself; the assertable is returned unchanged
     * so the fluent chain continues.
     *
     * @param callable(static $assertable): void $callback
     */
    public function with(callable $callback): static
    {
        $callback($this);

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Contracts/PromotableAssertableElement.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Contracts;

use Dom\Element;
use Dom\HTMLElement;

/** Contract for element-specific assertable elements that can replace the generic assertable element. */
interface PromotableAssertableElement
{
    /**
     * Return whether the given HTML element should be promoted by this element-specific assertable element.
     *
     * @param HTMLElement|Element $element The DOM element under consideration for promotion.
     */
    public static function shouldPromote(HTMLElement|Element $element): bool;
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableAttributesList.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use ArrayAccess;
use Countable;
use Dom\NamedNodeMap;
use InvalidArgumentException;
use IteratorAggregate;
use OutOfBoundsException;
use RuntimeException;
use Stringable;
use Traversable;
use Ziadoz\AssertableHtml\Concerns\Asserts\AssertsAttributesList;
use Ziadoz\AssertableHtml\Support\Whitespace;

/** A read-only, array-like list of an element's attributes that assertions can be made against. */
final readonly class AssertableAttributesList implements ArrayAccess, Countable, IteratorAggregate, Stringable
{
    use AssertsAttributesList;

    /** The element attributes (name => value). */
    private array $attributes;

    /** Create a list of assertable attributes. */
    public function __construct(NamedNodeMap $map)
    {
        $this->attributes = $this->mapToAssocArray($map);
    }

    /** Convert the \Dom\NamedNodeMap to an associative array of attribute names to values. */
    private function mapToAssocArray(NamedNodeMap $map): array
    {
        $attributes = [];

        foreach ($map as $key => $attr) {
            $attributes[$key] = $attr->value;
        }

        return $attributes;
    }

    /** Dump the assertable attribute list. */
    public function dump(): void
    {
        dump($this->toArray());
    }

    /** Dump and die the assertable attribute list. */
    public function dd(): never
    {
        dd($this->toArray());
    }

    /** Return the attribute value (optionally whitespace normalised), or an empty string if the attribute is absent. */
    public function value(string $attribute, bool $normaliseWhitespace = false): string
    {
        if (! $this->offsetExists($attribute)) {
            return '';
        }

        return $normaliseWhitespace
            ? Whitespace::normalise($this->attributes[$attribute])
            : $this->attributes[$attribute];
    }

    /** Return whether the assertable attribute list is empty. */
    public function empty(): bool
    {
        return count($this->attributes) === 0;
    }

    /** Return the names of the attributes in the assertable attribute list. */
    public function names(): array
    {
        return array_keys($this->attributes);
    }

    /** Return whether the assertable attribute list has the given attribute in it. */
    public function has(string $attribute): bool
    {
        return $this->offsetExists($attribute);
    }

    /**
     * Perform a callback on each attribute in the assertable attribute list.
     *
     * @param callable(string $attribute, ?string $value, int $index): void $callback
     */
    public function each(callable $callback): self
    {
        // Guard the empty list: range(0, -1) yields [0, -1] (a descending range),
        // which would make array_map invoke the callback twice with null arguments.
        if ($this->attributes !== []) {
            array_map(
                $callback,
                array_keys($this->attributes),
                array_values($this->attributes),
                range(0, count($this->attributes) - 1),
            );
        }

        return $this;
    }

    /**
     * Perform a callback on each attribute in the assertable attribute list in sequence.
     *
     * @param callable(string $attribute, ?string $value, int $sequence): void ...$callbacks
     */
    public function sequence(callable ...$callbacks): self
    {
        if (count($callbacks) === 0) {
            throw new InvalidArgumentException('No sequence callbacks given.');
        }

        $index = 0;

        foreach ($this as $attribute => $value) {
            $callback = $callbacks[$index] ?? throw new OutOfBoundsException(sprintf(
                'Missing sequence callback for attribute [%s] at position [%d].',
                $attribute,
                $index,
            ));

            $callback($attribute, $value, $index);

            $index++;
        }

        return $this;
    }

    /** Return the assertable attribute list as an array. */
    public function toArray(): array
    {
        return $this->attributes;
    }

    /*
    |--------------------------------------------------------------------------
    | Array Access
    |--------------------------------------------------------------------------
    */

    /** Check an attribute exists in the assertable attribute list. */
    public function offsetExists(mixed $offset): bool
    {
        return isset($this->attributes[(string) $offset]);
    }

    /** Get an attribute in the assertable attribute list, or null if it doesn't exist. */
    public function offsetGet(mixed $offset): ?string
    {
        // Coalesce so a missing attribute returns null (matching the ?string return
        // type) instead of raising an undefined array key warning.
        return $this->attributes[(string) $offset] ?? null;
    }

    /** Unable to add attribute to the assertable attribute list. */
    public function offsetSet(mixed $offset, mixed $value): void
    {
        throw new RuntimeException('Unable to add attributes to list');
    }

    /** Unable to remove attribute from the assertable attribute list. */
    public function offsetUnset(mixed $offset): void
    {
        throw new RuntimeException('Unable to remove attributes from list');
    }

    /*
    |--------------------------------------------------------------------------
    | Countable
    |--------------------------------------------------------------------------
    */

    /** Return the number of attributes in the assertable attribute list. */
    public function count(): int
    {
        return count($this->attributes);
    }

    /*
    |--------------------------------------------------------------------------
    | IteratorAggregate
    |--------------------------------------------------------------------------
    */

    /** Get an iterator of the assertable attribute list. */
    public function getIterator(): Traversable
    {
        yield from $this->attributes;
    }

    /*
    |--------------------------------------------------------------------------
    | Stringable
    |--------------------------------------------------------------------------
    */

    /** Return the assertable attribute list as a string (e.g. foo="bar" baz="qux"). */
    public function __toString(): string
    {
        $strings = [];

        foreach ($this->attributes as $key => $value) {
            $strings[] = sprintf('%s="%s"', $key, $value);
        }

        return implode(' ', $strings);
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableClassesList.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use ArrayAccess;
use Countable;
use Dom\TokenList;
use InvalidArgumentException;
use IteratorAggregate;
use OutOfBoundsException;
use RuntimeException;
use Stringable;
use Traversable;
use Ziadoz\AssertableHtml\Concerns\Asserts\AssertsClassesList;

/** A read-only, array-like wrapper around an element's class TokenList that assertions can be made against. */
final readonly class AssertableClassesList implements ArrayAccess, Countable, IteratorAggregate, Stringable
{
    use AssertsClassesList;

    /** Create a list of assertable classes. */
    public function __construct(private TokenList $classes)
    {
    }

    /**
     * Return the classes (optionally whitespace normalised).
     *
     * Normalising re-joins the individual tokens with single spaces (via __toString),
     * whereas the raw value is the attribute text exactly as written in the markup.
     */
    public function value(bool $normaliseWhitespace = false): string
    {
        return $normaliseWhitespace ? $this->__toString() : $this->classes->value;
    }

    /** Return whether the assertable class list is empty. */
    public function empty(): bool
    {
        return count($this) === 0;
    }

    /** Return whether the assertable class list contains the given class. */
    public function contains(string $class): bool
    {
        return $this->classes->contains($class);
    }

    /**
     * Perform a callback on each class in the assertable class list.
     *
     * @param callable(string $class, int $index): void $callback
     */
    public function each(callable $callback): self
    {
        array_map($callback, array_values($this->toArray()), array_keys($this->toArray()));

        return $this;
    }

    /**
     * Perform a callback on each class in the assertable class list in sequence.
     *
     * Each class at position N is passed to the callback at position N; a missing
     * callback for a class raises an OutOfBoundsException.
     *
     * @param callable(string $class, int $sequence): void ...$callbacks
     */
    public function sequence(callable ...$callbacks): self
    {
        if (count($callbacks) === 0) {
            throw new InvalidArgumentException('No sequence callbacks given.');
        }

        foreach ($this as $index => $class) {
            $callback = $callbacks[$index] ?? throw new OutOfBoundsException(sprintf(
                'Missing sequence callback for class at position [%d].',
                $index,
            ));

            $callback($class, $index);
        }

        return $this;
    }

    /** Return whether the assertable class list contains any of the given classes. */
    public function any(array $classes): bool
    {
        return array_any(array_values($classes), fn (string $class): bool => $this->classes->contains($class));
    }

    /** Return whether the assertable class list contains all the given classes. */
    public function all(array $classes): bool
    {
        return array_all(array_values($classes), fn (string $class): bool => $this->classes->contains($class));
    }

    /** Dump the assertable class list. */
    public function dump(): void
    {
        dump($this->toArray());
    }

    /** Dump and die the assertable class list. */
    public function dd(): never
    {
        dd($this->toArray());
    }

    /** Return the assertable class list as an array. */
    public function toArray(): array
    {
        return iterator_to_array($this->classes);
    }

    /*
    |--------------------------------------------------------------------------
    | Array Access
    |--------------------------------------------------------------------------
    */

    /** Check a class exists in the assertable class list. */
    public function offsetExists(mixed $offset): bool
    {
        // Delegates to TokenList's ArrayAccess implementation with an integer index.
        return isset($this->classes[(int) $offset]);
    }

    /** Get a class in the assertable class list. */
    public function offsetGet(mixed $offset): ?string
    {
        return $this->classes[(int) $offset];
    }

    /** Unable to add class to the assertable class list. */
    public function offsetSet(mixed $offset, mixed $value): void
    {
        throw new RuntimeException('Unable to add classes to list');
    }

    /** Unable to remove class from the assertable class list. */
    public function offsetUnset(mixed $offset): void
    {
        throw new RuntimeException('Unable to remove classes from list');
    }

    /*
    |--------------------------------------------------------------------------
    | Countable
    |--------------------------------------------------------------------------
    */

    /** Return the number of classes in the assertable class list. */
    public function count(): int
    {
        return count($this->classes);
    }

    /*
    |--------------------------------------------------------------------------
    | IteratorAggregate
    |--------------------------------------------------------------------------
    */

    /** Get an iterator of the assertable class list. */
    public function getIterator(): Traversable
    {
        yield from iterator_to_array($this->classes);
    }

    /*
    |--------------------------------------------------------------------------
    | Stringable
    |--------------------------------------------------------------------------
    */

    /** Return the assertable class list as a single space-separated string. */
    public function __toString(): string
    {
        return implode(' ', iterator_to_array($this->classes));
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableDocument.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use Dom\Document;
use Dom\HTMLDocument;
use ErrorException;
use PHPUnit\Framework\Assert as PHPUnit;
use Ziadoz\AssertableHtml\Concerns\Asserts\AssertsDocument;
use Ziadoz\AssertableHtml\Concerns\Targetable;
use Ziadoz\AssertableHtml\Concerns\Whenable;
use Ziadoz\AssertableHtml\Concerns\Withable;
use Ziadoz\AssertableHtml\Exceptions\UnableToCreateAssertableDocument;

/** An HTML document that assertions can be made against. */
final readonly class AssertableDocument
{
    use AssertsDocument;
    use Targetable;
    use Whenable;
    use Withable;

    /** The document's page title. */
    public string $title;

    /** Create a new assertable document. */
    public function __construct(private HTMLDocument|Document $document)
    {
        $this->title = $this->document->title;
    }

    /** Get the assertable document HTML. */
    public function getHtml(): string
    {
        return $this->document->saveHtml();
    }

    /** Dump the document HTML. */
    public function dump(): void
    {
        dump($this->getHtml());
    }

    /** Dump and die the document HTML. */
    public function dd(): never
    {
        dd($this->getHtml());
    }

    /*
    |--------------------------------------------------------------------------
    | Native
    |--------------------------------------------------------------------------
    */

    /** Create an assertable document from a file. */
    public static function createFromFile(string $path, int $options = 0, ?string $overrideEncoding = null): self
    {
        return self::convertErrorsToExceptions(fn () => new self(HTMLDocument::createFromFile($path, $options, $overrideEncoding)));
    }

    /** Create an assertable document from a string. */
    public static function createFromString(string $source, int $options = 0, ?string $overrideEncoding = null): self
    {
        return self::convertErrorsToExceptions(fn () => new self(HTMLDocument::createFromString($source, $options, $overrideEncoding)));
    }

    /** Return the assertable element matching the given selector, failing the test if none matches. */
    public function querySelector(string $selector): AssertableElement
    {
        if (($element = $this->document->querySelector($selector)) === null) {
            PHPUnit::fail(sprintf(
                "The document doesn't contain an element matching the given selector [%s].",
                $selector,
            ));
        }

        // promote() upgrades the element to an element-specific assertable (e.g. a form) where applicable.
        return new AssertableElement($element)->promote();
    }

    /** Return assertable elements matching the given selector. */
    public function querySelectorAll(string $selector): AssertableElementsList
    {
        return new AssertableElementsList($this->document->querySelectorAll($selector));
    }

    /** Return an assertable element matching the given ID, failing the test if none matches. */
    public function getElementById(string $id): AssertableElement
    {
        if (($element = $this->document->getElementById($id)) === null) {
            PHPUnit::fail(sprintf(
                "The document doesn't contain an element matching the given ID [%s].",
                $id,
            ));
        }

        return new AssertableElement($element)->promote();
    }

    /** Return assertable elements matching the given tag. */
    public function getElementsByTagName(string $tag): AssertableElementsList
    {
        return new AssertableElementsList($this->document->getElementsByTagName($tag));
    }

    /** Convert any PHP errors that occur in the given callback to custom exceptions. */
    private static function convertErrorsToExceptions(callable $callback): mixed
    {
        try {
            // Escalate any PHP error raised inside the callback (e.g. while parsing)
            // into an UnableToCreateAssertableDocument, preserving the original
            // error details as the previous ErrorException.
            set_error_handler(function (int $severity, string $message, string $file, int $line): never {
                throw new UnableToCreateAssertableDocument(
                    'Unable to create assertable HTML document.',
                    previous: new ErrorException($message, 0, $severity, $file, $line),
                );
            });

            return $callback();
        } finally {
            // Always restore the previous handler, whether the callback returned or threw.
            restore_error_handler();
        }
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableElement.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use Dom\Element;
use Dom\HTMLElement;
use PHPUnit\Framework\Assert as PHPUnit;
use Ziadoz\AssertableHtml\Concerns\Asserts\AssertsElement;
use Ziadoz\AssertableHtml\Concerns\IdentifiesElement;
use Ziadoz\AssertableHtml\Concerns\Targetable;
use Ziadoz\AssertableHtml\Concerns\Whenable;
use Ziadoz\AssertableHtml\Concerns\Withable;

/** An HTML element that assertions can be made against. */
readonly class AssertableElement
{
    use AssertsElement;
    use IdentifiesElement;
    use Targetable;
    use Whenable;
    use Withable;

    /** The element's inner HTML. */
    public string $html;

    /** The element's classes. */
    public AssertableClassesList $classes;

    /** The element's attributes. */
    public AssertableAttributesList $attributes;

    /** The element's tag (lowercase). */
    public string $tag;

    /** The element's ID. */
    public string $id;

    /** The element's text. */
    public AssertableText $text;

    /** Create an assertable element. */
    public function __construct(private HTMLElement|Element $element)
    {
        // Properties: snapshotted eagerly from the underlying DOM element.
        $this->html = $this->element->innerHTML;
        $this->classes = new AssertableClassesList($this->element->classList);
        $this->attributes = new AssertableAttributesList($this->element->attributes);
        $this->tag = strtolower($this->element->tagName);
        $this->id = $this->element->id;
        $this->text = new AssertableText($this->element->textContent);
    }

    /** Get the underlying HTML element. */
    private function getElement(): HTMLElement|Element
    {
        return $this->element;
    }

    /** Promote this assertable element to an element-specific equivalent assertable element (e.g. a form), if possible. */
    public function promote(): static
    {
        return new AssertableElementPromoter($this->getElement())->promote() ?? $this;
    }

    /** Get the assertable element HTML (the element serialised via its owner document). */
    public function getHtml(): string
    {
        return $this->element->ownerDocument->saveHtml($this->element);
    }

    /** Dump the assertable element. */
    public function dump(): void
    {
        dump($this->getHtml());
    }

    /** Dump and die the assertable element. */
    public function dd(): never
    {
        dd($this->getHtml());
    }

    /*
    |--------------------------------------------------------------------------
    | Native
    |--------------------------------------------------------------------------
    */

    /** Return whether the assertable element contains the given assertable element. */
    public function contains(self $other): bool
    {
        return $this->element->contains($other->getElement());
    }

    /** Return the closest matching assertable element, failing the test if none matches. */
    public function closest(string $selector): static
    {
        if (($element = $this->element->closest($selector)) === null) {
            PHPUnit::fail(sprintf(
                "The element [%s] doesn't have a closest element matching the given selector [%s].",
                $this->identifier(),
                $selector,
            ));
        }

        return new static($element);
    }

    /** Return whether the assertable element matches the given selector. */
    public function matches(string $selector): bool
    {
        return $this->element->matches($selector);
    }

    /** Return the assertable element matching the given selector, failing the test if none matches. */
    public function querySelector(string $selector): static
    {
        if (($element = $this->element->querySelector($selector)) === null) {
            PHPUnit::fail(sprintf(
                "The element [%s] doesn't contain an element matching the given selector [%s].",
                $this->identifier(),
                $selector,
            ));
        }

        return new static($element);
    }

    /** Return assertable elements matching the given selector. */
    public function querySelectorAll(string $selector): AssertableElementsList
    {
        return new AssertableElementsList($this->element->querySelectorAll($selector));
    }

    /** Return assertable elements matching the given tag. */
    public function getElementsByTagName(string $tag): AssertableElementsList
    {
        return new AssertableElementsList($this->element->getElementsByTagName($tag));
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableElementPromoter.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use Dom\Element;
use Dom\HTMLElement;
use Ziadoz\AssertableHtml\Contracts\PromotableAssertableElement;
use Ziadoz\AssertableHtml\Dom\Elements\AssertableForm;

/** Promotes a plain assertable element to the first element-specific assertable class that claims it. */
final readonly class AssertableElementPromoter
{
    /** The element-specific assertable classes, checked in order. */
    private const array CUSTOM_ELEMENTS = [
        AssertableForm::class,
    ];

    /** Create a core assertable element. */
    public function __construct(private HTMLElement|Element $element)
    {
    }

    /** Promote and return the first matching assertable element that matches the given HTML element, or null if none claims it. */
    public function promote(): (PromotableAssertableElement&AssertableElement)|null
    {
        foreach (self::CUSTOM_ELEMENTS as $candidate) {
            if ($candidate::shouldPromote($this->element)) {
                return new $candidate($this->element);
            }
        }

        return null;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableElementsList.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use ArrayAccess;
use Countable;
use Dom\Element;
use Dom\HTMLCollection;
use Dom\HTMLElement;
use Dom\NodeList;
use InvalidArgumentException;
use IteratorAggregate;
use OutOfBoundsException;
use RuntimeException;
use Traversable;
use Ziadoz\AssertableHtml\Concerns\Asserts\AssertsElementsList;
use Ziadoz\AssertableHtml\Concerns\Whenable;
use Ziadoz\AssertableHtml\Concerns\Withable;

/** A read-only, array-like list of assertable elements. */
final readonly class AssertableElementsList implements ArrayAccess, Countable, IteratorAggregate
{
    use AssertsElementsList;
    use Whenable;
    use Withable;

    /** The assertable elements (zero-indexed). */
    private array $elements;

    /** Create a list of assertable elements. */
    public function __construct(NodeList|HTMLCollection $nodes)
    {
        $this->elements = array_values(
            array_map(
                // promote() upgrades each element to an element-specific assertable (e.g. a form) where applicable.
                fn (HTMLElement|Element $element): AssertableElement => new AssertableElement($element)->promote(),
                $nodes instanceof NodeList ? iterator_to_array($nodes) : $this->htmlCollectionToArray($nodes),
            ),
        );
    }

    /** Convert a \Dom\HTMLCollection instance to an array. */
    private function htmlCollectionToArray(HTMLCollection $nodes): array
    {
        // NOTE(review): manual loop rather than iterator_to_array(), presumably to
        // sidestep HTMLCollection's iterator key behaviour — confirm before changing.
        $array = [];

        foreach ($nodes as $node) {
            $array[] = $node;
        }

        return $array;
    }

    /** Get the assertable element list HTML (one element per line). */
    public function getHtml(): string
    {
        return implode("\n", array_map(
            fn (AssertableElement $element): string => $element->getHtml(),
            $this->elements,
        ));
    }

    /** Dump the assertable element list. */
    public function dump(): void
    {
        dump($this->getHtml());
    }

    /** Dump and die the assertable element list. */
    public function dd(): never
    {
        dd($this->getHtml());
    }

    /** Return whether the assertable element list is empty. */
    public function empty(): bool
    {
        return count($this) === 0;
    }

    /** Get the assertable element at the nth position in the assertable element list, or null if out of bounds. */
    public function nth(int $index): ?AssertableElement
    {
        return $this->offsetGet($index);
    }

    /** Return the first assertable element in the assertable element list, or null if the list is empty. */
    public function first(): ?AssertableElement
    {
        return $this->offsetGet(0);
    }

    /** Return the last assertable element in the assertable element list, or null if the list is empty. */
    public function last(): ?AssertableElement
    {
        return $this->offsetGet(count($this) - 1);
    }

    /**
     * Perform a callback on each element in the assertable element list.
     *
     * @param callable(AssertableElement $element, int $index): void $callback
     */
    public function each(callable $callback): self
    {
        array_map($callback, array_values($this->elements), array_keys($this->elements));

        return $this;
    }

    /**
     * Perform a callback on each element in the assertable element list in sequence.
     *
     * @param callable(AssertableElement $element, int $sequence): void ...$callbacks
     */
    public function sequence(callable ...$callbacks): self
    {
        if (count($callbacks) === 0) {
            throw new InvalidArgumentException('No sequence callbacks given.');
        }

        foreach ($this as $index => $element) {
            $callback = $callbacks[$index] ?? throw new OutOfBoundsException(sprintf(
                'Missing sequence callback for element at position [%d].',
                $index,
            ));

            $callback($element, $index);
        }

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Array Access
    |--------------------------------------------------------------------------
    */

    /** Check an assertable element exists in the assertable element list. */
    public function offsetExists(mixed $offset): bool
    {
        return isset($this->elements[(int) $offset]);
    }

    /** Get an assertable element in the assertable element list, or null if the offset doesn't exist. */
    public function offsetGet(mixed $offset): ?AssertableElement
    {
        // Coalesce so out-of-bounds access (e.g. first()/last() on an empty list)
        // returns null — matching the nullable return type — instead of raising
        // an undefined array key warning.
        return $this->elements[(int) $offset] ?? null;
    }

    /** Unable to add an assertable element to the assertable element list. */
    public function offsetSet(mixed $offset, mixed $value): void
    {
        throw new RuntimeException('Unable to add elements to list');
    }

    /** Unable to remove an assertable element from the assertable element list. */
    public function offsetUnset(mixed $offset): void
    {
        throw new RuntimeException('Unable to remove elements from list');
    }

    /*
    |--------------------------------------------------------------------------
    | Countable
    |--------------------------------------------------------------------------
    */

    /** Return the number of assertable elements in the assertable element list. */
    public function count(): int
    {
        return count($this->elements);
    }

    /*
    |--------------------------------------------------------------------------
    | IteratorAggregate
    |--------------------------------------------------------------------------
    */

    /** Get an iterator of the assertable element list. */
    public function getIterator(): Traversable
    {
        yield from $this->elements;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/AssertableText.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom;

use Stringable;
use Ziadoz\AssertableHtml\Concerns\Asserts\AssertsText;
use Ziadoz\AssertableHtml\Support\Whitespace;

/** A piece of element text that assertions can be made against. */
final readonly class AssertableText implements Stringable
{
    use AssertsText;

    /** Create assertable text. */
    public function __construct(private string $text)
    {
    }

    /** Return the text (optionally whitespace normalised). */
    public function value(bool $normaliseWhitespace = false): string
    {
        if ($normaliseWhitespace) {
            return Whitespace::normalise($this->text);
        }

        return $this->text;
    }

    /** Return the assertable text. */
    public function __toString(): string
    {
        return $this->text;
    }

    /** Dump the assertable text. */
    public function dump(): void
    {
        dump($this->text);
    }

    /** Dump and die the assertable text. */
    public function dd(): never
    {
        dd($this->text);
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Dom/Elements/AssertableForm.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Dom\Elements;

use Dom\Element;
use Dom\HTMLElement;
use Ziadoz\AssertableHtml\Concerns\AssertsMany;
use Ziadoz\AssertableHtml\Contracts\PromotableAssertableElement;
use Ziadoz\AssertableHtml\Dom\AssertableElement;

/** An assertable <form> element with form-specific assertions (method, spoofed method inputs, uploads). */
readonly class AssertableForm extends AssertableElement implements PromotableAssertableElement
{
    use AssertsMany;

    /*
    |--------------------------------------------------------------------------
    | Interface
    |--------------------------------------------------------------------------
    */

    /** {@inheritDoc} */
    public static function shouldPromote(Element|HTMLElement $element): bool
    {
        // Uppercase comparison — AssertableElement lowercases tagName for its own
        // $tag property, which suggests the raw DOM value is uppercase here.
        return $element->tagName === 'FORM';
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Method
    |--------------------------------------------------------------------------
    */

    /** Assert the form has the given method attribute (compared case-insensitively, ignoring surrounding whitespace). */
    public function assertMethod(string $method, ?string $message = null): static
    {
        $method = trim(mb_strtolower($method));

        $this->assertAttribute(
            'method',
            fn (?string $value): bool => trim(mb_strtolower((string) $value)) === $method,
            $message ?? sprintf("The form method doesn't equal [%s].", $method),
        );

        return $this;
    }

    /** Assert the form has the GET method attribute. */
    public function assertMethodGet(?string $message = null): static
    {
        $this->assertMethod('get', $message);

        return $this;
    }

    /** Assert the form has the POST method attribute. */
    public function assertMethodPost(?string $message = null): static
    {
        $this->assertMethod('post', $message);

        return $this;
    }

    /** Assert the form has the DIALOG method attribute. */
    public function assertMethodDialog(?string $message = null): static
    {
        $this->assertMethod('dialog', $message);

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Hidden Method
    |--------------------------------------------------------------------------
    */

    /** Assert the form has the given hidden input method (a spoofed method input, e.g. name="_method"). */
    public function assertHiddenInputMethod(string $selector, string $method, ?string $message = null): static
    {
        $this->assertMany(function () use ($selector, $method): void {
            $method = trim(mb_strtolower($method));

            $this->querySelector($selector)
                ->assertMatchesSelector('input[type="hidden"]')
                ->assertAttribute('value', fn (?string $value): bool => trim(mb_strtolower((string) $value)) === $method);
        }, $message ?? sprintf("The form hidden input method doesn't equal [%s].", $method));

        return $this;
    }

    /** Assert the form has the PUT hidden input method. */
    public function assertMethodPut(?string $message = null): static
    {
        $this->assertHiddenInputMethod('input[type="hidden"][name="_method"]', 'put', $message);

        return $this;
    }

    /** Assert the form has the PATCH hidden input method. */
    public function assertMethodPatch(?string $message = null): static
    {
        $this->assertHiddenInputMethod('input[type="hidden"][name="_method"]', 'patch', $message);

        return $this;
    }

    /** Assert the form has the DELETE hidden input method. */
    public function assertMethodDelete(?string $message = null): static
    {
        $this->assertHiddenInputMethod('input[type="hidden"][name="_method"]', 'delete', $message);

        return $this;
    }

    /*
    |--------------------------------------------------------------------------
    | Assert Upload
    |--------------------------------------------------------------------------
    */

    /** Assert the form accepts uploads (has the correct enctype and at least one file input). */
    public function assertAcceptsUpload(?string $message = null): static
    {
        $this->assertMany(function (): void {
            $this->assertAttribute('enctype', fn (?string $value): bool => trim(mb_strtolower((string) $value)) === 'multipart/form-data')
                ->assertElementsCountGreaterThanOrEqual('input[type="file"]', 1);
        }, $message ?? "The form doesn't accept uploads.");

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Exceptions/UnableToCreateAssertableDocument.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Exceptions;

use Exception;

/**
 * Thrown when an assertable HTML document cannot be created — e.g. when the
 * underlying HTML parser raises a PHP error; the original error is attached
 * as the previous ErrorException by the thrower.
 */
class UnableToCreateAssertableDocument extends Exception
{
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Mixins/TestComponentMixins.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Mixins;

use Closure;
use Ziadoz\AssertableHtml\Dom\AssertableDocument;

/**
 * Mixin closures that add assertable HTML helpers to a rendered test component.
 * Each method returns a Closure intended to be bound into the mixed-into class,
 * where (string) $this renders the component's HTML.
 */
class TestComponentMixins
{
    /** Return an assertable HTML document of the component. */
    public function assertableHtml(): Closure
    {
        return function (int $options = LIBXML_HTML_NOIMPLIED, ?string $overrideEncoding = null): AssertableDocument {
            return AssertableDocument::createFromString((string) $this, $options, $overrideEncoding);
        };
    }

    /** Run an assertion callback scoped to the whole component. */
    public function assertComponent(): Closure
    {
        return function (callable $callback, int $options = LIBXML_HTML_NOIMPLIED, ?string $overrideEncoding = null): static {
            $this->assertableHtml($options, $overrideEncoding)->with($callback);

            return $this;
        };
    }

    /** Run an assertion callback scoped to the given selector in the component. */
    public function assertElement(): Closure
    {
        return function (string $selector, callable $callback, int $options = LIBXML_HTML_NOIMPLIED, ?string $overrideEncoding = null): static {
            $this->assertableHtml($options, $overrideEncoding)->one($selector, $callback);

            return $this;
        };
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Mixins/TestResponseMixins.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Mixins;

use Closure;
use Symfony\Component\HttpFoundation\StreamedResponse;
use Ziadoz\AssertableHtml\Dom\AssertableDocument;

/**
 * Mixin closures that add assertable HTML helpers to a test response.
 * Each method returns a Closure intended to be bound into the mixed-into class.
 */
class TestResponseMixins
{
    /** Get the response HTML content (streamed content for streamed responses, buffered content otherwise). */
    public function getHtmlContent(): Closure
    {
        return fn (): string => $this->baseResponse instanceof StreamedResponse ? $this->streamedContent() : $this->getContent();
    }

    /** Return an assertable HTML document of the response. */
    public function assertableHtml(): Closure
    {
        return function (int $options = 0, ?string $overrideEncoding = null): AssertableDocument {
            return AssertableDocument::createFromString($this->getHtmlContent(), $options, $overrideEncoding);
        };
    }

    /** Run an assertion callback against the whole response document. */
    public function assertHtml(): Closure
    {
        return function (callable $callback, int $options = 0, ?string $overrideEncoding = null): static {
            AssertableDocument::createFromString($this->getHtmlContent(), $options, $overrideEncoding)->with($callback);

            return $this;
        };
    }

    /** Run an assertion callback scoped to <head>. */
    public function assertHead(): Closure
    {
        return function (callable $callback, int $options = 0, ?string $overrideEncoding = null): static {
            AssertableDocument::createFromString($this->getHtmlContent(), $options, $overrideEncoding)->one('head', $callback);

            return $this;
        };
    }

    /** Run an assertion callback scoped to <body>. */
    public function assertBody(): Closure
    {
        return function (callable $callback, int $options = 0, ?string $overrideEncoding = null): static {
            AssertableDocument::createFromString($this->getHtmlContent(), $options, $overrideEncoding)->one('body', $callback);

            return $this;
        };
    }

    /** Run an assertion callback scoped to the given selector. */
    public function assertElement(): Closure
    {
        return function (string $selector, callable $callback, int $options = 0, ?string $overrideEncoding = null): static {
            AssertableDocument::createFromString($this->getHtmlContent(), $options, $overrideEncoding)->one($selector, $callback);

            return $this;
        };
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Mixins/TestViewMixins.php
PHP
<?php declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Mixins;

use Closure;
use Ziadoz\AssertableHtml\Dom\AssertableDocument;

/**
 * Mixin closures that add assertable HTML helpers to a rendered test view.
 * Each method returns a Closure intended to be bound into the mixed-into class,
 * where (string) $this renders the view's HTML.
 */
class TestViewMixins
{
    /** Return an assertable HTML document of the view. */
    public function assertableHtml(): Closure
    {
        return function (int $options = LIBXML_HTML_NOIMPLIED, ?string $overrideEncoding = null): AssertableDocument {
            return AssertableDocument::createFromString((string) $this, $options, $overrideEncoding);
        };
    }

    /** Run an assertion callback scoped to the whole view. */
    public function assertView(): Closure
    {
        return function (callable $callback, int $options = LIBXML_HTML_NOIMPLIED, ?string $overrideEncoding = null): static {
            $this->assertableHtml($options, $overrideEncoding)->with($callback);

            return $this;
        };
    }

    /** Run an assertion callback scoped to the given selector in the view. */
    public function assertElement(): Closure
    {
        return function (string $selector, callable $callback, int $options = LIBXML_HTML_NOIMPLIED, ?string $overrideEncoding = null): static {
            $this->assertableHtml($options, $overrideEncoding)->one($selector, $callback);

            return $this;
        };
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Support/Whitespace.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Support;

class Whitespace
{
    /**
     * Normalise the whitespace of the given string: collapse any run of
     * whitespace to a single space and strip leading/trailing whitespace.
     *
     * @link https://github.com/symfony/symfony/pull/48940
     */
    public static function normalise(string $string): string
    {
        // Replace runs of 2+ whitespace chars, or any single non-space
        // whitespace char, with one space; then trim the edges.
        $collapsed = preg_replace("/[ \n\r\t\x0C]{2,}+|[\n\r\t\x0C]/", ' ', $string);

        return trim($collapsed, " \n\r\t\x0C");
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
src/Traits/AssertsHtml.php
PHP
<?php

declare(strict_types=1);

namespace Ziadoz\AssertableHtml\Traits;

use Ziadoz\AssertableHtml\Dom\AssertableDocument;

/** Test-case helpers for performing assertions against HTML strings or files. */
trait AssertsHtml
{
    /** Return a configured assertable HTML document from an HTML string or a file path. */
    public function assertableHtml(string $html, int $options = 0, ?string $overrideEncoding = null): AssertableDocument
    {
        if (is_file($html)) {
            return AssertableDocument::createFromFile($html, $options, $overrideEncoding);
        }

        return AssertableDocument::createFromString($html, $options, $overrideEncoding);
    }

    /** Perform assertions against the whole HTML document. */
    public function assertHtml(string $html, callable $callback, int $options = 0, ?string $overrideEncoding = null): static
    {
        $document = $this->assertableHtml($html, $options, $overrideEncoding);
        $document->with($callback);

        return $this;
    }

    /** Perform assertions against the document's <head> element. */
    public function assertHead(string $html, callable $callback, int $options = 0, ?string $overrideEncoding = null): static
    {
        $document = $this->assertableHtml($html, $options, $overrideEncoding);
        $document->one('head', $callback);

        return $this;
    }

    /** Perform assertions against the document's <body> element. */
    public function assertBody(string $html, callable $callback, int $options = 0, ?string $overrideEncoding = null): static
    {
        $document = $this->assertableHtml($html, $options, $overrideEncoding);
        $document->one('body', $callback);

        return $this;
    }

    /** Perform assertions against the first element matching the given selector. */
    public function assertElement(string $html, string $selector, callable $callback, int $options = 0, ?string $overrideEncoding = null): static
    {
        $document = $this->assertableHtml($html, $options, $overrideEncoding);
        $document->one($selector, $callback);

        return $this;
    }
}
ziadoz/assertable-html
4
Assertable HTML is an elegantly designed PHPUnit library that makes performing assertions on HTML responses from PHP and Laravel applications quick and enjoyable.
PHP
ziadoz
Jamie York
css/app.css
CSS
/* Declare layer precedence up front: later layers override earlier ones
   (layout beats theme beats reset). */
@layer reset, theme, layout;

/* @link: https://www.joshwcomeau.com/css/custom-css-reset/ */
@layer reset {
    *, *::before, *::after {
        box-sizing: border-box;
    }

    * {
        margin: 0;
    }

    body {
        line-height: 1.5;
        -webkit-font-smoothing: antialiased;
    }

    img, picture, video, canvas, svg {
        display: block;
        max-width: 100%;
    }

    input, button, textarea, select {
        font: inherit;
    }

    p, h1, h2, h3, h4, h5, h6 {
        overflow-wrap: break-word;
    }
}

/* Design tokens: system font stacks and the site's colour palette. */
/* @link: https://systemfontstack.com/ */
/* @link: https://www.makeworld.space/garden/Computers/Web.html */
@layer theme {
    :root {
        --font-sans: -apple-system, BlinkMacSystemFont, avenir next, avenir, segoe ui, helvetica neue, helvetica, Cantarell, Ubuntu, roboto, noto, arial, sans-serif;
        --font-mono: Menlo, Consolas, Monaco, Liberation Mono, Lucida Console, monospace;
        --colour-cream: #fffdf5;
        --colour-slate: #363737;
        --colour-white: #fff;
        --colour-teal: #196b72;
        --colour-mustard: #fdb33c;
    }
}

@layer layout {
    html {
        font-family: var(--font-sans);
        color: var(--colour-slate);
        background: var(--colour-cream);
    }

    h1 {
        margin-bottom: 16px;
    }

    div.wrapper {
        max-width: 840px;
        margin-inline: auto;
        margin-block: 32px;
        padding-inline: 16px;
    }

    form.input {
        display: flex;
        gap: 8px;

        input {
            flex-grow: 7;
        }

        button {
            flex-grow: 1;
        }
    }

    /* Results stay hidden until the app appends a <char-to-bytes> element. */
    div.results {
        margin-top: 32px;
        display: none;

        &:has(> char-to-bytes) {
            display: inherit;
        }
    }

    /* Expand/collapse-all link; the arrow glyph flips via the .collapse class. */
    a.toggle {
        display: inline-block;
        margin-left: 8px;
        font-size: 16px;
        font-weight: normal;
        color: var(--colour-slate);
        text-decoration: none;
        vertical-align: middle;

        &::before {
            content: "▼";
            display: inline-block;
            width: 24px;
            text-align: center;
        }

        &.collapse::before {
            content: "►";
        }
    }

    details {
        margin-bottom: 32px;
    }

    summary {
        cursor: pointer;
    }

    code {
        font-family: var(--font-mono);
        display: inline-block;

        &.char {
            min-width: 1ch;
        }
    }

    table.bytes {
        border-collapse: collapse;
        margin-block: 8px;

        td {
            padding: 4px;
        }
    }

    /* One 24px grid cell per bit of a byte; continuation bits render teal. */
    ol.bits {
        font-family: var(--font-mono);
        color: var(--colour-white);
        padding: 0;
        list-style: none;
        display: grid;
        grid-template-columns: repeat(8, 24px);
        grid-template-rows: 24px;
        gap: 6px;

        li {
            text-align: center;
            background: var(--colour-mustard);
        }

        li.continuation {
            background: var(--colour-teal);
        }
    }

    .github-corner {
        position: absolute;
        top: 0;
        border: 0;
        right: 0;
    }
}
ziadoz/utf8-to-bytes
2
A webpage that displays UTF-8 strings as individual bytes.
JavaScript
ziadoz
Jamie York
index.html
HTML
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <title>UTF-8 to Bytes</title>
    <link rel="icon" href="./favicon.ico" type="image/x-icon">
    <link rel="stylesheet" href="./css/app.css">
    <!-- Map bare module specifiers onto local files so app.js can use short imports.
         NOTE(review): the "database" entry points at database.mjs while utf8.mjs
         imports ./database.json directly - confirm whether this entry is still used. -->
    <script type="importmap">
    {
        "imports": {
            "utf8": "./js/modules/utf8.mjs",
            "ready": "./js/modules/ready.mjs",
            "database": "./js/modules/database.mjs",
            "char-to-bytes": "./js/elements/char-to-bytes.js"
        }
    }
    </script>
</head>
<body>
    <!-- "View on GitHub" corner ribbon. @see: https://github.com/tholman/github-corners -->
    <a href="https://github.com/ziadoz/utf8-to-bytes" class="github-corner" aria-label="View on GitHub">
        <svg width="50" height="50" viewBox="0 0 250 250" style="fill:#151513; color:#fff;" aria-hidden="true">
            <path d="M0,0 L115,115 L130,115 L142,142 L250,250 L250,0 Z"></path>
            <path d="M128.3,109.0 C113.8,99.7 119.0,89.6 119.0,89.6 C122.0,82.7 120.5,78.6 120.5,78.6 C119.2,72.0 123.4,76.3 123.4,76.3 C127.3,80.9 125.5,87.3 125.5,87.3 C122.9,97.6 130.6,101.9 134.4,103.2" fill="currentColor" style="transform-origin: 130px 106px;" class="octo-arm"></path>
            <path d="M115.0,115.0 C114.9,115.1 118.7,116.5 119.8,115.4 L133.7,101.6 C136.9,99.2 139.9,98.4 142.2,98.6 C133.8,88.0 127.5,74.4 143.8,58.0 C148.5,53.4 154.0,51.2 159.7,51.0 C160.3,49.4 163.2,43.6 171.4,40.1 C171.4,40.1 176.1,42.5 178.8,56.2 C183.1,58.6 187.2,61.8 190.9,65.4 C194.5,69.0 197.7,73.2 200.1,77.6 C213.8,80.2 216.3,84.9 216.3,84.9 C212.7,93.1 206.9,96.0 205.4,96.6 C205.1,102.4 203.0,107.8 198.3,112.5 C181.9,128.9 168.3,122.5 157.7,114.1 C157.9,116.9 156.7,120.9 152.7,124.9 L141.0,136.5 C139.8,137.7 141.6,141.9 141.8,141.8 Z" fill="currentColor" class="octo-body"></path>
        </svg>
    </a>
    <div class="wrapper">
        <h1>UTF-8 to Bytes</h1>
        <!-- Submitting this form is intercepted by app.js (no page reload). -->
        <form class="input">
            <input type="text" name="query" placeholder="Enter text or emojis...">
            <button type="submit">Show</button>
        </form>
        <!-- Hidden via CSS until app.js appends a <char-to-bytes> element. -->
        <div class="results">
            <h1>Results <a href="#" class="toggle collapse">Collapse All</a></h1>
        </div>
    </div>
    <script type="module" src="./js/app.js"></script>
</body>
</html>
ziadoz/utf8-to-bytes
2
A webpage that displays UTF-8 strings as individual bytes.
JavaScript
ziadoz
Jamie York
js/app.js
JavaScript
import CharToBytes from 'char-to-bytes';
import ready from 'ready';
import { strToChars } from 'utf8';

customElements.define('char-to-bytes', CharToBytes);

// Render one <char-to-bytes> element per grapheme in the query string,
// replacing any elements left over from a previous render.
function render(query) {
    const results = document.querySelector('.results');

    for (const existing of document.querySelectorAll('.results > char-to-bytes')) {
        existing.remove();
    }

    for (const char of strToChars(query)) {
        const element = document.createElement('char-to-bytes');
        element.setAttribute('char', char);
        results.append(element);
    }
}

// Sync the text input, the toggle label and the ?query= querystring
// with the rendered query so results are shareable/bookmarkable.
function update(query) {
    document.querySelector('input[name="query"]').value = query;
    document.querySelector('a.toggle').textContent = 'Collapse All';

    const url = new URL(window.location.href);

    if (query !== '') {
        url.searchParams.set('query', query);
    } else {
        url.searchParams.delete('query');
    }

    history.replaceState(history.state, '', url.href);
}

ready(() => {
    // Restore a query passed via the querystring once the page has loaded.
    window.addEventListener('load', (event) => {
        const query = new URLSearchParams(window.location.search).get('query') ?? '';

        if (query !== '') {
            render(query);
            update(query);
        }
    });

    document.querySelector('form').addEventListener('submit', (event) => {
        event.preventDefault();

        const query = document.querySelector('input[name="query"]').value;

        render(query);
        update(query);
    });

    // Flip between collapsing every open <details> and expanding every closed one.
    document.querySelector('a.toggle').addEventListener('click', (event) => {
        event.preventDefault();

        const collapsing = event.target.textContent.includes('Collapse');
        const selector = collapsing ? 'details[open]' : 'details:not([open])';

        event.target.textContent = collapsing ? 'Expand All' : 'Collapse All';
        event.target.classList.toggle('collapse');

        document.querySelectorAll(selector).forEach(details => {
            details.toggleAttribute('open');
        });
    });
});
ziadoz/utf8-to-bytes
2
A webpage that displays UTF-8 strings as individual bytes.
JavaScript
ziadoz
Jamie York
js/elements/char-to-bytes.js
JavaScript
// Custom element that renders a single character (grapheme cluster) together
// with its UTF-8 byte breakdown in binary, hexadecimal and decimal.
//
// Fix: codePointToHex was previously imported via the relative path
// '../modules/utf8.mjs' while every other helper used the 'utf8' import-map
// alias; both specifiers resolve to the same module, so the imports are
// merged into one for consistency.
import { binToDec, binToHex, byteToBin, charToBytes, charToName, codePointToHex, parseBin, strToCodePoints } from 'utf8';

export default class CharToBytes extends HTMLElement {
    #char = '';
    #charBin = '';   // concatenated remainder bits of every byte (the code point bits)
    #charHex = '';
    #charDec = '';
    #charName = '';  // one "codepoint [NAME]" line per code point in the grapheme
    #bytes = [];     // per byte: { parsed: {continuation, remainder}, hex, dec }

    connectedCallback() {
        this.#char = this.getAttribute('char');
        this.#bytes = [];

        // Decompose the character into UTF-8 bytes and split each byte into
        // its continuation-prefix bits and remainder (payload) bits.
        for (const byte of charToBytes(this.#char)) {
            const bin = byteToBin(byte);
            const parsed = parseBin(bin);
            const hex = binToHex(bin);
            const dec = binToDec(bin);

            this.#bytes.push({ parsed, hex, dec });
        }

        // The character's value is the concatenation of the remainder bits.
        this.#charBin = this.#bytes.map(byte => byte.parsed.remainder).join('');
        this.#charHex = binToHex(this.#charBin);
        this.#charDec = binToDec(this.#charBin);
        this.#charName = strToCodePoints(this.#char)
            .map(codepoint => codepoint + ' [' + charToName(codePointToHex(codepoint)) + ']')
            .join("\n");

        this.innerHTML = this.renderDetails();
    }

    /** Render the collapsible summary plus one table row per byte. */
    renderDetails() {
        return `
            <details open>
                <summary>
                    <code class="char" title="${this.#charName}">${this.#char}</code>
                    <code class="base" title="Binary">${this.#charBin}</code>
                    <code class="base" title="Hexadecimal">0x${this.#charHex}</code>
                    <code class="base" title="Decimal">${this.#charDec}</code>
                </summary>
                <table class="bytes">
                    ${this.#bytes.map((byte) => this.renderTableRow(byte)).join("\n")}
                </table>
            </details>`;
    }

    /** Render a single byte: its bit cells plus hex/decimal values. */
    renderTableRow(byte) {
        return `
            <tr>
                <td>
                    <ol class="bits">
                        ${[...byte.parsed.continuation].map(bit => this.renderOrderedListItem(bit, true)).join("\n")}
                        ${[...byte.parsed.remainder].map(bit => this.renderOrderedListItem(bit)).join("\n")}
                    </ol>
                </td>
                <td>
                    <code class="base" title="Hexadecimal">0x${byte.hex}</code>
                    <code class="base" title="Decimal">${byte.dec}</code>
                </td>
            </tr>`;
    }

    /** Render one bit cell; continuation bits are styled differently. */
    renderOrderedListItem(bit, continuation = false) {
        if (continuation) {
            return `<li class="continuation" title="Continuation Bit">${bit}</li>`;
        }

        return `<li title="Remainder Bit">${bit}</li>`;
    }
}
ziadoz/utf8-to-bytes
2
A webpage that displays UTF-8 strings as individual bytes.
JavaScript
ziadoz
Jamie York
js/modules/ready.mjs
JavaScript
// Run the callback once the DOM is ready, or immediately if it already is.
// @see: https://javascript.info/onload-ondomcontentloaded
export default function ready(callback) {
    if (document.readyState !== 'loading') {
        callback();

        return;
    }

    document.addEventListener('DOMContentLoaded', callback);
}
ziadoz/utf8-to-bytes
2
A webpage that displays UTF-8 strings as individual bytes.
JavaScript
ziadoz
Jamie York
js/modules/utf8.mjs
JavaScript
// Helpers for decomposing strings into UTF-8 bytes and bit groups.
// @link: https://www.unicode.org/Public/UCD/latest/ucd/UnicodeData.txt
// @link: https://www.unicode.org/Public/emoji/latest/emoji-sequences.txt
// @link: https://jakearchibald.com/2025/importing-vs-fetching-json/
import database from './database.json' with { type: 'json' };

// Split a string into user-perceived characters (grapheme clusters).
// @link: https://stackoverflow.com/a/62305199
// @link: https://bsky.app/profile/did:plc:etdjdgnly5tz5l5xdd4jq76d/post/3m3umixshqs2r
export function strToChars(str) {
    const segments = new Intl.Segmenter().segment(str);

    return [...segments].map(({ segment }) => segment);
}

// Split a string into its individual code points.
export function strToCodePoints(str) {
    return Array.from(str);
}

// Encode a character as its UTF-8 bytes (Uint8Array).
export function charToBytes(char) {
    return new TextEncoder().encode(char);
}

// Look up a Unicode character name by its (zero-padded) hex code point.
export function charToName(hex) {
    const key = hex.padStart(4, '0');

    return database[key] ?? '';
}

// Render a byte as 8 binary digits (padded so 7-bit ASCII shows a full byte).
export function byteToBin(byte) {
    const bits = parseInt(byte).toString(2);

    return bits.padStart(8, '0');
}

// Convert a binary string to uppercase hexadecimal.
export function binToHex(bin) {
    const value = parseInt(bin, 2);

    return value.toString(16).toUpperCase();
}

// Convert a binary string to decimal.
export function binToDec(bin) {
    const value = parseInt(bin, 2);

    return value.toString(10);
}

// Convert a single code point to its uppercase hexadecimal value.
export function codePointToHex(cp) {
    const value = cp.codePointAt(0);

    return value.toString(16).toUpperCase();
}

// Split a binary byte into its UTF-8 continuation-prefix bits (the leading
// 1-bits up to and including the first 0) and the remaining payload bits.
export function parseBin(bin) {
    if (bin.startsWith('0')) {
        return {
            continuation: '',
            remainder: bin,
        };
    }

    const split = bin.indexOf('0') + 1;

    return {
        continuation: bin.slice(0, split),
        remainder: bin.slice(split),
    };
}
ziadoz/utf8-to-bytes
2
A webpage that displays UTF-8 strings as individual bytes.
JavaScript
ziadoz
Jamie York
public/electron.js
JavaScript
// Electron main process: creates the Deployer window, checks for updates,
// and runs a small local HTTP server that forwards URLs to the renderer.
const url = require('url');
const http = require('http');
const path = require('path');
const { autoUpdater } = require('electron-updater');
const { app, BrowserWindow } = require('electron');
const { setMainMenu } = require('./menu');

// Keep a global reference so the window isn't garbage collected.
let mainWindow;

// Create the fixed-size main window and load either the dev server URL
// (APP_URL) or the packaged production build.
function createWindow() {
  mainWindow = new BrowserWindow({
    title: 'Deployer',
    width: 478,
    height: 712,
    resizable: false,
    webPreferences: {
      plugins: true,
      // NOTE(review): webSecurity is disabled - presumably to allow
      // cross-origin requests to Jenkins; confirm this is intentional.
      webSecurity: false,
    },
  });

  // Dev mode is signalled by the presence of APP_URL.
  const isDev = !!process.env.APP_URL;

  if (process.env.APP_URL) {
    mainWindow.loadURL(process.env.APP_URL);
  } else {
    mainWindow.loadFile(path.join(__dirname, '../build/index.html'));
  }

  // Open the dev tools only for dev
  // and when the flag is not set
  // (i.e. DEV_TOOLS acts as an opt-out switch in dev mode).
  if (isDev && !process.env.DEV_TOOLS) {
    mainWindow.webContents.openDevTools();
  }

  mainWindow.on('closed', () => { mainWindow = null });

  setMainMenu(mainWindow);
}

// Best-effort update check; failures are logged and never block startup.
function checkAndDownloadUpdate() {
  try {
    autoUpdater.checkForUpdatesAndNotify();
  } catch (e) {
    console.log(e.message);
  }
}

app.on('ready', () => {
  createWindow();
  checkAndDownloadUpdate();

  // Local HTTP endpoint (port 6280): forwards a ?url= query parameter to the
  // renderer via the 'url.requested' IPC channel, then replies 200 regardless.
  const server = http.createServer((request, response) => {
    // NOTE(review): url.parse is deprecated; query.url may be an array when
    // the parameter is repeated, hence the Array check below.
    let target_url = url.parse(request.url, true).query.url;
    if (target_url) {
      if (Array.isArray(target_url)) {
        target_url = target_url.pop();
      }
      mainWindow.webContents.send('url.requested', target_url);
    }
    // NOTE(review): writeHeader is a legacy alias of response.writeHead.
    response.writeHeader(200);
    response.end();
  });

  // Bound to all interfaces so other machines on the network can reach it.
  server.listen(6280, "0.0.0.0")
});

// Quit when all windows are closed (no macOS keep-alive behaviour here).
app.on('window-all-closed', function () {
  app.quit();
});

app.on('activate', function () {
  // On OS X it's common to re-create a window in the app when the
  // dock icon is clicked and there are no other windows open.
  if (mainWindow === null) {
    createWindow();
  }
});
ziishaned/deployer
66
Cross-platform application to deploy your applications through Jenkins.
JavaScript
ziishaned
Zeeshan Ahmad