id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
185,511
from fairseq import options, utils
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
)
from fairseq.modules import (
    AdaptiveInput,
    CharacterTokenEmbedder,
)


def _apply_defaults(args, defaults):
    # Mirrors ``args.x = getattr(args, 'x', v)``: a value already present on
    # *args* wins, otherwise the architecture default is installed.
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))


def transformer_lm_big(args):
    """Defaults for the 'big' transformer LM (12 layers, 1024/4096 dims)."""
    _apply_defaults(args, [
        ('decoder_layers', 12),
        ('decoder_embed_dim', 1024),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
    ])
    base_lm_architecture(args)


def transformer_lm_baevski_wiki103(args):
    """Adaptive-input transformer LM defaults tuned for WikiText-103."""
    _apply_defaults(args, [
        ('decoder_layers', 16),
        ('decoder_attention_heads', 8),
        ('dropout', 0.3),
        ('adaptive_input', True),
        ('tie_adaptive_weights', True),
        ('adaptive_input_cutoff', '20000,60000'),
        ('adaptive_softmax_cutoff', '20000,60000'),
        ('adaptive_softmax_dropout', 0.2),
        ('attention_dropout', 0.1),
        ('activation_dropout', 0.1),
        ('no_decoder_final_norm', True),
        ('tie_adaptive_proj', True),
    ])
    transformer_lm_big(args)
null
185,512
from fairseq import options, utils
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
)
from fairseq.modules import (
    AdaptiveInput,
    CharacterTokenEmbedder,
)


def _fill_defaults(args, defaults):
    # Equivalent to repeated ``args.x = getattr(args, 'x', v)`` assignments:
    # user-supplied values are preserved, missing ones get the default.
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))


def transformer_lm_big(args):
    """Defaults for the 'big' transformer LM (12 layers, 1024/4096 dims)."""
    _fill_defaults(args, [
        ('decoder_layers', 12),
        ('decoder_embed_dim', 1024),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
    ])
    base_lm_architecture(args)


def transformer_lm_baevski_gbw(args):
    """Transformer LM defaults for the Google Billion Words corpus."""
    _fill_defaults(args, [
        ('decoder_embed_dim', 512),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
        ('no_decoder_final_norm', True),
    ])
    transformer_lm_big(args)
null
185,513
from fairseq import options, utils
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
)
from fairseq.modules import (
    AdaptiveInput,
    CharacterTokenEmbedder,
)


def base_lm_architecture(args):
    """Populate baseline language-model defaults on *args*.

    NOTE(review): the body of this function was lost in extraction (the
    source row contains ``def base_lm_architecture(args):`` immediately
    followed by the next ``def``, which is a syntax error). It is kept as a
    no-op stub so the snippet parses; restore the real defaults from
    fairseq's transformer_lm module before relying on it.
    """
    pass


def transformer_lm_gpt(args):
    """GPT-style transformer LM defaults (768 dims, 12 layers, 12 heads, GELU).

    Values already present on *args* are preserved; only missing attributes
    receive the defaults below.
    """
    args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768)
    args.decoder_ffn_embed_dim = getattr(args, 'decoder_ffn_embed_dim', 3072)
    args.decoder_layers = getattr(args, 'decoder_layers', 12)
    args.decoder_attention_heads = getattr(args, 'decoder_attention_heads', 12)
    args.dropout = getattr(args, 'dropout', 0.1)
    args.attention_dropout = getattr(args, 'attention_dropout', 0.1)
    args.activation_fn = getattr(args, 'activation_fn', 'gelu')
    base_lm_architecture(args)
null
185,517
from fairseq import options
from fairseq.models import (
    FairseqLanguageModel,
    register_model,
    register_model_architecture,
)
from fairseq.models.lightconv import (
    Embedding,
    LightConvDecoder,
)
from fairseq.modules import (
    AdaptiveInput,
    CharacterTokenEmbedder,
)


def base_lm_architecture(args):
    """Install LightConv LM defaults on *args*; user-set values take priority."""
    for name, value in [
        ('decoder_embed_dim', 512),
        ('decoder_ffn_embed_dim', 2048),
        ('decoder_layers', 6),
        ('decoder_attention_heads', 8),
        ('adaptive_softmax_cutoff', None),
        ('adaptive_softmax_dropout', 0),
        ('adaptive_softmax_factor', 4),
        ('decoder_learned_pos', False),
        ('character_embeddings', False),
    ]:
        setattr(args, name, getattr(args, name, value))

    # These dimensions fall back to the (already resolved) embedding size.
    for name in ('decoder_output_dim', 'decoder_input_dim', 'decoder_conv_dim'):
        setattr(args, name, getattr(args, name, args.decoder_embed_dim))

    # The model training is not stable without this
    args.decoder_normalize_before = True

    for name, value in [
        ('adaptive_input', False),
        ('adaptive_input_factor', 4),
        ('adaptive_input_cutoff', None),
        ('tie_adaptive_weights', False),
        ('tie_adaptive_proj', False),
    ]:
        setattr(args, name, getattr(args, name, value))

    args.decoder_kernel_size_list = getattr(
        args, 'decoder_kernel_size_list', [3, 7, 15, 31, 31, 31]
    )
    # A single kernel size is broadcast to every decoder layer.
    if len(args.decoder_kernel_size_list) == 1:
        args.decoder_kernel_size_list = (
            args.decoder_kernel_size_list * args.decoder_layers
        )
    assert len(args.decoder_kernel_size_list) == args.decoder_layers, \
        "decoder_kernel_size_list doesn't match decoder_layers"

    args.decoder_glu = getattr(args, 'decoder_glu', True)
    args.input_dropout = getattr(args, 'input_dropout', 0.1)
    # NOTE(review): assumes args.attention_dropout was already set by the
    # caller (as lightconv_lm_gbw below does) — confirm for other entry points.
    args.weight_dropout = getattr(args, 'weight_dropout', args.attention_dropout)


def lightconv_lm_gbw(args):
    """LightConv LM defaults for the Google Billion Words corpus."""
    for name, value in [
        ('decoder_embed_dim', 512),
        ('dropout', 0.1),
        ('attention_dropout', 0.1),
        ('decoder_ffn_embed_dim', 4096),
        ('decoder_attention_heads', 16),
    ]:
        setattr(args, name, getattr(args, name, value))
    base_lm_architecture(args)
null
185,518
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

from . import (
    BaseFairseqModel, register_model, register_model_architecture
)


class TransposeLast(nn.Module):
    """Swap the last two dimensions of the input.

    If ``deconstruct_idx`` is given, the input is first indexed with it
    (useful when the preceding module returns a tuple).
    """

    def __init__(self, deconstruct_idx=None):
        super().__init__()
        self.deconstruct_idx = deconstruct_idx

    def forward(self, x):
        if self.deconstruct_idx is not None:
            x = x[self.deconstruct_idx]
        return x.transpose(-2, -1)


class Fp32GroupNorm(nn.GroupNorm):
    """GroupNorm computed in float32; the result is cast back to the input dtype."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        # Upcast activations and affine parameters to float32 for numerical
        # stability (e.g. under mixed-precision training), then cast back.
        output = F.group_norm(
            input.float(),
            self.num_groups,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps)
        return output.type_as(input)


class Fp32LayerNorm(nn.LayerNorm):
    """LayerNorm computed in float32; the result is cast back to the input dtype."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def forward(self, input):
        output = F.layer_norm(
            input.float(),
            self.normalized_shape,
            self.weight.float() if self.weight is not None else None,
            self.bias.float() if self.bias is not None else None,
            self.eps)
        return output.type_as(input)


def norm_block(is_layer_norm, dim, affine=True):
    """Return a normalization module over ``dim`` channels.

    Layer norm operates over the last dimension, so the channel axis is
    transposed to the end and back; otherwise a single-group fp32 GroupNorm
    is used in place.
    """
    if is_layer_norm:
        mod = nn.Sequential(
            TransposeLast(),
            Fp32LayerNorm(dim, elementwise_affine=affine),
            TransposeLast(),
        )
    else:
        mod = Fp32GroupNorm(1, dim, affine=affine)

    return mod
null
185,519
import sys
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

from . import (
    BaseFairseqModel, register_model, register_model_architecture
)


def base_wav2vec_architecture(args):
    """Install wav2vec defaults on *args*; values already present win."""
    # Feature-extractor conv spec, evaluated elsewhere as a Python literal.
    conv_feature_layers = (
        '[(512, 10, 5)]'
        ' + [(512, 8, 4)]'
        ' + [(512, 4, 2)] * 3'
    )
    defaults = [
        ('conv_feature_layers', conv_feature_layers),
        ('conv_aggregator_layers', '[(512, 3, 1)] * 9'),
        ('prediction_steps', 12),
        ('num_negatives', 1),
        ('sample_distance', None),
        ('cross_sample_negatives', False),
        ('dropout', 0.),
        ('dropout_features', 0.),
        ('dropout_agg', 0.),
        ('encoder', 'cnn'),
        ('aggregator', 'cnn'),
        ('skip_connections_feat', False),
        ('skip_connections_agg', False),
        ('residual_scale', 0.5),
        ('gru_dim', 512),
        ('no_conv_bias', False),
        ('agg_zero_pad', False),
        ('log_compression', False),
        ('balanced_classes', False),
        ('project_features', 'none'),
        ('non_affine_group_norm', False),
        ('offset', 'auto'),
    ]
    for name, value in defaults:
        setattr(args, name, getattr(args, name, value))
null
185,520
import numpy as np
import torch
import torch.nn.functional as F

from fairseq.models import register_model, register_model_architecture
from fairseq.models.levenshtein_transformer import (
    LevenshteinTransformerDecoder,
    LevenshteinTransformerModel,
)
from fairseq.models.transformer import Linear, TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import new_arange

# NOTE(review): ``NegativeDistanceScore`` is defined elsewhere in the original
# module; this snippet does not include it, so this line raises NameError here.
neg_scorer = NegativeDistanceScore()


def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx, vocab_size, tau=None):
    """Build soft insertion targets for the insertion transformer.

    Aligns *in_tokens* to *out_tokens* with libnat's edit-distance path and
    scatters per-slot insertion label weights (scored by ``neg_scorer``) into
    a dense float tensor of shape ``(B, T - 1, V)``.
    """
    try:
        from fairseq import libnat
    except ImportError as e:
        import sys
        sys.stderr.write('ERROR: missing libnat. run `pip install --editable .`\n')
        raise e

    B = in_tokens.size(0)
    T = in_tokens.size(1)
    V = vocab_size

    # Strip padding before computing the edit path.
    with torch.cuda.device_of(in_tokens):
        in_tokens_list = [
            [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist())
        ]
        out_tokens_list = [
            [t for t in s if t != padding_idx]
            for i, s in enumerate(out_tokens.tolist())
        ]

    full_labels = libnat.suggested_ed2_path(
        in_tokens_list, out_tokens_list, padding_idx
    )
    # a[:-1]: drop the last element of each per-sentence label path
    # (presumably a terminal marker from suggested_ed2_path — confirm).
    insert_labels = [a[:-1] for a in full_labels]

    # numericalize1
    insert_label_tensors = in_tokens.new_zeros(B * (T - 1) * V).float()
    # Flatten (batch i, slot j, word w) to an index into the (B*(T-1)*V)
    # buffer; the weight comes from neg_scorer on the word's rank k.
    insert_index, insert_labels = zip(
        *[
            (w + (j + i * (T - 1)) * V, neg_scorer(k, len(label), tau))
            for i, labels in enumerate(insert_labels)
            for j, label in enumerate(labels[1:-1])
            for k, w in enumerate(label)
        ]
    )  # HACK 1:-1
    insert_index, insert_labels = [
        torch.tensor(list(a), device=in_tokens.device)
        for a in [insert_index, insert_labels]
    ]
    insert_label_tensors.scatter_(0, insert_index.long(), insert_labels)
    insert_label_tensors = insert_label_tensors.view(B, T - 1, V)

    return insert_label_tensors
null
185,521
import numpy as np
import torch
import torch.nn.functional as F

from fairseq.models import register_model, register_model_architecture
from fairseq.models.levenshtein_transformer import (
    LevenshteinTransformerDecoder,
    LevenshteinTransformerModel,
)
from fairseq.models.transformer import Linear, TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import new_arange


def new_arange(x, *size):
    """
    Return a Tensor of `size` filled with a range function on the device of x.
    If size is empty, using the size of the variable x.
    """
    # NOTE(review): this local definition shadows the ``new_arange`` imported
    # from fairseq.utils above.
    if len(size) == 0:
        size = x.size()
    return torch.arange(size[-1], device=x.device).expand(*size).contiguous()


def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, padding_idx):
    """Interleave predicted insertion words into the existing token sequence.

    Each predicted word sorts between its two neighbouring original tokens
    via fractional coordinates; predictions at padded slots are pushed to
    +inf so they land at the (padded) end. Mutates ``word_ins_pred`` and
    ``word_ins_scores`` in place. Returns ``(out_tokens, out_scores)``.
    """
    padding_masks = in_tokens[:, 1:].eq(padding_idx)
    # Neutralize predictions made at padded slots (in-place).
    word_ins_scores.masked_fill_(padding_masks, 0.0)
    word_ins_pred.masked_fill_(padding_masks, padding_idx)

    # Integer coordinates for original tokens; inserted words get x - 0.5.
    in_coords = new_arange(in_tokens).type_as(in_scores)

    # shift all padding predictions to infinite
    out_coords = (in_coords[:, 1:] - 0.5).masked_fill(
        word_ins_pred.eq(padding_idx), float("inf")
    )
    out_coords = torch.cat([in_coords, out_coords], 1).sort(-1)[1]
    out_tokens = torch.cat([in_tokens, word_ins_pred], 1).gather(1, out_coords)
    out_scores = torch.cat([in_scores, word_ins_scores], 1).gather(1, out_coords)
    return out_tokens, out_scores
null
185,522
import numpy as np
import torch
import torch.nn.functional as F

from fairseq.models import register_model, register_model_architecture
from fairseq.models.levenshtein_transformer import (
    LevenshteinTransformerDecoder,
    LevenshteinTransformerModel,
)
from fairseq.models.transformer import Linear, TransformerModel
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.utils import new_arange


def base_architecture(args):
    """Insertion-transformer base hyper-parameters; user-set values win."""

    def fill(name, value):
        # Same effect as ``args.name = getattr(args, name, value)``.
        setattr(args, name, getattr(args, name, value))

    fill("encoder_embed_path", None)
    fill("encoder_embed_dim", 512)
    fill("encoder_ffn_embed_dim", 2048)
    fill("encoder_layers", 6)
    fill("encoder_attention_heads", 8)
    fill("encoder_normalize_before", False)
    fill("encoder_learned_pos", False)
    fill("decoder_embed_path", None)
    # Decoder dimensions default to the (now resolved) encoder ones.
    fill("decoder_embed_dim", args.encoder_embed_dim)
    fill("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    fill("decoder_layers", 6)
    fill("decoder_attention_heads", 8)
    fill("decoder_normalize_before", False)
    fill("decoder_learned_pos", False)
    fill("attention_dropout", 0.0)
    fill("activation_dropout", 0.0)
    fill("activation_fn", "relu")
    fill("dropout", 0.1)
    fill("adaptive_softmax_cutoff", None)
    fill("adaptive_softmax_dropout", 0)
    fill("share_decoder_input_output_embed", False)
    fill("share_all_embeddings", False)
    fill("no_token_positional_embeddings", False)
    fill("adaptive_input", False)
    fill("apply_bert_init", False)
    fill("decoder_output_dim", args.decoder_embed_dim)
    fill("decoder_input_dim", args.decoder_embed_dim)

    # special for insertion transformer
    fill("label_tau", None)
null
185,523
import torch
import torch.nn.functional as F

from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params


def _mean_pooling(enc_feats, src_masks):
    """Average encoder features over time, ignoring padded positions.

    enc_feats: T x B x C
    src_masks: B x T padding mask, or None when nothing is padded
    """
    if src_masks is None:
        return enc_feats.mean(0)
    # Keep-mask in T x B layout, matching enc_feats' time-major shape.
    keep = (~src_masks).transpose(0, 1).type_as(enc_feats)
    # Divide each batch column by its valid length, zero out padding, sum.
    weighted = enc_feats / keep.sum(0)[None, :, None]
    return (weighted * keep[:, :, None]).sum(0)
null
185,524
import torch
import torch.nn.functional as F

from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params


def _argmax(x, dim):
    """Indicator of the maxima along *dim* (ties all marked), in x's dtype."""
    peak = x.max(dim, keepdim=True)[0]
    return (x == peak).type_as(x)
null
185,525
import torch
import torch.nn.functional as F

from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params


def _uniform_assignment(src_lens, trg_lens):
    """Map each target position to a source index by uniform interpolation."""
    max_trg_len = trg_lens.max()
    # Per-example spacing between consecutive target slots.
    steps = (src_lens.float() - 1) / (trg_lens.float() - 1)  # step-size
    # max_trg_len
    positions = utils.new_arange(trg_lens, max_trg_len).float()
    index_t = steps[:, None] * positions[None, :]  # batch_size X max_trg_len
    return torch.round(index_t).long().detach()
null
185,526
import torch
import torch.nn.functional as F

from fairseq import utils
from fairseq.iterative_refinement_generator import DecoderOut
from fairseq.models import register_model, register_model_architecture
from fairseq.models.transformer import (
    Embedding,
    TransformerDecoder,
    TransformerEncoder,
    TransformerModel,
)
from fairseq.modules.transformer_sentence_encoder import init_bert_params


def base_architecture(args):
    """Non-autoregressive transformer base hyper-parameters.

    Every attribute already present on *args* is kept; missing ones receive
    the defaults below.
    """

    def fill(name, value):
        # Same effect as ``args.name = getattr(args, name, value)``.
        setattr(args, name, getattr(args, name, value))

    fill("encoder_embed_path", None)
    fill("encoder_embed_dim", 512)
    fill("encoder_ffn_embed_dim", 2048)
    fill("encoder_layers", 6)
    fill("encoder_attention_heads", 8)
    fill("encoder_normalize_before", False)
    fill("encoder_learned_pos", False)
    fill("decoder_embed_path", None)
    # Decoder dimensions default to the (now resolved) encoder ones.
    fill("decoder_embed_dim", args.encoder_embed_dim)
    fill("decoder_ffn_embed_dim", args.encoder_ffn_embed_dim)
    fill("decoder_layers", 6)
    fill("decoder_attention_heads", 8)
    fill("decoder_normalize_before", False)
    fill("decoder_learned_pos", False)
    fill("attention_dropout", 0.0)
    fill("activation_dropout", 0.0)
    fill("activation_fn", "relu")
    fill("dropout", 0.1)
    fill("adaptive_softmax_cutoff", None)
    fill("adaptive_softmax_dropout", 0)
    fill("share_decoder_input_output_embed", False)
    fill("share_all_embeddings", False)
    fill("no_token_positional_embeddings", False)
    fill("adaptive_input", False)
    fill("apply_bert_init", False)
    fill("decoder_output_dim", args.decoder_embed_dim)
    fill("decoder_input_dim", args.decoder_embed_dim)

    # --- special arguments ---
    fill("sg_length_pred", False)
    fill("pred_length_offset", False)
    fill("length_loss_factor", 0.1)
    fill("src_embedding_copy", False)


# FIX(review): in the source row only the two registry-name strings survived
# extraction; the ``@register_model_architecture`` decorator that carries them
# (imported above) is restored here.
@register_model_architecture(
    "nonautoregressive_transformer", "nonautoregressive_transformer_wmt_en_de"
)
def nonautoregressive_transformer_wmt_en_de(args):
    base_architecture(args)
null
185,527
import inspect

import torch.nn as nn

from fairseq.legacy_distributed_data_parallel import LegacyDistributedDataParallel
from fairseq.models import BaseFairseqModel

# (dataset prompt text, preserved) The provided code snippet includes
# necessary dependencies for implementing the `DistributedFairseqModel`
# function; the task statement repeats the docstring below verbatim.


def DistributedFairseqModel(args, model):
    """
    Wrap a *model* to support distributed data parallel training.

    This is similar to the built-in DistributedDataParallel, but allows
    additional configuration of the DistributedDataParallel class to use,
    and also provides easier access to the wrapped model by forwarding
    requests for missing attributes to the wrapped model.

    Args:
        args (argparse.Namespace): fairseq args
        model (BaseFairseqModel): model to wrap
    """
    # determine which DDP class to extend
    assert isinstance(model, nn.Module)
    if args.ddp_backend == 'c10d':
        ddp_class = nn.parallel.DistributedDataParallel
        init_kwargs = dict(
            module=model,
            device_ids=[args.device_id],
            output_device=args.device_id,
            broadcast_buffers=False,
            bucket_cap_mb=args.bucket_cap_mb,
        )
        # Maintain backward compatibility with older DDP signatures.
        # FIX: inspect.getargspec was deprecated since 3.0 and removed in
        # Python 3.11; getfullargspec accepts the same callables.
        ddp_init_args = inspect.getfullargspec(ddp_class).args
        if 'check_reduction' in ddp_init_args:
            init_kwargs['check_reduction'] = True
        if 'find_unused_parameters' in ddp_init_args:
            init_kwargs['find_unused_parameters'] = args.find_unused_parameters
    elif args.ddp_backend == 'no_c10d':
        ddp_class = LegacyDistributedDataParallel
        init_kwargs = dict(
            module=model,
            world_size=args.distributed_world_size,
            buffer_size=2**28,
        )
    else:
        raise ValueError('Unknown --ddp-backend: ' + args.ddp_backend)

    class _DistributedFairseqModel(ddp_class):
        """Extend DistributedDataParallel to check for missing attributes in the
        wrapped module."""

        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)

        def __getattr__(self, name):
            # __getattr__ only fires when normal lookup fails; delegate to
            # the wrapped module before giving up.
            wrapped_module = super().__getattr__('module')
            if hasattr(wrapped_module, name):
                return getattr(wrapped_module, name)
            return super().__getattr__(name)

    return _DistributedFairseqModel(**init_kwargs)
Wrap a *model* to support distributed data parallel training. This is similar to the built-in DistributedDataParallel, but allows additional configuration of the DistributedDataParallel class to use, and also provides easier access to the wrapped model by forwarding requests for missing attributes to the wrapped model. Args: args (argparse.Namespace): fairseq args model (BaseFairseqModel): model to wrap
185,528
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import checkpoint_utils
from fairseq.models import (
    CompositeEncoder,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    DownsampledMultiHeadAttention,
    GradMultiply,
    LayerNorm,
    LearnedPositionalEmbedding,
    LinearizedConvolution,
)


def Embedding(num_embeddings, embedding_dim, padding_idx):
    """Embedding table with weights drawn from N(0, 0.1)."""
    layer = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
    layer.weight.data.normal_(0, 0.1)
    return layer
null
185,529
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import checkpoint_utils
from fairseq.models import (
    CompositeEncoder,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    DownsampledMultiHeadAttention,
    GradMultiply,
    LayerNorm,
    LearnedPositionalEmbedding,
    LinearizedConvolution,
)


def PositionalEmbedding(num_embeddings, embedding_dim, padding_idx):
    """Learned positional embedding with weights drawn from N(0, 0.1)."""
    layer = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
    layer.weight.data.normal_(0, 0.1)
    return layer
null
185,530
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import checkpoint_utils
from fairseq.models import (
    CompositeEncoder,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    DownsampledMultiHeadAttention,
    GradMultiply,
    LayerNorm,
    LearnedPositionalEmbedding,
    LinearizedConvolution,
)

# (dataset prompt text, preserved) Write a Python function
# `def Linear(in_features, out_features, dropout=0.)` to solve the
# following problem: Weight-normalized Linear layer (input: N x T x C)


def Linear(in_features, out_features, dropout=0.):
    """Weight-normalized Linear layer (input: N x T x C)"""
    layer = nn.Linear(in_features, out_features)
    # Variance scaled down by the dropout keep-probability.
    std = math.sqrt((1 - dropout) / in_features)
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return layer
Weight-normalized Linear layer (input: N x T x C)
185,531
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import checkpoint_utils
from fairseq.models import (
    CompositeEncoder,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    DownsampledMultiHeadAttention,
    GradMultiply,
    LayerNorm,
    LearnedPositionalEmbedding,
    LinearizedConvolution,
)

# (dataset prompt text, preserved) Write a Python function
# `def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0.,
# **kwargs)` to solve the following problem: Weight-normalized Conv1d layer
# optimized for decoding


def LinearizedConv1d(in_channels, out_channels, kernel_size, dropout=0., **kwargs):
    """Weight-normalized Conv1d layer optimized for decoding"""
    layer = LinearizedConvolution(in_channels, out_channels, kernel_size, **kwargs)
    # Fan-in scaled init, discounted by the dropout keep-probability.
    std = math.sqrt((4 * (1.0 - dropout)) / (layer.kernel_size[0] * in_channels))
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return layer
Weight-normalized Conv1d layer optimized for decoding
185,532
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import checkpoint_utils
from fairseq.models import (
    CompositeEncoder,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    DownsampledMultiHeadAttention,
    GradMultiply,
    LayerNorm,
    LearnedPositionalEmbedding,
    LinearizedConvolution,
)

# (dataset prompt text, preserved) Write a Python function
# `def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs)`
# to solve the following problem: Weight-normalized Conv1d layer


def ConvTBC(in_channels, out_channels, kernel_size, dropout=0, **kwargs):
    """Weight-normalized Conv1d layer"""
    # Imported locally (aliased) to avoid clashing with this factory's name.
    from fairseq.modules import ConvTBC as _ConvTBC
    layer = _ConvTBC(in_channels, out_channels, kernel_size, **kwargs)
    std = math.sqrt((4 * (1.0 - dropout)) / (layer.kernel_size[0] * in_channels))
    layer.weight.data.normal_(mean=0, std=std)
    layer.bias.data.zero_()
    return layer
Weight-normalized Conv1d layer
185,533
import math
import os

import torch
import torch.nn as nn
import torch.nn.functional as F

from fairseq import checkpoint_utils
from fairseq.models import (
    CompositeEncoder,
    FairseqDecoder,
    FairseqEncoder,
    FairseqEncoderDecoderModel,
    register_model,
    register_model_architecture,
)
from fairseq.modules import (
    DownsampledMultiHeadAttention,
    GradMultiply,
    LayerNorm,
    LearnedPositionalEmbedding,
    LinearizedConvolution,
)


def base_architecture(args):
    """FConv self-attention base hyper-parameters; user-set values win.

    Note: several flags are string-typed ('True'/'False') and parsed
    downstream; they are kept as strings here on purpose.
    """
    for name, value in [
        ('dropout', 0.1),
        ('encoder_embed_dim', 512),
        ('encoder_layers', '[(512, 3)] * 3'),
        ('decoder_embed_dim', 512),
        ('decoder_layers', '[(512, 3)] * 8'),
        ('decoder_out_embed_dim', 256),
        ('decoder_attention', 'True'),
        ('self_attention', 'False'),
        ('encoder_attention', 'False'),
        ('multihead_attention_nheads', 1),
        ('multihead_self_attention_nheads', 1),
        ('encoder_attention_nheads', 1),
        ('project_input', 'False'),
        ('gated_attention', 'False'),
        ('downsample', 'False'),
        ('pretrained_checkpoint', ''),
        ('pretrained', 'False'),
    ]:
        setattr(args, name, getattr(args, name, value))


def fconv_self_att_wp(args):
    """Smaller self-attentive FConv variant (writing-prompts setup)."""
    for name, value in [
        ('encoder_embed_dim', 256),
        ('encoder_layers', '[(128, 3)] * 2 + [(512,3)] * 1'),
        ('decoder_embed_dim', 256),
        ('decoder_layers', '[(512, 4)] * 4 + [(768, 4)] * 2 + [(1024, 4)] * 1'),
        ('decoder_out_embed_dim', 256),
        ('self_attention', 'True'),
        ('multihead_self_attention_nheads', 4),
        ('project_input', 'True'),
        ('gated_attention', 'True'),
        ('downsample', 'True'),
    ]:
        setattr(args, name, getattr(args, name, value))
    base_architecture(args)
null
185,534
import torch

from fairseq.models import register_model, register_model_architecture
from fairseq.models.nonautoregressive_transformer import NATransformerModel


def _sequential_poisoning(s, V, beta=0.33, bos=2, eos=3, pad=1):
    # s: input batch
    # V: vocabulary size
    #
    # Corrupt token batch *s* in place, left to right. Each position draws a
    # uniform value and is assigned one of four fates by thresholds on beta:
    # replace with a random word, repeat, swap with the next token, or stay.
    # bos/eos/pad positions are forced into the "safe" (untouched) bucket.
    rand_words = torch.randint(low=4, high=V, size=s.size(), device=s.device)
    choices = torch.rand(size=s.size(), device=s.device)
    # Special tokens get choice 1.0, which lands in the `safe` bucket below.
    choices.masked_fill_((s == pad) | (s == bos) | (s == eos), 1)

    replace = choices < beta / 3
    repeat = (choices >= beta / 3) & (choices < beta * 2 / 3)
    swap = (choices >= beta * 2 / 3) & (choices < beta)
    safe = choices >= beta

    for i in range(s.size(1) - 1):
        rand_word = rand_words[:, i]
        next_word = s[:, i + 1]
        self_word = s[:, i]

        replace_i = replace[:, i]
        # swap/repeat are suppressed when the next token is eos (id 3).
        swap_i = swap[:, i] & (next_word != 3)
        repeat_i = repeat[:, i] & (next_word != 3)
        safe_i = safe[:, i] | ((next_word == 3) & (~replace_i))

        # Exactly one mask is 1 per position, selecting the written value.
        s[:, i] = (
            self_word * (safe_i | repeat_i).long()
            + next_word * swap_i.long()
            + rand_word * replace_i.long()
        )
        s[:, i + 1] = (
            next_word * (safe_i | replace_i).long()
            + self_word * (swap_i | repeat_i).long()
        )
    return s
null
185,535
import torch

from fairseq.models import register_model, register_model_architecture
from fairseq.models.nonautoregressive_transformer import NATransformerModel


def gumbel_noise(input, TINY=1e-8):
    """Gumbel(0, 1) noise shaped like *input*: -log(-log(U + TINY) + TINY)."""
    noise = input.new_zeros(*input.size())
    noise.uniform_()
    # TINY guards both logs against log(0).
    noise = -(noise + TINY).log()
    noise = -(noise + TINY).log()
    return noise
null
185,536
import torch from fairseq.models import register_model, register_model_architecture from fairseq.models.nonautoregressive_transformer import NATransformerModel def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) 
args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) # --- special arguments --- args.sg_length_pred = getattr(args, "sg_length_pred", False) args.pred_length_offset = getattr(args, "pred_length_offset", False) args.length_loss_factor = getattr(args, "length_loss_factor", 0.1) args.ngram_predictor = getattr(args, "ngram_predictor", 1) args.src_embedding_copy = getattr(args, "src_embedding_copy", False) args.train_step = getattr(args, "train_step", 4) args.dae_ratio = getattr(args, "dae_ratio", 0.5) args.stochastic_approx = getattr(args, "stochastic_approx", False) "iterative_nonautoregressive_transformer", "iterative_nonautoregressive_transformer_wmt_en_de", def iter_nat_wmt_en_de(args): base_architecture(args)
null
185,541
from fairseq import options from fairseq.models import ( FairseqLanguageModel, register_model, register_model_architecture, ) from fairseq.models.fconv import FConvDecoder def base_lm_architecture(args): def fconv_lm_dauphin_wikitext103(args): layers = '[(850, 6)] * 3' layers += ' + [(850, 1)] * 1' layers += ' + [(850, 5)] * 4' layers += ' + [(850, 1)] * 1' layers += ' + [(850, 4)] * 3' layers += ' + [(1024, 4)] * 1' layers += ' + [(2048, 4)] * 1' args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 280) args.decoder_layers = getattr(args, 'decoder_layers', layers) args.decoder_attention = getattr(args, 'decoder_attention', 'False') args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,20000,200000') base_lm_architecture(args)
null
185,543
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange The provided code snippet includes necessary dependencies for implementing the `_skip` function. Write a Python function `def _skip(x, mask)` to solve the following problem: Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. Here is the function: def _skip(x, mask): """ Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. """ if isinstance(x, int): return x if x is None: return None if isinstance(x, torch.Tensor): if x.size(0) == mask.size(0): return x[mask] elif x.size(1) == mask.size(0): return x[:, mask] if isinstance(x, list): return [_skip(x_i, mask) for x_i in x] if isinstance(x, dict): return {k: _skip(v, mask) for k, v in x.items()} raise NotImplementedError
Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors.
185,544
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def _skip_encoder_out(encoder, encoder_out, mask): if not mask.any(): return encoder_out else: return encoder.reorder_encoder_out(encoder_out, mask.nonzero().squeeze())
null
185,545
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange The provided code snippet includes necessary dependencies for implementing the `_fill` function. Write a Python function `def _fill(x, mask, y, padding_idx)` to solve the following problem: Filling tensor x with y at masked positions (dim=0). Here is the function: def _fill(x, mask, y, padding_idx): """ Filling tensor x with y at masked positions (dim=0). """ if x is None: return y assert x.dim() == y.dim() and mask.size(0) == x.size(0) assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) n_selected = mask.sum() assert n_selected == y.size(0) if n_selected == x.size(0): return y if x.size(1) < y.size(1): dims = [x.size(0), y.size(1) - x.size(1)] if x.dim() == 3: dims.append(x.size(2)) x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) x[mask] = y elif x.size(1) > y.size(1): x[mask] = padding_idx if x.dim() == 2: x[mask, :y.size(1)] = y else: x[mask, :y.size(1), :] = y else: x[mask] = y return x
Filling tensor x with y at masked positions (dim=0).
185,546
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def load_libnat(): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e return libnat def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): libnat = load_libnat() in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] # generate labels masked_tgt_masks = [] for mask_input in mask_inputs: mask_label = [] for beam_size in mask_input[1:-1]: # HACK 1:-1 mask_label += [0] + [1 for _ in range(beam_size)] masked_tgt_masks.append( mask_label + [0 for _ in range(out_seq_len - len(mask_label))] ) mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor masked_tgt_masks = torch.tensor( masked_tgt_masks, device=out_tokens.device ).bool() mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets
null
185,547
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def load_libnat(): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e return libnat def _get_del_targets(in_tokens, out_tokens, padding_idx): libnat = load_libnat() out_seq_len = out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] # transform to tensor word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets
null
185,548
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def load_libnat(): try: from fairseq import libnat except ImportError as e: import sys sys.stderr.write("ERROR: missing libnat. run `pip install --editable .`\n") raise e return libnat def _get_del_ins_targets(in_tokens, out_tokens, padding_idx): libnat = load_libnat() in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) with torch.cuda.device_of(in_tokens): in_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) ] out_tokens_list = [ [t for t in s if t != padding_idx] for i, s in enumerate(out_tokens.tolist()) ] full_labels = libnat.suggested_ed2_path( in_tokens_list, out_tokens_list, padding_idx ) word_del_targets = [b[-1] for b in full_labels] word_del_targets = [ labels + [0 for _ in range(out_seq_len - len(labels))] for labels in word_del_targets ] mask_inputs = [ [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels ] mask_ins_targets = [ mask_input[1:-1] + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] for mask_input in mask_inputs ] # transform to tensor mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) return word_del_targets, mask_ins_targets
null
185,549
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. """ if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def _apply_ins_masks( in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx ): in_masks = in_tokens.ne(padding_idx) in_lengths = in_masks.sum(1) # HACK: hacky way to shift all the paddings to eos first. in_tokens.masked_fill_(~in_masks, eos_idx) mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) out_lengths = in_lengths + mask_ins_pred.sum(1) out_max_len = out_lengths.max() out_masks = ( new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] ) reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) out_tokens = ( in_tokens.new_zeros(in_tokens.size(0), out_max_len) .fill_(padding_idx) .masked_fill_(out_masks, unk_idx) ) out_tokens[:, 0] = in_tokens[:, 0] out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) out_scores = None if in_scores is not None: in_scores.masked_fill_(~in_masks, 0) out_scores = in_scores.new_zeros(*out_tokens.size()) out_scores[:, 0] = in_scores[:, 0] out_scores.scatter_(1, reordering, in_scores[:, 1:]) return out_tokens, out_scores
null
185,550
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def _apply_ins_words( in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx ): word_ins_masks = in_tokens.eq(unk_idx) out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) if in_scores is not None: out_scores = in_scores.masked_scatter( word_ins_masks, word_ins_scores[word_ins_masks] ) else: out_scores = None return out_tokens, out_scores
null
185,551
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def new_arange(x, *size): """ Return a Tensor of `size` filled with a range function on the device of x. If size is empty, using the size of the variable x. """ if len(size) == 0: size = x.size() return torch.arange(size[-1], device=x.device).expand(*size).contiguous() def _apply_del_words( in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx ): # apply deletion to a tensor in_masks = in_tokens.ne(padding_idx) bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) max_len = in_tokens.size(1) word_del_pred.masked_fill_(~in_masks, 1) word_del_pred.masked_fill_(bos_eos_masks, 0) reordering = ( new_arange(in_tokens) .masked_fill_(word_del_pred, max_len) .sort(1)[1] ) out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) out_scores = None if in_scores is not None: out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) out_attn = None if in_attn is not None: _mask = word_del_pred[:, :, None].expand_as(in_attn) _reordering = reordering[:, :, None].expand_as(in_attn) out_attn = in_attn.masked_fill(_mask, 0.).gather(1, _reordering) return out_tokens, out_scores, out_attn
null
185,552
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def base_architecture(args): args.encoder_embed_path = getattr(args, "encoder_embed_path", None) args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048) args.encoder_layers = getattr(args, "encoder_layers", 6) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False) args.decoder_embed_path = getattr(args, "decoder_embed_path", None) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim) args.decoder_ffn_embed_dim = getattr( args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim ) args.decoder_layers = getattr(args, "decoder_layers", 6) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False) args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False) args.attention_dropout = getattr(args, "attention_dropout", 0.0) args.activation_dropout = getattr(args, "activation_dropout", 0.0) args.activation_fn = getattr(args, "activation_fn", "relu") args.dropout = getattr(args, "dropout", 0.1) args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None) args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0) args.share_decoder_input_output_embed = getattr( args, "share_decoder_input_output_embed", False ) 
args.share_all_embeddings = getattr(args, "share_all_embeddings", False) args.no_token_positional_embeddings = getattr( args, "no_token_positional_embeddings", False ) args.adaptive_input = getattr(args, "adaptive_input", False) args.apply_bert_init = getattr(args, "apply_bert_init", False) args.decoder_output_dim = getattr( args, "decoder_output_dim", args.decoder_embed_dim ) args.sampling_for_deletion = getattr(args, "sampling_for_deletion", False) args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim) args.early_exit = getattr(args, "early_exit", "6,6,6") args.no_share_discriminator = getattr(args, "no_share_discriminator", False) args.no_share_maskpredictor = getattr(args, "no_share_maskpredictor", False) args.share_discriminator_maskpredictor = getattr(args, "share_discriminator_maskpredictor", False) args.no_share_last_layer = getattr(args, "no_share_last_layer", False) "levenshtein_transformer", "levenshtein_transformer_wmt_en_de" def levenshtein_transformer_wmt_en_de(args): base_architecture(args)
null
185,553
import torch import torch.nn as nn import torch.nn.functional as F from fairseq.iterative_refinement_generator import DecoderOut from fairseq.models import register_model, register_model_architecture from fairseq.models.transformer import ( Embedding, TransformerDecoder, TransformerEncoder, TransformerModel, TransformerDecoderLayer ) from fairseq.modules.transformer_sentence_encoder import init_bert_params from fairseq.utils import new_arange def levenshtein_transformer_vaswani_wmt_en_de_big(args): args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024) args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096) args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16) args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False) args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1024) args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 4096) args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16) args.dropout = getattr(args, "dropout", 0.3) base_architecture(args) "levenshtein_transformer", "levenshtein_transformer_wmt_en_de_big" def levenshtein_transformer_wmt_en_de_big_t2t(args): args.encoder_normalize_before = getattr(args, "encoder_normalize_before", True) args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True) args.attention_dropout = getattr(args, "attention_dropout", 0.1) args.activation_dropout = getattr(args, "activation_dropout", 0.1) levenshtein_transformer_vaswani_wmt_en_de_big(args)
null
185,561
import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_layers = getattr(args, 'encoder_layers', '[(512, 3)] * 20') args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_layers = getattr(args, 'decoder_layers', '[(512, 3)] * 20') args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_attention = getattr(args, 'decoder_attention', 'True') args.share_input_output_embed = getattr(args, 'share_input_output_embed', False) def fconv_wmt_en_ro(args): args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args)
null
185,562
import math import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import ( AdaptiveSoftmax, BeamableMM, GradMultiply, LearnedPositionalEmbedding, LinearizedConvolution, ) def base_architecture(args): def fconv_wmt_en_de(args): convs = '[(512, 3)] * 9' # first 9 layers have 512 units convs += ' + [(1024, 3)] * 4' # next 4 layers have 1024 units convs += ' + [(2048, 1)] * 2' # final 2 layers use 1x1 convolutions args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 768) args.encoder_layers = getattr(args, 'encoder_layers', convs) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 768) args.decoder_layers = getattr(args, 'decoder_layers', convs) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) base_architecture(args)
null
185,564
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax def Embedding(num_embeddings, embedding_dim, padding_idx): m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx) nn.init.uniform_(m.weight, -0.1, 0.1) nn.init.constant_(m.weight[padding_idx], 0) return m
null
185,565
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax def LSTM(input_size, hidden_size, **kwargs): m = nn.LSTM(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m
null
185,566
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax def LSTMCell(input_size, hidden_size, **kwargs): m = nn.LSTMCell(input_size, hidden_size, **kwargs) for name, param in m.named_parameters(): if 'weight' in name or 'bias' in name: param.data.uniform_(-0.1, 0.1) return m
null
185,567
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax The provided code snippet includes necessary dependencies for implementing the `Linear` function. Write a Python function `def Linear(in_features, out_features, bias=True, dropout=0)` to solve the following problem: Linear layer (input: N x T x C) Here is the function: def Linear(in_features, out_features, bias=True, dropout=0): """Linear layer (input: N x T x C)""" m = nn.Linear(in_features, out_features, bias=bias) m.weight.data.uniform_(-0.1, 0.1) if bias: m.bias.data.uniform_(-0.1, 0.1) return m
Linear layer (input: N x T x C)
185,568
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 512) args.encoder_embed_path = getattr(args, 'encoder_embed_path', None) args.encoder_freeze_embed = getattr(args, 'encoder_freeze_embed', False) args.encoder_hidden_size = getattr(args, 'encoder_hidden_size', args.encoder_embed_dim) args.encoder_layers = getattr(args, 'encoder_layers', 1) args.encoder_bidirectional = getattr(args, 'encoder_bidirectional', False) args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', args.dropout) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', args.dropout) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 512) args.decoder_embed_path = getattr(args, 'decoder_embed_path', None) args.decoder_freeze_embed = getattr(args, 'decoder_freeze_embed', False) args.decoder_hidden_size = getattr(args, 'decoder_hidden_size', args.decoder_embed_dim) args.decoder_layers = getattr(args, 'decoder_layers', 1) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 512) args.decoder_attention = getattr(args, 'decoder_attention', '1') args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', args.dropout) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout) args.share_decoder_input_output_embed = getattr(args, 'share_decoder_input_output_embed', False) args.share_all_embeddings = getattr(args, 'share_all_embeddings', False) args.adaptive_softmax_cutoff = getattr(args, 'adaptive_softmax_cutoff', '10000,50000,200000') def lstm_wiseman_iwslt_de_en(args): args.dropout = getattr(args, 'dropout', 0.1) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 256) 
args.encoder_dropout_in = getattr(args, 'encoder_dropout_in', 0) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 256) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 256) args.decoder_dropout_in = getattr(args, 'decoder_dropout_in', 0) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', args.dropout) base_architecture(args)
null
185,569
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import options, utils from fairseq.models import ( FairseqEncoder, FairseqIncrementalDecoder, FairseqEncoderDecoderModel, register_model, register_model_architecture, ) from fairseq.modules import AdaptiveSoftmax def base_architecture(args): def lstm_luong_wmt_en_de(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1000) args.encoder_layers = getattr(args, 'encoder_layers', 4) args.encoder_dropout_out = getattr(args, 'encoder_dropout_out', 0) args.decoder_embed_dim = getattr(args, 'decoder_embed_dim', 1000) args.decoder_layers = getattr(args, 'decoder_layers', 4) args.decoder_out_embed_dim = getattr(args, 'decoder_out_embed_dim', 1000) args.decoder_dropout_out = getattr(args, 'decoder_dropout_out', 0) base_architecture(args)
null
185,570
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( BaseFairseqModel, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def bert_base_architecture(args): def bert_large_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.encoder_layers = getattr(args, 'encoder_layers', 24) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 16) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) bert_base_architecture(args)
null
185,571
import torch import torch.nn as nn import torch.nn.functional as F from fairseq import utils from fairseq.models import ( BaseFairseqModel, FairseqEncoder, register_model, register_model_architecture, ) from fairseq.modules import ( LayerNorm, SinusoidalPositionalEmbedding, TransformerSentenceEncoder, ) from fairseq.modules.transformer_sentence_encoder import init_bert_params def base_architecture(args): args.dropout = getattr(args, 'dropout', 0.1) args.attention_dropout = getattr(args, 'attention_dropout', 0.1) args.act_dropout = getattr(args, 'act_dropout', 0.0) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.bias_kv = getattr(args, 'bias_kv', False) args.zero_attn = getattr(args, 'zero_attn', False) args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.share_encoder_input_output_embed = getattr(args, 'share_encoder_input_output_embed', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', False) args.no_token_positional_embeddings = getattr(args, 'no_token_positional_embeddings', False) args.num_segment = getattr(args, 'num_segment', 2) args.sentence_class_num = getattr(args, 'sentence_class_num', 2) args.sent_loss = getattr(args, 'sent_loss', False) args.apply_bert_init = getattr(args, 'apply_bert_init', False) args.activation_fn = getattr(args, 'activation_fn', 'relu') args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) def xlm_architecture(args): args.encoder_embed_dim = getattr(args, 'encoder_embed_dim', 1024) args.share_encoder_input_output_embed = getattr( args, 'share_encoder_input_output_embed', True) args.no_token_positional_embeddings = getattr( args, 'no_token_positional_embeddings', False) args.encoder_learned_pos = getattr(args, 'encoder_learned_pos', 
True) args.num_segment = getattr(args, 'num_segment', 1) args.encoder_layers = getattr(args, 'encoder_layers', 6) args.encoder_attention_heads = getattr(args, 'encoder_attention_heads', 8) args.encoder_ffn_embed_dim = getattr(args, 'encoder_ffn_embed_dim', 4096) args.bias_kv = getattr(args, 'bias_kv', False) args.zero_attn = getattr(args, 'zero_attn', False) args.sent_loss = getattr(args, 'sent_loss', False) args.activation_fn = getattr(args, 'activation_fn', 'gelu') args.encoder_normalize_before = getattr(args, 'encoder_normalize_before', False) args.pooler_activation_fn = getattr(args, 'pooler_activation_fn', 'tanh') args.apply_bert_init = getattr(args, 'apply_bert_init', True) base_architecture(args)
null
185,572
import argparse import os import sys from fairseq import bleu from fairseq.data import dictionary def get_parser(): parser = argparse.ArgumentParser(description='Command-line script for BLEU scoring.') # fmt: off parser.add_argument('-s', '--sys', default='-', help='system output') parser.add_argument('-r', '--ref', required=True, help='references') parser.add_argument('-o', '--order', default=4, metavar='N', type=int, help='consider ngrams up to this order') parser.add_argument('--ignore-case', action='store_true', help='case-insensitive scoring') parser.add_argument('--sacrebleu', action='store_true', help='score with sacrebleu') parser.add_argument('--sentence-bleu', action='store_true', help='report sentence-level BLEUs (i.e., with +1 smoothing)') # fmt: on return parser
null
185,573
import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.data import LMContextWindowDataset from fairseq.meters import StopwatchMeter, TimeMeter from fairseq.sequence_scorer import SequenceScorer def main(parsed_args): def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) main(args)
null
185,574
import torch from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils from fairseq.meters import StopwatchMeter, TimeMeter def main(args): def cli_main(): parser = options.get_generation_parser() args = options.parse_args_and_arch(parser) main(args)
null
185,583
import collections import math import random import numpy as np import torch from fairseq import checkpoint_utils, distributed_utils, options, progress_bar, tasks, utils from fairseq.data import iterators from fairseq.trainer import Trainer from fairseq.meters import AverageMeter, StopwatchMeter def main(args, init_distributed=False): utils.import_user_module(args) assert args.max_tokens is not None or args.max_sentences is not None, \ 'Must specify batch size either with --max-tokens or --max-sentences' # Initialize CUDA and distributed training if torch.cuda.is_available() and not args.cpu: torch.cuda.set_device(args.device_id) np.random.seed(args.seed) torch.manual_seed(args.seed) if init_distributed: args.distributed_rank = distributed_utils.distributed_init(args) if distributed_utils.is_master(args): checkpoint_utils.verify_checkpoint_directory(args.save_dir) # Print args print(args) # Setup task, e.g., translation, language modeling, etc. task = tasks.setup_task(args) # Load valid dataset (we load training data below, based on the latest checkpoint) for valid_sub_split in args.valid_subset.split(','): task.load_dataset(valid_sub_split, combine=False, epoch=0) # Build model and criterion model = task.build_model(args) criterion = task.build_criterion(args) print(model) print('| model {}, criterion {}'.format(args.arch, criterion.__class__.__name__)) print('| num. model params: {} (num. 
trained: {})'.format( sum(p.numel() for p in model.parameters()), sum(p.numel() for p in model.parameters() if p.requires_grad), )) # Build trainer trainer = Trainer(args, task, model, criterion) print('| training on {} GPUs'.format(args.distributed_world_size)) print('| max tokens per GPU = {} and max sentences per GPU = {}'.format( args.max_tokens, args.max_sentences, )) # Load the latest checkpoint if one is available and restore the # corresponding train iterator extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer) # Prepare train task.prepare_train(model, criterion) # Train until the learning rate gets too small max_epoch = args.max_epoch or math.inf max_update = args.max_update or math.inf lr = trainer.get_lr() train_meter = StopwatchMeter() train_meter.start() valid_subsets = args.valid_subset.split(',') while ( lr > args.min_lr and (epoch_itr.epoch < max_epoch or (epoch_itr.epoch == max_epoch and epoch_itr._next_epoch_itr is not None)) and trainer.get_num_updates() < max_update ): # train for one epoch train(args, trainer, task, epoch_itr) if not args.disable_validation and epoch_itr.epoch % args.validate_interval == 0: valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets) else: valid_losses = [None] # only use first validation loss to update the learning rate lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0]) # save checkpoint if epoch_itr.epoch % args.save_interval == 0: checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0]) reload_dataset = ':' in getattr(args, 'data', '') reload_dataset = reload_dataset or args.reload_dataset_per_epoch # sharded data: get train iterator for next epoch epoch_itr = trainer.get_train_iterator(epoch_itr.epoch, load_dataset=reload_dataset) train_meter.stop() print('| done training in {:.1f} seconds'.format(train_meter.sum)) def distributed_main(i, args, start_rank=0): args.device_id = i if args.distributed_rank is None: # torch.multiprocessing.spawn 
args.distributed_rank = start_rank + i main(args, init_distributed=True) def cli_main(): parser = options.get_training_parser() args = options.parse_args_and_arch(parser) if args.distributed_init_method is None: distributed_utils.infer_init_method(args) if args.distributed_init_method is not None: # distributed training if torch.cuda.device_count() > 1 and not args.distributed_no_spawn: start_rank = args.distributed_rank args.distributed_rank = None # assign automatically torch.multiprocessing.spawn( fn=distributed_main, args=(args, start_rank), nprocs=torch.cuda.device_count(), ) else: distributed_main(args.device_id, args) elif args.distributed_world_size > 1: # fallback for single node with multiple GPUs assert args.distributed_world_size <= torch.cuda.device_count() port = random.randint(10000, 20000) args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port) args.distributed_rank = None # set based on device id if max(args.update_freq) > 1 and args.ddp_backend != 'no_c10d': print('| NOTE: you may get better performance with: --ddp-backend=no_c10d') torch.multiprocessing.spawn( fn=distributed_main, args=(args, ), nprocs=args.distributed_world_size, ) else: # single GPU training main(args)
null
185,588
from collections import Counter from itertools import zip_longest from fairseq import options, tasks, utils from fairseq.data import indexed_dataset from fairseq.binarizer import Binarizer from multiprocessing import Pool import os import shutil def dataset_dest_file(args, output_prefix, lang, extension): class Binarizer: def binarize( filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=-1, already_numberized=False, ) -> Dict[str, int]: def replaced_consumer(word, idx): def binarize_alignments( filename, alignment_parser, consumer, offset=0, end=-1 ) -> Dict[str, int]: def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end): ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, "bin"), impl=args.dataset_impl, vocab_size=None) def consumer(tensor): ds.add_item(tensor) res = Binarizer.binarize_alignments(filename, parse_alignment, consumer, offset=offset, end=end) ds.finalize(dataset_dest_file(args, output_prefix, None, "idx")) return res
null
185,589
from collections import Counter from itertools import zip_longest from fairseq import options, tasks, utils from fairseq.data import indexed_dataset from fairseq.binarizer import Binarizer from multiprocessing import Pool import os import shutil class Binarizer: def binarize( filename, dict, consumer, tokenize=tokenize_line, append_eos=True, reverse_order=False, offset=0, end=-1, already_numberized=False, ) -> Dict[str, int]: nseq, ntok = 0, 0 replaced = Counter() def replaced_consumer(word, idx): if idx == dict.unk_index and word != dict.unk_word: replaced.update([word]) with Chunker( PathManager.get_local_path(filename), offset, end ) as line_iterator: for line in line_iterator: if already_numberized: id_strings = line.strip().split() id_list = [int(id_string) for id_string in id_strings] if reverse_order: id_list.reverse() if append_eos: id_list.append(dict.eos()) ids = torch.IntTensor(id_list) else: ids = dict.encode_line( line=line, line_tokenizer=tokenize, add_if_not_exist=False, consumer=replaced_consumer, append_eos=append_eos, reverse_order=reverse_order, ) nseq += 1 ntok += len(ids) consumer(ids) return { "nseq": nseq, "nunk": sum(replaced.values()), "ntok": ntok, "replaced": replaced, } def binarize_alignments( filename, alignment_parser, consumer, offset=0, end=-1 ) -> Dict[str, int]: nseq = 0 with Chunker( PathManager.get_local_path(filename), offset, end ) as line_iterator: for line in line_iterator: ids = alignment_parser(line) nseq += 1 consumer(ids) return {"nseq": nseq} def get_offsets(input_file, num_workers): return Binarizer.find_offsets(input_file, num_workers)
null
185,592
import numpy as np import torch from fairseq import checkpoint_utils, options, progress_bar, tasks, utils from fairseq.data import LMContextWindowDataset from fairseq.meters import StopwatchMeter, TimeMeter from fairseq.sequence_scorer import SequenceScorer def main(parsed_args): assert parsed_args.path is not None, '--path required for evaluation!' utils.import_user_module(parsed_args) print(parsed_args) use_cuda = torch.cuda.is_available() and not parsed_args.cpu task = tasks.setup_task(parsed_args) # Load ensemble print('| loading model(s) from {}'.format(parsed_args.path)) models, args = checkpoint_utils.load_model_ensemble( parsed_args.path.split(':'), arg_overrides=eval(parsed_args.model_overrides), task=task, ) for arg in vars(parsed_args).keys(): if arg not in { 'self_target', 'future_target', 'past_target', 'tokens_per_sample', 'output_size_dictionary', 'add_bos_token', }: setattr(args, arg, getattr(parsed_args, arg)) # reduce tokens per sample by the required context window size args.tokens_per_sample -= args.context_window task = tasks.setup_task(args) # Load dataset splits task.load_dataset(args.gen_subset) dataset = task.dataset(args.gen_subset) if args.context_window > 0: dataset = LMContextWindowDataset( dataset=dataset, tokens_per_sample=args.tokens_per_sample, context_window=args.context_window, pad_idx=task.source_dictionary.pad(), ) print('| {} {} {} examples'.format(args.data, args.gen_subset, len(dataset))) # Optimize ensemble for generation and set the source and dest dicts on the model (required by scorer) for model in models: model.make_generation_fast_() if args.fp16: model.half() if use_cuda: model.cuda() assert len(models) > 0 print('num. 
model params: {}'.format(sum(p.numel() for p in models[0].parameters()))) itr = task.get_batch_iterator( dataset=dataset, max_tokens=args.max_tokens or 36000, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions(*[ model.max_positions() for model in models ]), ignore_invalid_inputs=True, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, ).next_epoch_itr(shuffle=False) gen_timer = StopwatchMeter() scorer = SequenceScorer(task.target_dictionary, args.softmax_batch) score_sum = 0. count = 0 if args.remove_bpe is not None: if args.remove_bpe == 'sentencepiece': raise NotImplementedError else: bpe_cont = args.remove_bpe.rstrip() bpe_toks = set( i for i in range(len(task.source_dictionary)) if task.source_dictionary[i].endswith(bpe_cont) ) bpe_len = len(bpe_cont) else: bpe_toks = None bpe_len = 0 word_stats = dict() with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: if 'net_input' not in sample: continue sample = utils.move_to_cuda(sample) if use_cuda else sample gen_timer.start() hypos = scorer.generate(models, sample) gen_timer.stop(sample['ntokens']) for i, hypos_i in enumerate(hypos): hypo = hypos_i[0] sample_id = sample['id'][i] tokens = hypo['tokens'] tgt_len = tokens.numel() pos_scores = hypo['positional_scores'].float() if args.add_bos_token: assert hypo['tokens'][0].item() == task.target_dictionary.bos() tokens = tokens[1:] pos_scores = pos_scores[1:] skipped_toks = 0 if bpe_toks is not None: for i in range(tgt_len - 1): if tokens[i].item() in bpe_toks: skipped_toks += 1 pos_scores[i + 1] += pos_scores[i] pos_scores[i] = 0 inf_scores = pos_scores.eq(float('inf')) | pos_scores.eq(float('-inf')) if inf_scores.any(): print('| Skipping tokens with inf scores:', task.target_dictionary.string(tokens[inf_scores.nonzero()])) pos_scores = pos_scores[(~inf_scores).nonzero()] score_sum += pos_scores.sum().cpu() count += pos_scores.numel() - skipped_toks if 
args.output_word_probs or args.output_word_stats: w = '' word_prob = [] is_bpe = False for i in range(len(tokens)): w_ind = tokens[i].item() w += task.source_dictionary[w_ind] if bpe_toks is not None and w_ind in bpe_toks: w = w[:-bpe_len] is_bpe = True else: word_prob.append((w, pos_scores[i].item())) next_prob = None ind = i + 1 while ind < len(tokens): if pos_scores[ind].item() != 0: next_prob = pos_scores[ind] break ind += 1 word_stats.setdefault(w, WordStat(w, is_bpe)).add(pos_scores[i].item(), next_prob) is_bpe = False w = '' if args.output_word_probs: print( str(int(sample_id)) + " " + ('\t'.join('{} [{:2f}]'.format(x[0], x[1]) for x in word_prob)) ) wps_meter.update(sample['ntokens']) t.log({'wps': round(wps_meter.avg)}) avg_nll_loss = -score_sum / count print('| Evaluated {} tokens in {:.1f}s ({:.2f} tokens/s)'.format(gen_timer.n, gen_timer.sum, 1. / gen_timer.avg)) print('| Loss: {:.4f}, Perplexity: {:.2f}'.format(avg_nll_loss, np.exp(avg_nll_loss))) if args.output_word_stats: for ws in sorted(word_stats.values(), key=lambda x: x.count, reverse=True): print(ws) def cli_main(): parser = options.get_eval_lm_parser() args = options.parse_args_and_arch(parser) main(args)
null
185,593
import torch from fairseq import bleu, checkpoint_utils, options, progress_bar, tasks, utils from fairseq.meters import StopwatchMeter, TimeMeter def main(args): assert args.path is not None, '--path required for generation!' assert not args.sampling or args.nbest == args.beam, \ '--sampling requires --nbest to be equal to --beam' assert args.replace_unk is None or args.raw_text, \ '--replace-unk requires a raw text dataset (--raw-text)' utils.import_user_module(args) if args.max_tokens is None and args.max_sentences is None: args.max_tokens = 12000 print(args) use_cuda = torch.cuda.is_available() and not args.cpu # Load dataset splits task = tasks.setup_task(args) task.load_dataset(args.gen_subset) # Set dictionaries try: src_dict = getattr(task, 'source_dictionary', None) except NotImplementedError: src_dict = None tgt_dict = task.target_dictionary # Load ensemble print('| loading model(s) from {}'.format(args.path)) models, _model_args = checkpoint_utils.load_model_ensemble( args.path.split(':'), arg_overrides=eval(args.model_overrides), task=task, ) # Optimize ensemble for generation for model in models: model.make_generation_fast_( beamable_mm_beam_size=None if args.no_beamable_mm else args.beam, need_attn=args.print_alignment, ) if args.fp16: model.half() if use_cuda: model.cuda() # Load alignment dictionary for unknown word replacement # (None if no unknown word replacement, empty if no path to align dictionary) align_dict = utils.load_align_dict(args.replace_unk) # Load dataset (possibly sharded) itr = task.get_batch_iterator( dataset=task.dataset(args.gen_subset), max_tokens=args.max_tokens, max_sentences=args.max_sentences, max_positions=utils.resolve_max_positions( task.max_positions(), *[model.max_positions() for model in models] ), ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test, required_batch_size_multiple=args.required_batch_size_multiple, num_shards=args.num_shards, shard_id=args.shard_id, num_workers=args.num_workers, 
).next_epoch_itr(shuffle=False) # Initialize generator gen_timer = StopwatchMeter() generator = task.build_generator(args) # Generate and compute BLEU score if args.sacrebleu: scorer = bleu.SacrebleuScorer() else: scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk()) num_sentences = 0 has_target = True with progress_bar.build_progress_bar(args, itr) as t: wps_meter = TimeMeter() for sample in t: sample = utils.move_to_cuda(sample) if use_cuda else sample if 'net_input' not in sample: continue prefix_tokens = None if args.prefix_size > 0: prefix_tokens = sample['target'][:, :args.prefix_size] gen_timer.start() hypos = task.inference_step(generator, models, sample, prefix_tokens) num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos) gen_timer.stop(num_generated_tokens) for i, sample_id in enumerate(sample['id'].tolist()): has_target = sample['target'] is not None # Remove padding src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad()) target_tokens = None if has_target: target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu() # Either retrieve the original sentences or regenerate them from tokens. 
if align_dict is not None: src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id) target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id) else: if src_dict is not None: src_str = src_dict.string(src_tokens, args.remove_bpe) else: src_str = "" if has_target: target_str = tgt_dict.string(target_tokens, args.remove_bpe, escape_unk=True) if not args.quiet: if src_dict is not None: print('S-{}\t{}'.format(sample_id, src_str)) if has_target: print('T-{}\t{}'.format(sample_id, target_str)) # Process top predictions for j, hypo in enumerate(hypos[i][:args.nbest]): hypo_tokens, hypo_str, alignment = utils.post_process_prediction( hypo_tokens=hypo['tokens'].int().cpu(), src_str=src_str, alignment=hypo['alignment'], align_dict=align_dict, tgt_dict=tgt_dict, remove_bpe=args.remove_bpe, ) if not args.quiet: print('H-{}\t{}\t{}'.format(sample_id, hypo['score'], hypo_str)) print('P-{}\t{}'.format( sample_id, ' '.join(map( lambda x: '{:.4f}'.format(x), hypo['positional_scores'].tolist(), )) )) if args.print_alignment: print('A-{}\t{}'.format( sample_id, ' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment]) )) if args.print_step: print('I-{}\t{}'.format(sample_id, hypo['steps'])) if getattr(args, 'retain_iter_history', False): print("\n".join([ 'E-{}_{}\t{}'.format( sample_id, step, utils.post_process_prediction( h['tokens'].int().cpu(), src_str, None, None, tgt_dict, None)[1]) for step, h in enumerate(hypo['history'])])) # Score only the top hypothesis if has_target and j == 0: if align_dict is not None or args.remove_bpe is not None: # Convert back to tokens for evaluation with unk replacement and/or without BPE target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True) if hasattr(scorer, 'add_string'): scorer.add_string(target_str, hypo_str) else: scorer.add(target_tokens, hypo_tokens) wps_meter.update(num_generated_tokens) t.log({'wps': round(wps_meter.avg)}) num_sentences += sample['nsentences'] 
print('| Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format( num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg)) if has_target: print('| Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string())) return scorer def cli_main(): parser = options.get_generation_parser() args = options.parse_args_and_arch(parser) main(args)
null
185,594
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset, PrependTokenDataset, AppendTokenDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def build_tokenizer(args): tokenizer = AutoTokenizer.from_pretrained(args.model_name) return tokenizer
null
185,595
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset, PrependTokenDataset, AppendTokenDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, default="CZWin32768/xlm-align") parser.add_argument("--input_src", type=str, default="") parser.add_argument("--input_trg", type=str, default="") parser.add_argument("--output", type=str, default="") parser.add_argument("--max_pos", type=int, default=256) args = parser.parse_args() return args
null
185,596
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset, PrependTokenDataset, AppendTokenDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def make_builder(out_file, impl, vocab_size=None): if impl == "mmap": return MMapIndexedDatasetBuilder( out_file, dtype=best_fitting_int_dtype(vocab_size) ) elif impl == "fasta": raise NotImplementedError elif impl == "huffman": raise ValueError("Use HuffmanCodeBuilder directly as it has a different interface.") else: return IndexedDatasetBuilder(out_file) def save_items(items, prefix, vocab_size): bin_fn = "%s.bin" % prefix idx_fn = "%s.idx" % prefix builder = make_builder(bin_fn, "mmap", vocab_size=vocab_size) print("builder: " + str(builder)) for item in items: builder.add_item(item) builder.finalize(idx_fn)
null
185,597
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset, PrependTokenDataset, AppendTokenDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def get_indices(input_fn, tokenizer): indices = [] with open(input_fn) as fp: for lid, line in tqdm(enumerate(fp)): # DEBUG # if lid > 500: break line = line.strip() indices.append(tokenizer.encode(line)) print("tokenize finished.") return indices
null
185,598
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer def build_tokenizer(args): tokenizer = AutoTokenizer.from_pretrained(args.model_name) return tokenizer
null
185,599
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, default="CZWin32768/xlm-align") parser.add_argument("--input", type=str, default="") parser.add_argument("--output", type=str, default="") parser.add_argument('--sample-break-mode', default='complete', choices=['none', 'complete', 'complete_doc', 'eos'], help='If omitted or "none", fills each sample with tokens-per-sample ' 'tokens. If set to "complete", splits samples only at the end ' 'of sentence, but may include multiple sentences per sample. ' '"complete_doc" is similar but respects doc boundaries. ' 'If set to "eos", includes only one sentence per sample.') parser.add_argument('--tokens-per-sample', default=510, type=int, help='max number of total tokens over all segments per sample') parser.add_argument('--dataset_impl', default="mmap", type=str) args = parser.parse_args() return args
null
185,600
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer def make_builder(out_file, impl, vocab_size=None): if impl == "mmap": return MMapIndexedDatasetBuilder( out_file, dtype=best_fitting_int_dtype(vocab_size) ) elif impl == "fasta": raise NotImplementedError elif impl == "huffman": raise ValueError("Use HuffmanCodeBuilder directly as it has a different interface.") else: return IndexedDatasetBuilder(out_file) def save_items(items, prefix, vocab_size): bin_fn = "%s.bin" % prefix idx_fn = "%s.idx" % prefix builder = make_builder(bin_fn, "mmap", vocab_size=vocab_size) print("builder: " + str(builder)) for item in items: builder.add_item(item) builder.finalize(idx_fn)
null
185,601
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def build_tokenizer(args): tokenizer = AutoTokenizer.from_pretrained(args.model_name) return tokenizer
null
185,602
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def get_args(): parser = argparse.ArgumentParser() parser.add_argument("--model_name", type=str, default="CZWin32768/xlm-align") parser.add_argument("--input_src", type=str, default="") parser.add_argument("--input_trg", type=str, default="") parser.add_argument("--output", type=str, default="") parser.add_argument("--max_pos", type=int, default=256) args = parser.parse_args() return args
null
185,603
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def make_builder(out_file, impl, vocab_size=None): if impl == "mmap": return MMapIndexedDatasetBuilder( out_file, dtype=best_fitting_int_dtype(vocab_size) ) elif impl == "fasta": raise NotImplementedError elif impl == "huffman": raise ValueError("Use HuffmanCodeBuilder directly as it has a different interface.") else: return IndexedDatasetBuilder(out_file) def save_items(items, prefix, vocab_size): bin_fn = "%s.bin" % prefix idx_fn = "%s.idx" % prefix builder = make_builder(bin_fn, "mmap", vocab_size=vocab_size) print("builder: " + str(builder)) for item in items: builder.add_item(item) builder.finalize(idx_fn)
null
185,604
import argparse import os import torch from fairseq.data import (FairseqDataset, PrependTokenDataset, TokenBlockDataset, TruncateDataset, data_utils, StripTokenDataset, ConcatDataset) from fairseq.data.indexed_dataset import make_builder from tqdm import tqdm from transformers import AutoTokenizer from infoxlm.data.tlm_dataset import TLMDataset def get_indices(input_fn, tokenizer): indices = [] with open(input_fn) as fp: for lid, line in tqdm(enumerate(fp)): # DEBUG # if lid > 500: break line = line.strip() indices.append(tokenizer.encode(line)) print("tokenize finished.") return indices
null
185,605
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support def get_platform(): # Cross compiling if "_PYTHON_HOST_PLATFORM" in os.environ: return os.environ["_PYTHON_HOST_PLATFORM"] # Get value of sys.platform if sys.platform.startswith('osf1'): return 'osf1' return sys.platform
null
185,606
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support def run_command(cmd): status = os.system(cmd) return os.waitstatus_to_exitcode(status)
null
185,607
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support def set_compiler_flags(compiler_flags, compiler_py_flags_nodist): flags = sysconfig.get_config_var(compiler_flags) py_flags_nodist = sysconfig.get_config_var(compiler_py_flags_nodist) sysconfig.get_config_vars()[compiler_flags] = flags + ' ' + py_flags_nodist
null
185,608
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support The provided code snippet includes necessary dependencies for implementing the `add_dir_to_list` function. Write a Python function `def add_dir_to_list(dirlist, dir)` to solve the following problem: Add the directory 'dir' to the list 'dirlist' (after any relative directories) if: 1) 'dir' is not already in 'dirlist' 2) 'dir' actually exists, and is a directory. Here is the function: def add_dir_to_list(dirlist, dir): """Add the directory 'dir' to the list 'dirlist' (after any relative directories) if: 1) 'dir' is not already in 'dirlist' 2) 'dir' actually exists, and is a directory. """ if dir is None or not os.path.isdir(dir) or dir in dirlist: return for i, path in enumerate(dirlist): if not os.path.isabs(path): dirlist.insert(i + 1, dir) return dirlist.insert(0, dir)
Add the directory 'dir' to the list 'dirlist' (after any relative directories) if: 1) 'dir' is not already in 'dirlist' 2) 'dir' actually exists, and is a directory.
185,609
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support The provided code snippet includes necessary dependencies for implementing the `sysroot_paths` function. Write a Python function `def sysroot_paths(make_vars, subdirs)` to solve the following problem: Get the paths of sysroot sub-directories. * make_vars: a sequence of names of variables of the Makefile where sysroot may be set. * subdirs: a sequence of names of subdirectories used as the location for headers or libraries. Here is the function: def sysroot_paths(make_vars, subdirs): """Get the paths of sysroot sub-directories. * make_vars: a sequence of names of variables of the Makefile where sysroot may be set. * subdirs: a sequence of names of subdirectories used as the location for headers or libraries. """ dirs = [] for var_name in make_vars: var = sysconfig.get_config_var(var_name) if var is not None: m = re.search(r'--sysroot=([^"]\S*|"[^"]+")', var) if m is not None: sysroot = m.group(1).strip('"') for subdir in subdirs: if os.path.isabs(subdir): subdir = subdir[1:] path = os.path.join(sysroot, subdir) if os.path.isdir(path): dirs.append(path) break return dirs
Get the paths of sysroot sub-directories. * make_vars: a sequence of names of variables of the Makefile where sysroot may be set. * subdirs: a sequence of names of subdirectories used as the location for headers or libraries.
185,610
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support MACOS_SDK_SPECIFIED = None def macosx_sdk_root(): """Return the directory of the current macOS SDK. If no SDK was explicitly configured, call the compiler to find which include files paths are being searched by default. Use '/' if the compiler is searching /usr/include (meaning system header files are installed) or use the root of an SDK if that is being searched. (The SDK may be supplied via Xcode or via the Command Line Tools). The SDK paths used by Apple-supplied tool chains depend on the setting of various variables; see the xcrun man page for more info. Also sets MACOS_SDK_SPECIFIED for use by macosx_sdk_specified(). """ global MACOS_SDK_ROOT, MACOS_SDK_SPECIFIED # If already called, return cached result. if MACOS_SDK_ROOT: return MACOS_SDK_ROOT cflags = sysconfig.get_config_var('CFLAGS') m = re.search(r'-isysroot\s*(\S+)', cflags) if m is not None: MACOS_SDK_ROOT = m.group(1) MACOS_SDK_SPECIFIED = MACOS_SDK_ROOT != '/' else: MACOS_SDK_ROOT = _osx_support._default_sysroot( sysconfig.get_config_var('CC')) MACOS_SDK_SPECIFIED = False return MACOS_SDK_ROOT The provided code snippet includes necessary dependencies for implementing the `macosx_sdk_specified` function. Write a Python function `def macosx_sdk_specified()` to solve the following problem: Returns true if an SDK was explicitly configured. True if an SDK was selected at configure time, either by specifying --enable-universalsdk=(something other than no or /) or by adding a -isysroot option to CFLAGS. In some cases, like when making decisions about macOS Tk framework paths, we need to be able to know whether the user explicitly asked to build with an SDK versus the implicit use of an SDK when header files are no longer installed on a running system by the Command Line Tools. 
Here is the function: def macosx_sdk_specified(): """Returns true if an SDK was explicitly configured. True if an SDK was selected at configure time, either by specifying --enable-universalsdk=(something other than no or /) or by adding a -isysroot option to CFLAGS. In some cases, like when making decisions about macOS Tk framework paths, we need to be able to know whether the user explicitly asked to build with an SDK versus the implicit use of an SDK when header files are no longer installed on a running system by the Command Line Tools. """ global MACOS_SDK_SPECIFIED # If already called, return cached result. if MACOS_SDK_SPECIFIED: return MACOS_SDK_SPECIFIED # Find the sdk root and set MACOS_SDK_SPECIFIED macosx_sdk_root() return MACOS_SDK_SPECIFIED
Returns true if an SDK was explicitly configured. True if an SDK was selected at configure time, either by specifying --enable-universalsdk=(something other than no or /) or by adding a -isysroot option to CFLAGS. In some cases, like when making decisions about macOS Tk framework paths, we need to be able to know whether the user explicitly asked to build with an SDK versus the implicit use of an SDK when header files are no longer installed on a running system by the Command Line Tools.
185,611
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support def grep_headers_for(function, headers): for header in headers: with open(header, 'r', errors='surrogateescape') as f: if function in f.read(): return True return False
null
185,612
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support MACOS = (HOST_PLATFORM == 'darwin') def macosx_sdk_root(): """Return the directory of the current macOS SDK. If no SDK was explicitly configured, call the compiler to find which include files paths are being searched by default. Use '/' if the compiler is searching /usr/include (meaning system header files are installed) or use the root of an SDK if that is being searched. (The SDK may be supplied via Xcode or via the Command Line Tools). The SDK paths used by Apple-supplied tool chains depend on the setting of various variables; see the xcrun man page for more info. Also sets MACOS_SDK_SPECIFIED for use by macosx_sdk_specified(). """ global MACOS_SDK_ROOT, MACOS_SDK_SPECIFIED # If already called, return cached result. if MACOS_SDK_ROOT: return MACOS_SDK_ROOT cflags = sysconfig.get_config_var('CFLAGS') m = re.search(r'-isysroot\s*(\S+)', cflags) if m is not None: MACOS_SDK_ROOT = m.group(1) MACOS_SDK_SPECIFIED = MACOS_SDK_ROOT != '/' else: MACOS_SDK_ROOT = _osx_support._default_sysroot( sysconfig.get_config_var('CC')) MACOS_SDK_SPECIFIED = False return MACOS_SDK_ROOT def is_macosx_sdk_path(path): """ Returns True if 'path' can be located in a macOS SDK """ return ( (path.startswith('/usr/') and not path.startswith('/usr/local')) or path.startswith('/System/Library') or path.startswith('/System/iOSSupport') ) def find_library_file(compiler, libname, std_dirs, paths): result = compiler.find_library_file(std_dirs + paths, libname) if result is None: return None if MACOS: sysroot = macosx_sdk_root() # Check whether the found file is in one of the standard directories dirname = os.path.dirname(result) for p in std_dirs: # Ensure path doesn't end with path separator p = p.rstrip(os.sep) if MACOS and is_macosx_sdk_path(p): # Note that, as of Xcode 7, 
Apple SDKs may contain textual stub # libraries with .tbd extensions rather than the normal .dylib # shared libraries installed in /. The Apple compiler tool # chain handles this transparently but it can cause problems # for programs that are being built with an SDK and searching # for specific libraries. Distutils find_library_file() now # knows to also search for and return .tbd files. But callers # of find_library_file need to keep in mind that the base filename # of the returned SDK library file might have a different extension # from that of the library file installed on the running system, # for example: # /Applications/Xcode.app/Contents/Developer/Platforms/ # MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/ # usr/lib/libedit.tbd # vs # /usr/lib/libedit.dylib if os.path.join(sysroot, p[1:]) == dirname: return [ ] if p == dirname: return [ ] # Otherwise, it must have been in one of the additional directories, # so we have to figure out which one. for p in paths: # Ensure path doesn't end with path separator p = p.rstrip(os.sep) if MACOS and is_macosx_sdk_path(p): if os.path.join(sysroot, p[1:]) == dirname: return [ p ] if p == dirname: return [p] else: assert False, "Internal error: Path not found in std_dirs or paths"
null
185,613
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support def validate_tzpath(): base_tzpath = sysconfig.get_config_var('TZPATH') if not base_tzpath: return tzpaths = base_tzpath.split(os.pathsep) bad_paths = [tzpath for tzpath in tzpaths if not os.path.isabs(tzpath)] if bad_paths: raise ValueError('TZPATH must contain only absolute paths, ' + f'found:\n{tzpaths!r}\nwith invalid paths:\n' + f'{bad_paths!r}')
null
185,614
import argparse import importlib._bootstrap import importlib.machinery import importlib.util import logging import os import re import sys import sysconfig import warnings from glob import glob, escape import _osx_support log = logging.getLogger('setup') def find_file(filename, std_dirs, paths): """Searches for the directory where a given file is located, and returns a possibly-empty list of additional directories, or None if the file couldn't be found at all. 'filename' is the name of a file, such as readline.h or libcrypto.a. 'std_dirs' is the list of standard system directories; if the file is found in one of them, no additional directives are needed. 'paths' is a list of additional locations to check; if the file is found in one of them, the resulting list will contain the directory. """ if MACOS: # Honor the MacOSX SDK setting when one was specified. # An SDK is a directory with the same structure as a real # system, but with only header files and libraries. sysroot = macosx_sdk_root() # Check the standard locations for dir_ in std_dirs: f = os.path.join(dir_, filename) if MACOS and is_macosx_sdk_path(dir_): f = os.path.join(sysroot, dir_[1:], filename) if os.path.exists(f): return [] # Check the additional directories for dir_ in paths: f = os.path.join(dir_, filename) if MACOS and is_macosx_sdk_path(dir_): f = os.path.join(sysroot, dir_[1:], filename) if os.path.exists(f): return [dir_] # Not found anywhere return None The provided code snippet includes necessary dependencies for implementing the `find_module_file` function. Write a Python function `def find_module_file(module, dirlist)` to solve the following problem: Find a module in a set of possible folders. If it is not found return the unadorned filename Here is the function: def find_module_file(module, dirlist): """Find a module in a set of possible folders. 
If it is not found return the unadorned filename""" dirs = find_file(module, [], dirlist) if not dirs: return module if len(dirs) > 1: log.info(f"WARNING: multiple copies of {module} found") return os.path.join(dirs[0], module)
Find a module in a set of possible folders. If it is not found return the unadorned filename
185,615
import argparse import functools import os import re import shutil import subprocess import sys import tempfile import zipfile from pathlib import Path def get_appx_layout(ns): VER_DOT = "{}.{}".format(VER_MAJOR, VER_MINOR) PYTHON_DLL_NAME = "python{}{}.dll".format(VER_MAJOR, VER_MINOR) PYTHON_STABLE_DLL_NAME = "python{}.dll".format(VER_MAJOR) PYTHON_ZIP_NAME = "python{}{}.zip".format(VER_MAJOR, VER_MINOR) PYTHON_PTH_NAME = "python{}{}._pth".format(VER_MAJOR, VER_MINOR) PYTHON_CHM_NAME = "python{}{}{}{}.chm".format( VER_MAJOR, VER_MINOR, VER_MICRO, VER_SUFFIX ) def rglob(root, patterns, condition=None): def log_error(msg, *args, **kwargs): def get_pip_layout(ns): def get_props_layout(ns): def get_nuspec_layout(ns): def get_layout(ns): def in_build(f, dest="", new_name=None): n, _, x = f.rpartition(".") n = new_name or n src = ns.build / f if ns.debug and src not in REQUIRED_DLLS: if not src.stem.endswith("_d"): src = src.parent / (src.stem + "_d" + src.suffix) if not n.endswith("_d"): n += "_d" f = n + "." + x yield dest + n + "." + x, src if ns.include_symbols: pdb = src.with_suffix(".pdb") if pdb.is_file(): yield dest + n + ".pdb", pdb if ns.include_dev: lib = src.with_suffix(".lib") if lib.is_file(): yield "libs/" + n + ".lib", lib if ns.include_appxmanifest: yield from in_build("python_uwp.exe", new_name="python{}".format(VER_DOT)) yield from in_build("pythonw_uwp.exe", new_name="pythonw{}".format(VER_DOT)) # For backwards compatibility, but we don't reference these ourselves. 
yield from in_build("python_uwp.exe", new_name="python") yield from in_build("pythonw_uwp.exe", new_name="pythonw") else: yield from in_build("python.exe", new_name="python") yield from in_build("pythonw.exe", new_name="pythonw") yield from in_build(PYTHON_DLL_NAME) if ns.include_launchers and ns.include_appxmanifest: if ns.include_pip: yield from in_build("python_uwp.exe", new_name="pip{}".format(VER_DOT)) if ns.include_idle: yield from in_build("pythonw_uwp.exe", new_name="idle{}".format(VER_DOT)) if ns.include_stable: yield from in_build(PYTHON_STABLE_DLL_NAME) found_any = False for dest, src in rglob(ns.build, "vcruntime*.dll"): found_any = True yield dest, src if not found_any: log_error("Failed to locate vcruntime DLL in the build.") yield "LICENSE.txt", ns.build / "LICENSE.txt" for dest, src in rglob(ns.build, ("*.pyd", "*.dll")): if src.stem.endswith("_d") != bool(ns.debug) and src not in REQUIRED_DLLS: continue if src in EXCLUDE_FROM_PYDS: continue if src in TEST_PYDS_ONLY and not ns.include_tests: continue if src in TCLTK_PYDS_ONLY and not ns.include_tcltk: continue yield from in_build(src.name, dest="" if ns.flat_dlls else "DLLs/") if ns.zip_lib: zip_name = PYTHON_ZIP_NAME yield zip_name, ns.temp / zip_name else: for dest, src in get_lib_layout(ns): yield "Lib/{}".format(dest), src if ns.include_venv: yield from in_build("venvlauncher.exe", "Lib/venv/scripts/nt/", "python") yield from in_build("venvwlauncher.exe", "Lib/venv/scripts/nt/", "pythonw") if ns.include_tools: def _c(d): if d.is_dir(): return d in TOOLS_DIRS return d in TOOLS_FILES for dest, src in rglob(ns.source / "Tools", "**/*", _c): yield "Tools/{}".format(dest), src if ns.include_underpth: yield PYTHON_PTH_NAME, ns.temp / PYTHON_PTH_NAME if ns.include_dev: for dest, src in rglob(ns.source / "Include", "**/*.h"): yield "include/{}".format(dest), src src = ns.source / "PC" / "pyconfig.h" yield "include/pyconfig.h", src for dest, src in get_tcltk_lib(ns): yield dest, src if ns.include_pip: 
for dest, src in get_pip_layout(ns): if not isinstance(src, tuple) and ( src in EXCLUDE_FROM_LIB or src in EXCLUDE_FROM_PACKAGED_LIB ): continue yield dest, src if ns.include_chm: for dest, src in rglob(ns.doc_build / "htmlhelp", PYTHON_CHM_NAME): yield "Doc/{}".format(dest), src if ns.include_html_doc: for dest, src in rglob(ns.doc_build / "html", "**/*"): yield "Doc/html/{}".format(dest), src if ns.include_props: for dest, src in get_props_layout(ns): yield dest, src if ns.include_nuspec: for dest, src in get_nuspec_layout(ns): yield dest, src for dest, src in get_appx_layout(ns): yield dest, src if ns.include_cat: if ns.flat_dlls: yield ns.include_cat.name, ns.include_cat else: yield "DLLs/{}".format(ns.include_cat.name), ns.include_cat
null
185,617
import argparse import functools import os import re import shutil import subprocess import sys import tempfile import zipfile from pathlib import Path class Path(PurePath): """PurePath subclass that can make system calls. Path represents a filesystem path but unlike PurePath, also offers methods to do system calls on path objects. Depending on your system, instantiating a Path will return either a PosixPath or a WindowsPath object. You can also instantiate a PosixPath or WindowsPath directly, but cannot instantiate a WindowsPath on a POSIX system or vice versa. """ _accessor = _normal_accessor __slots__ = () def __new__(cls, *args, **kwargs): if cls is Path: cls = WindowsPath if os.name == 'nt' else PosixPath self = cls._from_parts(args) if not self._flavour.is_supported: raise NotImplementedError("cannot instantiate %r on your system" % (cls.__name__,)) return self def _make_child_relpath(self, part): # This is an optimization used for dir walking. `part` must be # a single part relative to this path. parts = self._parts + [part] return self._from_parsed_parts(self._drv, self._root, parts) def __enter__(self): return self def __exit__(self, t, v, tb): # https://bugs.python.org/issue39682 # In previous versions of pathlib, this method marked this path as # closed; subsequent attempts to perform I/O would raise an IOError. # This functionality was never documented, and had the effect of # making Path objects mutable, contrary to PEP 428. In Python 3.9 the # _closed attribute was removed, and this method made a no-op. # This method and __enter__()/__exit__() should be deprecated and # removed in the future. pass # Public API def cwd(cls): """Return a new path pointing to the current working directory (as returned by os.getcwd()). """ return cls(cls._accessor.getcwd()) def home(cls): """Return a new path pointing to the user's home directory (as returned by os.path.expanduser('~')). 
""" return cls("~").expanduser() def samefile(self, other_path): """Return whether other_path is the same or not as this file (as returned by os.path.samefile()). """ st = self.stat() try: other_st = other_path.stat() except AttributeError: other_st = self._accessor.stat(other_path) return os.path.samestat(st, other_st) def iterdir(self): """Iterate over the files in this directory. Does not yield any result for the special paths '.' and '..'. """ for name in self._accessor.listdir(self): if name in {'.', '..'}: # Yielding a path object for these makes little sense continue yield self._make_child_relpath(name) def glob(self, pattern): """Iterate over this subtree and yield all existing files (of any kind, including directories) matching the given relative pattern. """ sys.audit("pathlib.Path.glob", self, pattern) if not pattern: raise ValueError("Unacceptable pattern: {!r}".format(pattern)) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") selector = _make_selector(tuple(pattern_parts), self._flavour) for p in selector.select_from(self): yield p def rglob(self, pattern): """Recursively yield all existing files (of any kind, including directories) matching the given relative pattern, anywhere in this subtree. """ sys.audit("pathlib.Path.rglob", self, pattern) drv, root, pattern_parts = self._flavour.parse_parts((pattern,)) if drv or root: raise NotImplementedError("Non-relative patterns are unsupported") selector = _make_selector(("**",) + tuple(pattern_parts), self._flavour) for p in selector.select_from(self): yield p def absolute(self): """Return an absolute version of this path. This function works even if the path doesn't point to anything. No normalization is done, i.e. all '.' and '..' will be kept along. Use resolve() to get the canonical path to a file. """ # XXX untested yet! 
if self.is_absolute(): return self # FIXME this must defer to the specific flavour (and, under Windows, # use nt._getfullpathname()) return self._from_parts([self._accessor.getcwd()] + self._parts) def resolve(self, strict=False): """ Make the path absolute, resolving all symlinks on the way and also normalizing it (for example turning slashes into backslashes under Windows). """ def check_eloop(e): winerror = getattr(e, 'winerror', 0) if e.errno == ELOOP or winerror == _WINERROR_CANT_RESOLVE_FILENAME: raise RuntimeError("Symlink loop from %r" % e.filename) try: s = self._accessor.realpath(self, strict=strict) except OSError as e: check_eloop(e) raise p = self._from_parts((s,)) # In non-strict mode, realpath() doesn't raise on symlink loops. # Ensure we get an exception by calling stat() if not strict: try: p.stat() except OSError as e: check_eloop(e) return p def stat(self, *, follow_symlinks=True): """ Return the result of the stat() system call on this path, like os.stat() does. """ return self._accessor.stat(self, follow_symlinks=follow_symlinks) def owner(self): """ Return the login name of the file owner. """ return self._accessor.owner(self) def group(self): """ Return the group name of the file gid. """ return self._accessor.group(self) def open(self, mode='r', buffering=-1, encoding=None, errors=None, newline=None): """ Open the file pointed by this path and return a file object, as the built-in open() function does. """ if "b" not in mode: encoding = io.text_encoding(encoding) return self._accessor.open(self, mode, buffering, encoding, errors, newline) def read_bytes(self): """ Open the file in bytes mode, read it, and close the file. """ with self.open(mode='rb') as f: return f.read() def read_text(self, encoding=None, errors=None): """ Open the file in text mode, read it, and close the file. 
""" encoding = io.text_encoding(encoding) with self.open(mode='r', encoding=encoding, errors=errors) as f: return f.read() def write_bytes(self, data): """ Open the file in bytes mode, write to it, and close the file. """ # type-check for the buffer interface before truncating the file view = memoryview(data) with self.open(mode='wb') as f: return f.write(view) def write_text(self, data, encoding=None, errors=None, newline=None): """ Open the file in text mode, write to it, and close the file. """ if not isinstance(data, str): raise TypeError('data must be str, not %s' % data.__class__.__name__) encoding = io.text_encoding(encoding) with self.open(mode='w', encoding=encoding, errors=errors, newline=newline) as f: return f.write(data) def readlink(self): """ Return the path to which the symbolic link points. """ path = self._accessor.readlink(self) return self._from_parts((path,)) def touch(self, mode=0o666, exist_ok=True): """ Create this file with the given access mode, if it doesn't exist. """ self._accessor.touch(self, mode, exist_ok) def mkdir(self, mode=0o777, parents=False, exist_ok=False): """ Create a new directory at this given path. """ try: self._accessor.mkdir(self, mode) except FileNotFoundError: if not parents or self.parent == self: raise self.parent.mkdir(parents=True, exist_ok=True) self.mkdir(mode, parents=False, exist_ok=exist_ok) except OSError: # Cannot rely on checking for EEXIST, since the operating system # could give priority to other errors like EACCES or EROFS if not exist_ok or not self.is_dir(): raise def chmod(self, mode, *, follow_symlinks=True): """ Change the permissions of the path, like os.chmod(). """ self._accessor.chmod(self, mode, follow_symlinks=follow_symlinks) def lchmod(self, mode): """ Like chmod(), except if the path points to a symlink, the symlink's permissions are changed, rather than its target's. """ self.chmod(mode, follow_symlinks=False) def unlink(self, missing_ok=False): """ Remove this file or link. 
If the path is a directory, use rmdir() instead. """ try: self._accessor.unlink(self) except FileNotFoundError: if not missing_ok: raise def rmdir(self): """ Remove this directory. The directory must be empty. """ self._accessor.rmdir(self) def lstat(self): """ Like stat(), except if the path points to a symlink, the symlink's status information is returned, rather than its target's. """ return self.stat(follow_symlinks=False) def rename(self, target): """ Rename this path to the target path. The target path may be absolute or relative. Relative paths are interpreted relative to the current working directory, *not* the directory of the Path object. Returns the new Path instance pointing to the target path. """ self._accessor.rename(self, target) return self.__class__(target) def replace(self, target): """ Rename this path to the target path, overwriting if that path exists. The target path may be absolute or relative. Relative paths are interpreted relative to the current working directory, *not* the directory of the Path object. Returns the new Path instance pointing to the target path. """ self._accessor.replace(self, target) return self.__class__(target) def symlink_to(self, target, target_is_directory=False): """ Make this path a symlink pointing to the target path. Note the order of arguments (link, target) is the reverse of os.symlink. """ self._accessor.symlink(target, self, target_is_directory) def hardlink_to(self, target): """ Make this path a hard link pointing to the same file as *target*. Note the order of arguments (self, target) is the reverse of os.link's. """ self._accessor.link(target, self) def link_to(self, target): """ Make the target path a hard link pointing to this path. Note this function does not make this path a hard link to *target*, despite the implication of the function and argument names. The order of arguments (target, link) is the reverse of Path.symlink_to, but matches that of os.link. 
Deprecated since Python 3.10 and scheduled for removal in Python 3.12. Use `hardlink_to()` instead. """ warnings.warn("pathlib.Path.link_to() is deprecated and is scheduled " "for removal in Python 3.12. " "Use pathlib.Path.hardlink_to() instead.", DeprecationWarning, stacklevel=2) self._accessor.link(self, target) # Convenience functions for querying the stat results def exists(self): """ Whether this path exists. """ try: self.stat() except OSError as e: if not _ignore_error(e): raise return False except ValueError: # Non-encodable path return False return True def is_dir(self): """ Whether this path is a directory. """ try: return S_ISDIR(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_file(self): """ Whether this path is a regular file (also True for symlinks pointing to regular files). """ try: return S_ISREG(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_mount(self): """ Check if this path is a POSIX mount point """ # Need to exist and be a dir if not self.exists() or not self.is_dir(): return False try: parent_dev = self.parent.stat().st_dev except OSError: return False dev = self.stat().st_dev if dev != parent_dev: return True ino = self.stat().st_ino parent_ino = self.parent.stat().st_ino return ino == parent_ino def is_symlink(self): """ Whether this path is a symbolic link. 
""" try: return S_ISLNK(self.lstat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist return False except ValueError: # Non-encodable path return False def is_block_device(self): """ Whether this path is a block device. """ try: return S_ISBLK(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_char_device(self): """ Whether this path is a character device. """ try: return S_ISCHR(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_fifo(self): """ Whether this path is a FIFO. """ try: return S_ISFIFO(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def is_socket(self): """ Whether this path is a socket. 
""" try: return S_ISSOCK(self.stat().st_mode) except OSError as e: if not _ignore_error(e): raise # Path doesn't exist or is a broken symlink # (see http://web.archive.org/web/20200623061726/https://bitbucket.org/pitrou/pathlib/issues/12/ ) return False except ValueError: # Non-encodable path return False def expanduser(self): """ Return a new path with expanded ~ and ~user constructs (as returned by os.path.expanduser) """ if (not (self._drv or self._root) and self._parts and self._parts[0][:1] == '~'): homedir = self._accessor.expanduser(self._parts[0]) if homedir[:1] == "~": raise RuntimeError("Could not determine home directory.") return self._from_parts([homedir] + self._parts[1:]) return self import os def write_catalog(target, files): with target.open("w", encoding="utf-8") as cat: cat.write(CATALOG_TEMPLATE.format(target=target)) cat.writelines("<HASH>{}={}\n".format(n, f) for n, f in files if can_sign(f)) import os import os def log_debug(msg, *args, **kwargs): return LOG.debug(BraceMessage(msg, *args, **kwargs)) def log_info(msg, *args, **kwargs): return LOG.info(BraceMessage(msg, *args, **kwargs)) import os import shutil import os import os def copy_files(files, ns): if ns.copy: ns.copy.mkdir(parents=True, exist_ok=True) try: total = len(files) except TypeError: total = None count = 0 zip_file = _create_zip_file(ns) try: need_compile = [] in_catalog = [] for dest, src in files: count += 1 if count % 10 == 0: if total: log_info("Processed {:>4} of {} files", count, total) else: log_info("Processed {} files", count) log_debug("Processing {!s}", src) if isinstance(src, tuple): src, content = src if ns.copy: log_debug("Copy {} -> {}", src, ns.copy / dest) (ns.copy / dest).parent.mkdir(parents=True, exist_ok=True) with open(ns.copy / dest, "wb") as f: f.write(content) if ns.zip: log_debug("Zip {} into {}", src, ns.zip) zip_file.writestr(str(dest), content) continue if ( ns.precompile and src in PY_FILES and src not in EXCLUDE_FROM_COMPILE and src.parent not 
in DATA_DIRS and os.path.normcase(str(dest)).startswith(os.path.normcase("Lib")) ): if ns.copy: need_compile.append((dest, ns.copy / dest)) else: (ns.temp / "Lib" / dest).parent.mkdir(parents=True, exist_ok=True) copy_if_modified(src, ns.temp / "Lib" / dest) need_compile.append((dest, ns.temp / "Lib" / dest)) if src not in EXCLUDE_FROM_CATALOG: in_catalog.append((src.name, src)) if ns.copy: log_debug("Copy {} -> {}", src, ns.copy / dest) (ns.copy / dest).parent.mkdir(parents=True, exist_ok=True) try: copy_if_modified(src, ns.copy / dest) except shutil.SameFileError: pass if ns.zip: log_debug("Zip {} into {}", src, ns.zip) zip_file.write(src, str(dest)) if need_compile: for dest, src in need_compile: compiled = [ _compile_one_py(src, None, dest, optimize=0), _compile_one_py(src, None, dest, optimize=1), _compile_one_py(src, None, dest, optimize=2), ] for c in compiled: if not c: continue cdest = Path(dest).parent / Path(c).relative_to(src.parent) if ns.zip: log_debug("Zip {} into {}", c, ns.zip) zip_file.write(c, str(cdest)) in_catalog.append((cdest.name, cdest)) if ns.catalog: # Just write out the CDF now. Compilation and signing is # an extra step log_info("Generating {}", ns.catalog) ns.catalog.parent.mkdir(parents=True, exist_ok=True) write_catalog(ns.catalog, in_catalog) finally: if zip_file: zip_file.close()
null
185,620
import logging import sys LOG = None import logging import sys if sys.platform == 'win32' and ' 32 bit (ARM)' in sys.version: # bpo-37553: test_socket.SendfileUsingSendTest is taking longer than 2 # seconds on Windows ARM32 buildbot LOOPBACK_TIMEOUT = 10 elif sys.platform == 'vxworks': LOOPBACK_TIMEOUT = 10 if sys.platform not in ('win32', 'vxworks'): unix_shell = '/system/bin/sh' if is_android else '/bin/sh' else: unix_shell = None def configure_logger(ns): global LOG if LOG: return LOG = logging.getLogger("make_layout") LOG.level = logging.DEBUG if ns.v: s_level = max(logging.ERROR - ns.v * 10, logging.DEBUG) f_level = max(logging.WARNING - ns.v * 10, logging.DEBUG) else: s_level = logging.ERROR f_level = logging.INFO handler = logging.StreamHandler(sys.stdout) handler.setFormatter(logging.Formatter("{levelname:8s} {message}", style="{")) handler.setLevel(s_level) LOG.addHandler(handler) if ns.log: handler = logging.FileHandler(ns.log, encoding="utf-8", delay=True) handler.setFormatter( logging.Formatter("[{asctime}]{levelname:8s}: {message}", style="{") ) handler.setLevel(f_level) LOG.addHandler(handler)
null
185,623
OPTIONS = { "stable": {"help": "stable ABI stub"}, "pip": {"help": "pip"}, "pip-user": {"help": "pip.ini file for default --user"}, "distutils": {"help": "distutils"}, "tcltk": {"help": "Tcl, Tk and tkinter"}, "idle": {"help": "Idle"}, "tests": {"help": "test suite"}, "tools": {"help": "tools"}, "venv": {"help": "venv"}, "dev": {"help": "headers and libs"}, "symbols": {"help": "symbols"}, "underpth": {"help": "a python._pth file", "not-in-all": True}, "launchers": {"help": "specific launchers"}, "appxmanifest": {"help": "an appxmanifest"}, "props": {"help": "a python.props file"}, "nuspec": {"help": "a python.nuspec file"}, "chm": {"help": "the CHM documentation"}, "html-doc": {"help": "the HTML documentation"}, } PRESETS = { "appx": { "help": "APPX package", "options": [ "stable", "pip", "pip-user", "distutils", "tcltk", "idle", "venv", "dev", "launchers", "appxmanifest", # XXX: Disabled for now "precompile", ], }, "nuget": { "help": "nuget package", "options": [ "dev", "tools", "pip", "stable", "distutils", "venv", "props", "nuspec", ], }, "iot": {"help": "Windows IoT Core", "options": ["stable", "pip"]}, "default": { "help": "development kit package", "options": [ "stable", "pip", "distutils", "tcltk", "idle", "tests", "tools", "venv", "dev", "symbols", "chm", ], }, "embed": { "help": "embeddable package", "options": ["stable", "zip-lib", "flat-dlls", "underpth", "precompile"], }, } def get_argparse_options(): for opt, info in OPTIONS.items(): help = "When specified, includes {}".format(info["help"]) if info.get("not-in-all"): help = "{}. Not affected by --include-all".format(help) yield "--include-{}".format(opt), help for opt, info in PRESETS.items(): help = "When specified, includes default options for {}".format(info["help"]) yield "--preset-{}".format(opt), help
null
185,624
OPTIONS = { "stable": {"help": "stable ABI stub"}, "pip": {"help": "pip"}, "pip-user": {"help": "pip.ini file for default --user"}, "distutils": {"help": "distutils"}, "tcltk": {"help": "Tcl, Tk and tkinter"}, "idle": {"help": "Idle"}, "tests": {"help": "test suite"}, "tools": {"help": "tools"}, "venv": {"help": "venv"}, "dev": {"help": "headers and libs"}, "symbols": {"help": "symbols"}, "underpth": {"help": "a python._pth file", "not-in-all": True}, "launchers": {"help": "specific launchers"}, "appxmanifest": {"help": "an appxmanifest"}, "props": {"help": "a python.props file"}, "nuspec": {"help": "a python.nuspec file"}, "chm": {"help": "the CHM documentation"}, "html-doc": {"help": "the HTML documentation"}, } PRESETS = { "appx": { "help": "APPX package", "options": [ "stable", "pip", "pip-user", "distutils", "tcltk", "idle", "venv", "dev", "launchers", "appxmanifest", # XXX: Disabled for now "precompile", ], }, "nuget": { "help": "nuget package", "options": [ "dev", "tools", "pip", "stable", "distutils", "venv", "props", "nuspec", ], }, "iot": {"help": "Windows IoT Core", "options": ["stable", "pip"]}, "default": { "help": "development kit package", "options": [ "stable", "pip", "distutils", "tcltk", "idle", "tests", "tools", "venv", "dev", "symbols", "chm", ], }, "embed": { "help": "embeddable package", "options": ["stable", "zip-lib", "flat-dlls", "underpth", "precompile"], }, } def ns_get(ns, key, default=False): return getattr(ns, key.replace("-", "_"), default) def ns_set(ns, key, value=True): k1 = key.replace("-", "_") k2 = "include_{}".format(k1) if hasattr(ns, k2): setattr(ns, k2, value) elif hasattr(ns, k1): setattr(ns, k1, value) else: raise AttributeError("no argument named '{}'".format(k1)) def update_presets(ns): for preset, info in PRESETS.items(): if ns_get(ns, "preset-{}".format(preset)): for opt in info["options"]: ns_set(ns, opt) if ns.include_all: for opt in OPTIONS: if OPTIONS[opt].get("not-in-all"): continue ns_set(ns, opt)
null
185,625
import os import re import struct import sys import sys if sys.platform == 'win32' and ' 32 bit (ARM)' in sys.version: # bpo-37553: test_socket.SendfileUsingSendTest is taking longer than 2 # seconds on Windows ARM32 buildbot LOOPBACK_TIMEOUT = 10 elif sys.platform == 'vxworks': LOOPBACK_TIMEOUT = 10 if sys.platform not in ('win32', 'vxworks'): unix_shell = '/system/bin/sh' if is_android else '/bin/sh' else: unix_shell = None def _unpack_hexversion(): try: hexversion = int(os.getenv("PYTHON_HEXVERSION"), 16) except (TypeError, ValueError): hexversion = sys.hexversion return struct.pack(">i", hexversion)
null
185,629
import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp def getVersion(): global _cache_getVersion if _cache_getVersion is None: _cache_getVersion = grepValue( os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION') return _cache_getVersion def getFullVersion(): global _cache_getFullVersion if _cache_getFullVersion is not None: return _cache_getFullVersion fn = os.path.join(SRCDIR, 'Include', 'patchlevel.h') for ln in open(fn): if 'PY_VERSION' in ln: _cache_getFullVersion = ln.split()[-1][1:-1] return _cache_getFullVersion raise RuntimeError("Cannot find full version??") FW_PREFIX = ["Library", "Frameworks", "Python.framework"] FW_VERSION_PREFIX = "--undefined--" FW_SSL_DIRECTORY = "--undefined--" WORKDIR = "/tmp/_py" DEPSRC = os.path.join(WORKDIR, 'third-party') DEPSRC = os.path.expanduser('~/Universal/other-sources') universal_opts_map = { 'universal2': ('arm64', 'x86_64'), '32-bit': ('i386', 'ppc',), '64-bit': ('x86_64', 'ppc64',), 'intel': ('i386', 'x86_64'), 'intel-32': ('i386',), 'intel-64': ('x86_64',), '3-way': ('ppc', 'i386', 'x86_64'), 'all': ('i386', 'ppc', 'x86_64', 'ppc64',) } default_target_map = { 'universal2': '10.9', '64-bit': '10.5', '3-way': '10.5', 'intel': '10.5', 'intel-32': '10.4', 'intel-64': '10.5', 'all': '10.5', } UNIVERSALOPTS = tuple(universal_opts_map.keys()) UNIVERSALARCHS = '32-bit' ARCHLIST = universal_opts_map[UNIVERSALARCHS] SRCDIR = os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath(__file__ )))) DEPTARGET = '10.5' def getTargetCompilers(): target_cc_map = { '10.4': ('gcc-4.0', 'g++-4.0'), '10.5': ('gcc', 'g++'), '10.6': ('gcc', 'g++'), '10.7': ('gcc', 'g++'), '10.8': ('gcc', 'g++'), } return target_cc_map.get(DEPTARGET, ('clang', 'clang++') ) CC, CXX = getTargetCompilers() USAGE = textwrap.dedent("""\ Usage: build_python [options] Options: -? 
or -h: Show this message -b DIR --build-dir=DIR: Create build here (default: %(WORKDIR)r) --third-party=DIR: Store third-party sources here (default: %(DEPSRC)r) --sdk-path=DIR: Location of the SDK (deprecated, use SDKROOT env variable) --src-dir=DIR: Location of the Python sources (default: %(SRCDIR)r) --dep-target=10.n macOS deployment target (default: %(DEPTARGET)r) --universal-archs=x universal architectures (options: %(UNIVERSALOPTS)r, default: %(UNIVERSALARCHS)r) """)% globals() The provided code snippet includes necessary dependencies for implementing the `parseOptions` function. Write a Python function `def parseOptions(args=None)` to solve the following problem: Parse arguments and update global settings. Here is the function: def parseOptions(args=None): """ Parse arguments and update global settings. """ global WORKDIR, DEPSRC, SRCDIR, DEPTARGET global UNIVERSALOPTS, UNIVERSALARCHS, ARCHLIST, CC, CXX global FW_VERSION_PREFIX global FW_SSL_DIRECTORY if args is None: args = sys.argv[1:] try: options, args = getopt.getopt(args, '?hb', [ 'build-dir=', 'third-party=', 'sdk-path=' , 'src-dir=', 'dep-target=', 'universal-archs=', 'help' ]) except getopt.GetoptError: print(sys.exc_info()[1]) sys.exit(1) if args: print("Additional arguments") sys.exit(1) deptarget = None for k, v in options: if k in ('-h', '-?', '--help'): print(USAGE) sys.exit(0) elif k in ('-d', '--build-dir'): WORKDIR=v elif k in ('--third-party',): DEPSRC=v elif k in ('--sdk-path',): print(" WARNING: --sdk-path is no longer supported") elif k in ('--src-dir',): SRCDIR=v elif k in ('--dep-target', ): DEPTARGET=v deptarget=v elif k in ('--universal-archs', ): if v in UNIVERSALOPTS: UNIVERSALARCHS = v ARCHLIST = universal_opts_map[UNIVERSALARCHS] if deptarget is None: # Select alternate default deployment # target DEPTARGET = default_target_map.get(v, '10.5') else: raise NotImplementedError(v) else: raise NotImplementedError(k) SRCDIR=os.path.abspath(SRCDIR) WORKDIR=os.path.abspath(WORKDIR) 
DEPSRC=os.path.abspath(DEPSRC) CC, CXX = getTargetCompilers() FW_VERSION_PREFIX = FW_PREFIX[:] + ["Versions", getVersion()] FW_SSL_DIRECTORY = FW_VERSION_PREFIX[:] + ["etc", "openssl"] print("-- Settings:") print(" * Source directory: %s" % SRCDIR) print(" * Build directory: %s" % WORKDIR) print(" * Third-party source: %s" % DEPSRC) print(" * Deployment target: %s" % DEPTARGET) print(" * Universal archs: %s" % str(ARCHLIST)) print(" * C compiler: %s" % CC) print(" * C++ compiler: %s" % CXX) print("") print(" -- Building a Python %s framework at patch level %s" % (getVersion(), getFullVersion())) print("")
Parse arguments and update global settings.
185,630
import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp WORKDIR = "/tmp/_py" ARCHLIST = universal_opts_map[UNIVERSALARCHS] def library_recipes(): result = [] # Since Apple removed the header files for the deprecated system # OpenSSL as of the Xcode 7 release (for OS X 10.10+), we do not # have much choice but to build our own copy here, too. result.extend([ dict( name="OpenSSL 1.1.1n", url="https://www.openssl.org/source/openssl-1.1.1n.tar.gz", checksum='2aad5635f9bb338bc2c6b7d19cbc9676', buildrecipe=build_universal_openssl, configure=None, install=None, ), ]) if internalTk(): if useOldTk(): tcl_tk_ver='8.6.8' tcl_checksum='81656d3367af032e0ae6157eff134f89' tk_checksum='5e0faecba458ee1386078fb228d008ba' tk_patches = ['tk868_on_10_8_10_9.patch'] else: tcl_tk_ver='8.6.12' tcl_checksum='87ea890821d2221f2ab5157bc5eb885f' tk_checksum='1d6dcf6120356e3d211e056dff5e462a' tk_patches = [ ] result.extend([ dict( name="Tcl %s"%(tcl_tk_ver,), url="ftp://ftp.tcl.tk/pub/tcl//tcl8_6/tcl%s-src.tar.gz"%(tcl_tk_ver,), checksum=tcl_checksum, buildDir="unix", configure_pre=[ '--enable-shared', '--enable-threads', '--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),), ], useLDFlags=False, buildrecipe=tweak_tcl_build, install='make TCL_LIBRARY=%(TCL_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s DESTDIR=%(DESTDIR)s'%{ "DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')), "TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.6'%(getVersion())), }, ), dict( name="Tk %s"%(tcl_tk_ver,), url="ftp://ftp.tcl.tk/pub/tcl//tcl8_6/tk%s-src.tar.gz"%(tcl_tk_ver,), checksum=tk_checksum, patches=tk_patches, buildDir="unix", configure_pre=[ '--enable-aqua', '--enable-shared', '--enable-threads', '--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib'%(getVersion(),), ], useLDFlags=False, install='make TCL_LIBRARY=%(TCL_LIBRARY)s TK_LIBRARY=%(TK_LIBRARY)s && make install TCL_LIBRARY=%(TCL_LIBRARY)s 
TK_LIBRARY=%(TK_LIBRARY)s DESTDIR=%(DESTDIR)s'%{ "DESTDIR": shellQuote(os.path.join(WORKDIR, 'libraries')), "TCL_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tcl8.6'%(getVersion())), "TK_LIBRARY": shellQuote('/Library/Frameworks/Python.framework/Versions/%s/lib/tk8.6'%(getVersion())), }, ), ]) if PYTHON_3: result.extend([ dict( name="XZ 5.2.3", url="http://tukaani.org/xz/xz-5.2.3.tar.gz", checksum='ef68674fb47a8b8e741b34e429d86e9d', configure_pre=[ '--disable-dependency-tracking', ] ), ]) result.extend([ dict( name="NCurses 5.9", url="http://ftp.gnu.org/pub/gnu/ncurses/ncurses-5.9.tar.gz", checksum='8cb9c412e5f2d96bc6f459aa8c6282a1', configure_pre=[ "--enable-widec", "--without-cxx", "--without-cxx-binding", "--without-ada", "--without-curses-h", "--enable-shared", "--with-shared", "--without-debug", "--without-normal", "--without-tests", "--without-manpages", "--datadir=/usr/share", "--sysconfdir=/etc", "--sharedstatedir=/usr/com", "--with-terminfo-dirs=/usr/share/terminfo", "--with-default-terminfo-dir=/usr/share/terminfo", "--libdir=/Library/Frameworks/Python.framework/Versions/%s/lib"%(getVersion(),), ], patchscripts=[ ("ftp://ftp.invisible-island.net/ncurses//5.9/ncurses-5.9-20120616-patch.sh.bz2", "f54bf02a349f96a7c4f0d00922f3a0d4"), ], useLDFlags=False, install='make && make install DESTDIR=%s && cd %s/usr/local/lib && ln -fs ../../../Library/Frameworks/Python.framework/Versions/%s/lib/lib* .'%( shellQuote(os.path.join(WORKDIR, 'libraries')), shellQuote(os.path.join(WORKDIR, 'libraries')), getVersion(), ), ), dict( name="SQLite 3.37.2", url="https://sqlite.org/2022/sqlite-autoconf-3370200.tar.gz", checksum='683cc5312ee74e71079c14d24b7a6d27', extra_cflags=('-Os ' '-DSQLITE_ENABLE_FTS5 ' '-DSQLITE_ENABLE_FTS4 ' '-DSQLITE_ENABLE_FTS3_PARENTHESIS ' '-DSQLITE_ENABLE_JSON1 ' '-DSQLITE_ENABLE_RTREE ' '-DSQLITE_OMIT_AUTOINIT ' '-DSQLITE_TCL=0 ' ), configure_pre=[ '--enable-threadsafe', '--enable-shared=no', '--enable-static=yes', 
'--disable-readline', '--disable-dependency-tracking', ] ), ]) if not PYTHON_3: result.extend([ dict( name="Sleepycat DB 4.7.25", url="http://download.oracle.com/berkeley-db/db-4.7.25.tar.gz", checksum='ec2b87e833779681a0c3a814aa71359e', buildDir="build_unix", configure="../dist/configure", configure_pre=[ '--includedir=/usr/local/include/db4', ] ), ]) return result def buildRecipe(recipe, basedir, archList): """ Build software using a recipe. This function does the 'configure;make;make install' dance for C software, with a possibility to customize this process, basically a poor-mans DarwinPorts. """ curdir = os.getcwd() name = recipe['name'] THIRD_PARTY_LIBS.append(name) url = recipe['url'] configure = recipe.get('configure', './configure') buildrecipe = recipe.get('buildrecipe', None) install = recipe.get('install', 'make && make install DESTDIR=%s'%( shellQuote(basedir))) archiveName = os.path.split(url)[-1] sourceArchive = os.path.join(DEPSRC, archiveName) if not os.path.exists(DEPSRC): os.mkdir(DEPSRC) verifyThirdPartyFile(url, recipe['checksum'], sourceArchive) print("Extracting archive for %s"%(name,)) buildDir=os.path.join(WORKDIR, '_bld') if not os.path.exists(buildDir): os.mkdir(buildDir) workDir = extractArchive(buildDir, sourceArchive) os.chdir(workDir) for patch in recipe.get('patches', ()): if isinstance(patch, tuple): url, checksum = patch fn = os.path.join(DEPSRC, os.path.basename(url)) verifyThirdPartyFile(url, checksum, fn) else: # patch is a file in the source directory fn = os.path.join(curdir, patch) runCommand('patch -p%s < %s'%(recipe.get('patchlevel', 1), shellQuote(fn),)) for patchscript in recipe.get('patchscripts', ()): if isinstance(patchscript, tuple): url, checksum = patchscript fn = os.path.join(DEPSRC, os.path.basename(url)) verifyThirdPartyFile(url, checksum, fn) else: # patch is a file in the source directory fn = os.path.join(curdir, patchscript) if fn.endswith('.bz2'): runCommand('bunzip2 -fk %s' % shellQuote(fn)) fn = fn[:-4] 
runCommand('sh %s' % shellQuote(fn)) os.unlink(fn) if 'buildDir' in recipe: os.chdir(recipe['buildDir']) if configure is not None: configure_args = [ "--prefix=/usr/local", "--enable-static", "--disable-shared", #"CPP=gcc -arch %s -E"%(' -arch '.join(archList,),), ] if 'configure_pre' in recipe: args = list(recipe['configure_pre']) if '--disable-static' in args: configure_args.remove('--enable-static') if '--enable-shared' in args: configure_args.remove('--disable-shared') configure_args.extend(args) if recipe.get('useLDFlags', 1): configure_args.extend([ "CFLAGS=%s-mmacosx-version-min=%s -arch %s " "-I%s/usr/local/include"%( recipe.get('extra_cflags', ''), DEPTARGET, ' -arch '.join(archList), shellQuote(basedir)[1:-1],), "LDFLAGS=-mmacosx-version-min=%s -L%s/usr/local/lib -arch %s"%( DEPTARGET, shellQuote(basedir)[1:-1], ' -arch '.join(archList)), ]) else: configure_args.extend([ "CFLAGS=%s-mmacosx-version-min=%s -arch %s " "-I%s/usr/local/include"%( recipe.get('extra_cflags', ''), DEPTARGET, ' -arch '.join(archList), shellQuote(basedir)[1:-1],), ]) if 'configure_post' in recipe: configure_args = configure_args + list(recipe['configure_post']) configure_args.insert(0, configure) configure_args = [ shellQuote(a) for a in configure_args ] print("Running configure for %s"%(name,)) runCommand(' '.join(configure_args) + ' 2>&1') if buildrecipe is not None: # call special-case build recipe, e.g. for openssl buildrecipe(basedir, archList) if install is not None: print("Running install for %s"%(name,)) runCommand('{ ' + install + ' ;} 2>&1') print("Done %s"%(name,)) print("") os.chdir(curdir) The provided code snippet includes necessary dependencies for implementing the `buildLibraries` function. 
Write a Python function `def buildLibraries()` to solve the following problem: Build our dependencies into $WORKDIR/libraries/usr/local Here is the function: def buildLibraries(): """ Build our dependencies into $WORKDIR/libraries/usr/local """ print("") print("Building required libraries") print("") universal = os.path.join(WORKDIR, 'libraries') os.mkdir(universal) os.makedirs(os.path.join(universal, 'usr', 'local', 'lib')) os.makedirs(os.path.join(universal, 'usr', 'local', 'include')) for recipe in library_recipes(): buildRecipe(recipe, universal, ARCHLIST)
Build our dependencies into $WORKDIR/libraries/usr/local
185,632
import platform, os, sys, getopt, textwrap, shutil, stat, time, pwd, grp STAT_0o775 = ( stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH ) RUNNING_ON_PYTHON2 = sys.version_info.major == 2 if RUNNING_ON_PYTHON2: from plistlib import writePlist else: from plistlib import dump def shellQuote(value): """ Return the string value in a form that can safely be inserted into a shell command. """ return "'%s'"%(value.replace("'", "'\"'\"'")) def grepValue(fn, variable): """ Return the unquoted value of a variable from a file.. QUOTED_VALUE='quotes' -> str('quotes') UNQUOTED_VALUE=noquotes -> str('noquotes') """ variable = variable + '=' for ln in open(fn, 'r'): if ln.startswith(variable): value = ln[len(variable):].strip() return value.strip("\"'") raise RuntimeError("Cannot find variable %s" % variable[:-1]) def getVersion(): global _cache_getVersion if _cache_getVersion is None: _cache_getVersion = grepValue( os.path.join(SRCDIR, 'configure'), 'PACKAGE_VERSION') return _cache_getVersion def getVersionMajorMinor(): return tuple([int(n) for n in getVersion().split('.', 2)]) WORKDIR = "/tmp/_py" UNIVERSALARCHS = '32-bit' SRCDIR = os.path.dirname( os.path.dirname( os.path.dirname( os.path.abspath(__file__ )))) PYTHON_3 = getVersionMajorMinor() >= (3, 0) EXPECTED_SHARED_LIBS = {} def internalTk(): return getDeptargetTuple() >= (10, 6) def compilerCanOptimize(): """ Return True iff the default Xcode version can use PGO and LTO """ # bpo-42235: The version check is pretty conservative, can be # adjusted after testing mac_ver = tuple(map(int, platform.mac_ver()[0].split('.'))) return mac_ver >= (10, 15) def fatal(msg): """ A fatal error, bail out. """ sys.stderr.write('FATAL: ') sys.stderr.write(msg) sys.stderr.write('\n') sys.exit(1) def runCommand(commandline): """ Run a command and raise RuntimeError if it fails. Output is suppressed unless the command fails. 
""" fd = os.popen(commandline, 'r') data = fd.read() xit = fd.close() if xit is not None: sys.stdout.write(data) raise RuntimeError("command failed: %s"%(commandline,)) if VERBOSE: sys.stdout.write(data); sys.stdout.flush() def captureCommand(commandline): fd = os.popen(commandline, 'r') data = fd.read() xit = fd.close() if xit is not None: sys.stdout.write(data) raise RuntimeError("command failed: %s"%(commandline,)) return data def buildPython(): print("Building a universal python for %s architectures" % UNIVERSALARCHS) buildDir = os.path.join(WORKDIR, '_bld', 'python') rootDir = os.path.join(WORKDIR, '_root') if os.path.exists(buildDir): shutil.rmtree(buildDir) if os.path.exists(rootDir): shutil.rmtree(rootDir) os.makedirs(buildDir) os.makedirs(rootDir) os.makedirs(os.path.join(rootDir, 'empty-dir')) curdir = os.getcwd() os.chdir(buildDir) # Extract the version from the configure file, needed to calculate # several paths. version = getVersion() # Since the extra libs are not in their installed framework location # during the build, augment the library path so that the interpreter # will find them during its extension import sanity checks. 
print("Running configure...") runCommand("%s -C --enable-framework --enable-universalsdk=/ " "--with-universal-archs=%s " "%s " "%s " "%s " "%s " "%s " "%s " "LDFLAGS='-g -L%s/libraries/usr/local/lib' " "CFLAGS='-g -I%s/libraries/usr/local/include' 2>&1"%( shellQuote(os.path.join(SRCDIR, 'configure')), UNIVERSALARCHS, (' ', '--with-computed-gotos ')[PYTHON_3], (' ', '--without-ensurepip ')[PYTHON_3], (' ', "--with-openssl='%s/libraries/usr/local'"%( shellQuote(WORKDIR)[1:-1],))[PYTHON_3], (' ', "--with-tcltk-includes='-I%s/libraries/usr/local/include'"%( shellQuote(WORKDIR)[1:-1],))[internalTk()], (' ', "--with-tcltk-libs='-L%s/libraries/usr/local/lib -ltcl8.6 -ltk8.6'"%( shellQuote(WORKDIR)[1:-1],))[internalTk()], (' ', "--enable-optimizations --with-lto")[compilerCanOptimize()], shellQuote(WORKDIR)[1:-1], shellQuote(WORKDIR)[1:-1])) # As of macOS 10.11 with SYSTEM INTEGRITY PROTECTION, DYLD_* # environment variables are no longer automatically inherited # by child processes from their parents. We used to just set # DYLD_LIBRARY_PATH, pointing to the third-party libs, # in build-installer.py's process environment and it was # passed through the make utility into the environment of # setup.py. Instead, we now append DYLD_LIBRARY_PATH to # the existing RUNSHARED configuration value when we call # make for extension module builds. runshared_for_make = "".join([ " RUNSHARED=", "'", grepValue("Makefile", "RUNSHARED"), ' DYLD_LIBRARY_PATH=', os.path.join(WORKDIR, 'libraries', 'usr', 'local', 'lib'), "'" ]) # Look for environment value BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS # and, if defined, append its value to the make command. This allows # us to pass in version control tags, like GITTAG, to a build from a # tarball rather than from a vcs checkout, thus eliminating the need # to have a working copy of the vcs program on the build machine. 
# # A typical use might be: # export BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS=" \ # GITVERSION='echo 123456789a' \ # GITTAG='echo v3.6.0' \ # GITBRANCH='echo 3.6'" make_extras = os.getenv("BUILDINSTALLER_BUILDPYTHON_MAKE_EXTRAS") if make_extras: make_cmd = "make " + make_extras + runshared_for_make else: make_cmd = "make" + runshared_for_make print("Running " + make_cmd) runCommand(make_cmd) make_cmd = "make install DESTDIR=%s %s"%( shellQuote(rootDir), runshared_for_make) print("Running " + make_cmd) runCommand(make_cmd) make_cmd = "make frameworkinstallextras DESTDIR=%s %s"%( shellQuote(rootDir), runshared_for_make) print("Running " + make_cmd) runCommand(make_cmd) print("Copying required shared libraries") if os.path.exists(os.path.join(WORKDIR, 'libraries', 'Library')): build_lib_dir = os.path.join( WORKDIR, 'libraries', 'Library', 'Frameworks', 'Python.framework', 'Versions', getVersion(), 'lib') fw_lib_dir = os.path.join( WORKDIR, '_root', 'Library', 'Frameworks', 'Python.framework', 'Versions', getVersion(), 'lib') if internalTk(): # move Tcl and Tk pkgconfig files runCommand("mv %s/pkgconfig/* %s/pkgconfig"%( shellQuote(build_lib_dir), shellQuote(fw_lib_dir) )) runCommand("rm -r %s/pkgconfig"%( shellQuote(build_lib_dir), )) runCommand("mv %s/* %s"%( shellQuote(build_lib_dir), shellQuote(fw_lib_dir) )) frmDir = os.path.join(rootDir, 'Library', 'Frameworks', 'Python.framework') frmDirVersioned = os.path.join(frmDir, 'Versions', version) path_to_lib = os.path.join(frmDirVersioned, 'lib', 'python%s'%(version,)) # create directory for OpenSSL certificates sslDir = os.path.join(frmDirVersioned, 'etc', 'openssl') os.makedirs(sslDir) print("Fix file modes") gid = grp.getgrnam('admin').gr_gid shared_lib_error = False for dirpath, dirnames, filenames in os.walk(frmDir): for dn in dirnames: os.chmod(os.path.join(dirpath, dn), STAT_0o775) os.chown(os.path.join(dirpath, dn), -1, gid) for fn in filenames: if os.path.islink(fn): continue # "chmod g+w $fn" p = 
os.path.join(dirpath, fn) st = os.stat(p) os.chmod(p, stat.S_IMODE(st.st_mode) | stat.S_IWGRP) os.chown(p, -1, gid) if fn in EXPECTED_SHARED_LIBS: # check to see that this file was linked with the # expected library path and version data = captureCommand("otool -L %s" % shellQuote(p)) for sl in EXPECTED_SHARED_LIBS[fn]: if ("\t%s " % sl) not in data: print("Expected shared lib %s was not linked with %s" % (sl, p)) shared_lib_error = True if shared_lib_error: fatal("Unexpected shared library errors.") if PYTHON_3: LDVERSION=None VERSION=None ABIFLAGS=None fp = open(os.path.join(buildDir, 'Makefile'), 'r') for ln in fp: if ln.startswith('VERSION='): VERSION=ln.split()[1] if ln.startswith('ABIFLAGS='): ABIFLAGS=ln.split() ABIFLAGS=ABIFLAGS[1] if len(ABIFLAGS) > 1 else '' if ln.startswith('LDVERSION='): LDVERSION=ln.split()[1] fp.close() LDVERSION = LDVERSION.replace('$(VERSION)', VERSION) LDVERSION = LDVERSION.replace('$(ABIFLAGS)', ABIFLAGS) config_suffix = '-' + LDVERSION if getVersionMajorMinor() >= (3, 6): config_suffix = config_suffix + '-darwin' else: config_suffix = '' # Python 2.x # We added some directories to the search path during the configure # phase. Remove those because those directories won't be there on # the end-users system. Also remove the directories from _sysconfigdata.py # (added in 3.3) if it exists. include_path = '-I%s/libraries/usr/local/include' % (WORKDIR,) lib_path = '-L%s/libraries/usr/local/lib' % (WORKDIR,) # fix Makefile path = os.path.join(path_to_lib, 'config' + config_suffix, 'Makefile') fp = open(path, 'r') data = fp.read() fp.close() for p in (include_path, lib_path): data = data.replace(" " + p, '') data = data.replace(p + " ", '') fp = open(path, 'w') fp.write(data) fp.close() # fix _sysconfigdata # # TODO: make this more robust! 
test_sysconfig_module of # distutils.tests.test_sysconfig.SysconfigTestCase tests that # the output from get_config_var in both sysconfig and # distutils.sysconfig is exactly the same for both CFLAGS and # LDFLAGS. The fixing up is now complicated by the pretty # printing in _sysconfigdata.py. Also, we are using the # pprint from the Python running the installer build which # may not cosmetically format the same as the pprint in the Python # being built (and which is used to originally generate # _sysconfigdata.py). import pprint if getVersionMajorMinor() >= (3, 6): # XXX this is extra-fragile path = os.path.join(path_to_lib, '_sysconfigdata_%s_darwin_darwin.py' % (ABIFLAGS,)) else: path = os.path.join(path_to_lib, '_sysconfigdata.py') fp = open(path, 'r') data = fp.read() fp.close() # create build_time_vars dict if RUNNING_ON_PYTHON2: exec(data) else: g_dict = {} l_dict = {} exec(data, g_dict, l_dict) build_time_vars = l_dict['build_time_vars'] vars = {} for k, v in build_time_vars.items(): if type(v) == type(''): for p in (include_path, lib_path): v = v.replace(' ' + p, '') v = v.replace(p + ' ', '') vars[k] = v fp = open(path, 'w') # duplicated from sysconfig._generate_posix_vars() fp.write('# system configuration generated and used by' ' the sysconfig module\n') fp.write('build_time_vars = ') pprint.pprint(vars, stream=fp) fp.close() # Add symlinks in /usr/local/bin, using relative links usr_local_bin = os.path.join(rootDir, 'usr', 'local', 'bin') to_framework = os.path.join('..', '..', '..', 'Library', 'Frameworks', 'Python.framework', 'Versions', version, 'bin') if os.path.exists(usr_local_bin): shutil.rmtree(usr_local_bin) os.makedirs(usr_local_bin) for fn in os.listdir( os.path.join(frmDir, 'Versions', version, 'bin')): os.symlink(os.path.join(to_framework, fn), os.path.join(usr_local_bin, fn)) os.chdir(curdir)
null
185,635
import os import setuptools from typing import List from distutils.command.build_ext import build_ext from concurrent.futures import ThreadPoolExecutor List = _alias(list, 1, inst=False, name='List') def find_header_files(directories: List[str]) -> List[str]: header_files = [] for directory in directories: for root, dirs, files in os.walk(directory): for file in files: if file.endswith('.h'): header_files.append(os.path.join(root, file)) return header_files
null
185,636
import lldb import os import sys import re The provided code snippet includes necessary dependencies for implementing the `_get_function_name` function. Write a Python function `def _get_function_name(instance=None)` to solve the following problem: Return the name of the calling function Here is the function: def _get_function_name(instance=None): """Return the name of the calling function""" class_name = f"{type(instance).__name__}." if instance else "" return class_name + sys._getframe(1).f_code.co_name
Return the name of the calling function
185,637
import lldb
import os
import sys
import re

# Stem of this script's filename, used to address our classes in the LLDB
# `type summary add` / `type synthetic add` commands below.
_MODULE_NAME = os.path.basename(__file__).split(".")[0]

# NOTE(review): _MAX_CHILDREN, _MAX_CTRL_INDEX and _get_function_name are
# referenced below but not defined in this excerpt -- presumably they live in
# an omitted part of this module.  Confirm before running this file alone.


class flat_map_slot_type:
    """Summary for a flat_hash_map slot: the key/value pair stored inline."""

    CLASS_PATTERN = "^phmap::priv::raw_hash_set<phmap::priv::FlatHashMapPolicy.*>::slot_type$"
    HAS_SUMMARY = True
    IS_SYNTHETIC_PROVIDER = False

    def summary(valobj, _):
        try:
            # Flat maps keep the pair inline inside a 'value' member.
            valobj = valobj.GetChildMemberWithName('value')
            first = valobj.GetChildMemberWithName('first').GetSummary()
            if not first:
                first = "{...}"
            second = valobj.GetChildMemberWithName('second').GetSummary()
            if not second:
                second = "{...}"
            return f"{{{first}, {second}}}"
        except BaseException as ex:
            print(f"{_get_function_name()} -> {ex}")
            # Fixed: previously returned "" here; use the same "{?}"
            # placeholder every other formatter in this module uses.
            return "{?}"


class node_map_slot_type:
    """Summary for a node_hash_map slot: a pointer to the key/value pair."""

    CLASS_PATTERN = r"phmap::priv::raw_hash_set<phmap::priv::NodeHashMapPolicy.*>::slot_type$"
    HAS_SUMMARY = True
    IS_SYNTHETIC_PROVIDER = False

    def summary(valobj, _):
        try:
            # Node maps heap-allocate the pair; follow the pointer first.
            valobj = valobj.Dereference()
            first = valobj.GetChildMemberWithName('first').GetSummary()
            if not first:
                first = "{...}"
            second = valobj.GetChildMemberWithName('second').GetSummary()
            if not second:
                second = "{...}"
            return f"{{{first}, {second}}}"
        except BaseException as ex:
            print(f"{_get_function_name()} -> {ex}")
            return "{?}"


class node_set_slot_type:
    """Summary for a node_hash_set slot: a pointer to the stored element."""

    CLASS_PATTERN = r"phmap::priv::raw_hash_set<phmap::priv::NodeHashSetPolicy.*>::slot_type$"
    HAS_SUMMARY = True
    IS_SYNTHETIC_PROVIDER = False

    def summary(valobj, _):
        try:
            summary = valobj.Dereference().GetSummary()
            if not summary:
                summary = "{...}"
            return summary
        except BaseException as ex:
            print(f"{_get_function_name()} -> {ex}")
            return "{?}"


class flat_hash_map_or_set:
    """Summary plus synthetic-children provider for flat_hash_map/set."""

    CLASS_PATTERN = "^phmap::flat_hash_(map|set)<.*>$"
    HAS_SUMMARY = True
    IS_SYNTHETIC_PROVIDER = True

    def summary(valobj, _):
        try:
            valobj = valobj.GetNonSyntheticValue()
            size = valobj.GetChildMemberWithName('size_').GetValueAsUnsigned()
            capacity = valobj.GetChildMemberWithName('capacity_').GetValueAsUnsigned()
            return f"size = {size} (capacity = {capacity})"
        except BaseException as ex:
            print(f"{_get_function_name()} -> {ex}")
            return "{?}"

    def __init__(self, valobj, _):
        self.valobj = valobj
        self.slots_ = self.slot_type = self.ctrl_ = None
        self.size_ = self.capacity_ = self.slot_size = 0

    def num_children(self):
        # Cap how many children the debugger enumerates.
        return min(self.size_, _MAX_CHILDREN)

    def has_children(self):
        return True

    def update(self):
        """Re-read the container's internals after the target state changes."""
        try:
            self.size_ = self.valobj.GetChildMemberWithName('size_').GetValueAsUnsigned()
            self.capacity_ = self.valobj.GetChildMemberWithName('capacity_').GetValueAsUnsigned()
            self.slots_ = self.valobj.GetChildMemberWithName("slots_")
            self.slot_type = self.slots_.GetType().GetPointeeType()
            self.slot_size = self.slot_type.GetByteSize()
            self.ctrl_ = self.valobj.GetChildMemberWithName("ctrl_")
        except BaseException as ex:
            print(f"{_get_function_name(self)} -> {ex}")

    def get_child_index(self, name):
        """Map a child name like '[3]' back to its index; -1 for non-children."""
        try:
            if name in ('size_', 'capacity_'):
                return -1
            return int(name.lstrip('[').rstrip(']'))
        except Exception:
            return -1

    def get_child_at_index(self, index):
        """Return the index-th occupied slot, skipping empty/deleted slots."""
        try:
            if index < 0:
                return None
            if index >= self.size_ or index >= _MAX_CHILDREN:
                return None
            real_idx = -1
            # ctrl_ holds one control byte per slot; this code treats >= -1 as
            # occupied.  NOTE(review): the +3 overscan and the inclusion of -1
            # look implementation-specific -- confirm against the phmap
            # control-byte encoding.
            for idx in range(min(self.capacity_ + 3, _MAX_CTRL_INDEX)):
                ctrl = self.ctrl_.GetChildAtIndex(idx).GetValueAsSigned()
                if ctrl >= -1:
                    real_idx += 1
                    if real_idx == index:
                        return self.slots_.CreateChildAtOffset(
                            f'[{index}]', idx * self.slot_size, self.slot_type)
        except BaseException as ex:
            print(f"{_get_function_name(self)} -> {ex}")
        return None


class parallel_flat_or_node_map_or_set:
    """Summary plus synthetic provider for the sharded parallel_*_hash_*."""

    CLASS_PATTERN = "^phmap::parallel_(flat|node)_hash_(map|set)<.*>$"
    HAS_SUMMARY = True
    IS_SYNTHETIC_PROVIDER = True
    REGEX_EXTRACT_ARRAY_SIZE = re.compile(r"std::array\s*<.*,\s*(\d+)\s*>")

    # Fixed: must be a @staticmethod.  The original plain function, when
    # invoked as self._get_size_and_capacity(...) in update(), received `self`
    # as an implicit extra argument and raised TypeError (silently swallowed),
    # leaving the provider with size 0.
    @staticmethod
    def _get_size_and_capacity(valobj):
        """Sum size/capacity over all buckets; returns (size, capacity, n_buckets)."""
        try:
            valobj = valobj.GetNonSyntheticValue()
            sets = valobj.GetChildMemberWithName('sets_')
            # sets_ is a std::array<T, SIZE>.  LLDB exposes the non-type
            # template parameter's type ("unsigned long") but not its value,
            # so the bucket count must be scraped from the type name.
            m = parallel_flat_or_node_map_or_set.REGEX_EXTRACT_ARRAY_SIZE.match(
                sets.GetType().GetName())
            n_buckets = int(m.group(1))
            # '_M_elems' is libstdc++-specific; other standard libraries name
            # the std::array backing member differently.
            buckets = sets.GetChildMemberWithName('_M_elems')
            size = capacity = 0
            for idx in range(n_buckets):
                bucket = buckets.GetChildAtIndex(idx).GetChildMemberWithName('set_')
                size += bucket.GetChildMemberWithName('size_').GetValueAsUnsigned()
                capacity += bucket.GetChildMemberWithName('capacity_').GetValueAsUnsigned()
            return size, capacity, n_buckets
        except Exception:
            return '?', '?', 0

    def summary(valobj, _):
        size, capacity, _ = parallel_flat_or_node_map_or_set._get_size_and_capacity(valobj)
        return f"size = {size} (capacity = {capacity})"

    def __init__(self, valobj, _):
        self.valobj = valobj
        self.buckets = self.slot_type = None
        # Fixed: initialize slot_size (read by get_child_at_index); the
        # original initialized a never-used 'ctrl_size' instead.
        self.size_ = self.capacity_ = self.n_buckets_ = self.slot_size = 0

    def num_children(self):
        return min(self.size_, _MAX_CHILDREN)

    def has_children(self):
        return True

    def update(self):
        """Re-read aggregate counts and the slot layout from bucket 0."""
        try:
            self.size_, self.capacity_, self.n_buckets_ = \
                self._get_size_and_capacity(self.valobj)
            self.buckets = self.valobj.GetChildMemberWithName('sets_') \
                .GetChildMemberWithName('_M_elems')
            bucket0 = self.buckets.GetChildAtIndex(0).GetChildMemberWithName('set_')
            self.slot_type = bucket0.GetChildMemberWithName('slots_').GetType().GetPointeeType()
            self.slot_size = self.slot_type.GetByteSize()
        except BaseException as ex:
            print(f"{_get_function_name(self)} -> {ex}")

    def get_child_index(self, name):
        """Map '[i]' back to i; internal members are not children."""
        try:
            # Fixed: ('sets_') is just the string 'sets_', so the original
            # performed substring containment; use a real tuple.
            if name in ('sets_',):
                return -1
            return int(name.lstrip('[').rstrip(']'))
        except Exception:
            return -1

    def get_child_at_index(self, index):
        """Walk the buckets, counting occupied slots until `index` is reached."""
        try:
            if index < 0:
                return None
            if index >= self.size_ or index >= _MAX_CHILDREN:
                return None
            real_idx = -1
            total_idx = 0
            for idx in range(self.n_buckets_):
                bucket = self.buckets.GetChildAtIndex(idx).GetChildMemberWithName('set_')
                size = bucket.GetChildMemberWithName("size_").GetValueAsUnsigned()
                if size:
                    slots_ = bucket.GetChildMemberWithName("slots_")
                    ctrl_ = bucket.GetChildMemberWithName("ctrl_")
                    # NOTE(review): only the first `size` control bytes are
                    # scanned; occupied slots past that point in a sparse
                    # bucket would be missed -- confirm this is acceptable.
                    for jdx in range(size):
                        ctrl = ctrl_.GetChildAtIndex(jdx).GetValueAsSigned()
                        if ctrl >= -1:
                            real_idx += 1
                            if real_idx == index:
                                return slots_.CreateChildAtOffset(
                                    f'[{index}]', jdx * self.slot_size, self.slot_type)
                    total_idx += size
                    if total_idx > _MAX_CHILDREN:
                        return None
        except BaseException as ex:
            print(f"{_get_function_name(self)} -> {ex}")
        return None


def __lldb_init_module(debugger, internal_dict):
    """Register every formatter above under the 'phmap' LLDB category."""
    for sp in (
        flat_map_slot_type,
        node_map_slot_type,
        node_set_slot_type,
        flat_hash_map_or_set,
        parallel_flat_or_node_map_or_set,
    ):
        if sp.HAS_SUMMARY:
            debugger.HandleCommand(
                f'type summary add --regex "{sp.CLASS_PATTERN}" --python-function {_MODULE_NAME}.{sp.__name__}.summary '
                f'--category phmap --expand')
        if sp.IS_SYNTHETIC_PROVIDER:
            debugger.HandleCommand(
                f'type synthetic add --regex "{sp.CLASS_PATTERN}" --python-class {_MODULE_NAME}.{sp.__name__} '
                f'--category phmap')
    debugger.HandleCommand('type category enable phmap')
null
185,638
import argparse import math import operator import sys from enum import Enum from functools import reduce from typing import Dict, List, NamedTuple, Optional, Set, TextIO, Tuple class Lifetime(Enum): class TypeFlag(Enum): BUILTIN_PYTYPES: Set[str] = { "Long", "Object", *BASIC_FINAL_TYPES, *BASIC_BASE_TYPES, } HEADER1 = """// Copyright (c) Meta Platforms, Inc. and affiliates. #pragma once // This file is @""" HEADER2 = """generated by generate_jit_type_h.py. // Run 'make regen-jit' to update it. namespace jit::hir { // clang-format off """ FOOTER = """ // clang-format on } // namespace jit::hir """ def generate_types() -> Tuple[List[Type], int]: class TextIO(IO[str]): def buffer(self) -> BinaryIO: def encoding(self) -> str: def errors(self) -> Optional[str]: def line_buffering(self) -> bool: def newlines(self) -> Any: def __enter__(self) -> 'TextIO': def write_types(file: TextIO) -> None: types, num_bits = generate_types() types.sort(key=lambda t: t[0]) max_ty_len = 0 max_bits_len = 0 max_lifetime_len = len(Lifetime.Immortal.value) + 2 for ty, bits, _ in types: # +1 for ',' max_ty_len = max(max_ty_len, len(ty) + 1) # +2 for '0x' max_bits_len = max(max_bits_len, math.ceil(math.log(bits + 1, 16)) + 2) file.write(HEADER1) file.write(HEADER2) for name, member in TypeFlag.__members__.items(): file.write(f"constexpr size_t kType{name} = {member.value};\n") file.write("\n") file.write("// For all types, call X(name, bits, mortality, flags)\n") file.write("#define HIR_TYPES(X)") for ty, bits, lifetime in types: flags = [] if lifetime == Lifetime.Top or lifetime == Lifetime.Bottom: flags.append(TypeFlag.HasTrivialMortality) if ty in BUILTIN_PYTYPES: flags.append(TypeFlag.HasUniquePyType) ty_arg = ty + "," lifetime_arg = f"k{lifetime.value}," line = f" X({ty_arg:{max_ty_len}} {bits:#0{max_bits_len}x}UL, {lifetime_arg:{max_lifetime_len}}" tail = " 0)" file.write(f" \\\n{line}") if len(flags) > 0: flags_str = " | ".join([f"kType{f.name}" for f in flags]) flags_line = f" 
{flags_str})" line_len = len(line) + len(tail) file.write(f" \\\n{flags_line:{line_len}}") else: file.write(tail) file.write("\n\n") file.write(f"constexpr size_t kNumTypeBits = {num_bits};\n") file.write(FOOTER)
null
185,639
import argparse import math import operator import sys from enum import Enum from functools import reduce from typing import Dict, List, NamedTuple, Optional, Set, TextIO, Tuple def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Generate type_generated.h") parser.add_argument("output_file", help="Filename to write to.") return parser.parse_args()
null
185,640
try: from cinderx.static import ( set_type_code, TYPED_BOOL, TYPED_CHAR, TYPED_DOUBLE, TYPED_INT16, TYPED_INT32, TYPED_INT64, TYPED_INT8, TYPED_SINGLE, TYPED_UINT16, TYPED_UINT32, TYPED_UINT64, TYPED_UINT8, ) except ImportError: TYPED_INT8 = 0 TYPED_INT16 = 0 TYPED_INT32 = 0 TYPED_INT64 = 0 TYPED_UINT8 = 0 TYPED_UINT16 = 0 TYPED_UINT32 = 0 TYPED_UINT64 = 0 TYPED_DOUBLE = 0 TYPED_SINGLE = 0 TYPED_BOOL = 0 TYPED_CHAR = 0 def set_type_code(func, code): pass def type_code(code: int): def inner(func): set_type_code(func, code) return func return inner
null
185,641
from __future__ import annotations import inspect from typing import Iterable, Mapping, Sequence, Tuple, Type from .type_code import set_type_code, TYPED_INT64 class Enum(metaclass=EnumMeta): def __init__(self, value: object) -> None: self.value = value def __dir__(self) -> Sequence[str]: return ["name", "value"] def __str__(self) -> str: return f"{type(self).__name__}.{self.name}" def __repr__(self) -> str: return f"<{type(self).__name__}.{self.name}: {self.value}>" def __reduce_ex__(self, proto: int) -> Tuple[Type[object], Tuple[object]]: return self.__class__, (self.value,) def eq_method(self: Enum, other: Enum) -> bool: return self.value == other or ( getattr(other, "value", None) is not None and self.value == other.value )
null
185,642
from __future__ import annotations import inspect from typing import Iterable, Mapping, Sequence, Tuple, Type from .type_code import set_type_code, TYPED_INT64 class Enum(metaclass=EnumMeta): def __init__(self, value: object) -> None: self.value = value def __dir__(self) -> Sequence[str]: return ["name", "value"] def __str__(self) -> str: return f"{type(self).__name__}.{self.name}" def __repr__(self) -> str: return f"<{type(self).__name__}.{self.name}: {self.value}>" def __reduce_ex__(self, proto: int) -> Tuple[Type[object], Tuple[object]]: return self.__class__, (self.value,) def hash_method(self: Enum) -> int: return hash(self.value)
null
185,643
from __future__ import annotations import inspect from typing import Iterable, Mapping, Sequence, Tuple, Type from .type_code import set_type_code, TYPED_INT64 class Enum(metaclass=EnumMeta): def __init__(self, value: object) -> None: self.value = value def __dir__(self) -> Sequence[str]: return ["name", "value"] def __str__(self) -> str: return f"{type(self).__name__}.{self.name}" def __repr__(self) -> str: return f"<{type(self).__name__}.{self.name}: {self.value}>" def __reduce_ex__(self, proto: int) -> Tuple[Type[object], Tuple[object]]: return self.__class__, (self.value,) Type = _alias(type, 1, inst=False, name='Type') Type.__doc__ = \ """A special construct usable to annotate class objects. For example, suppose we have the following classes:: class User: ... # Abstract base for User classes class BasicUser(User): ... class ProUser(User): ... class TeamUser(User): ... And a function that takes a class argument that's a subclass of User and returns an instance of the corresponding class:: U = TypeVar('U', bound=User) def new_user(user_class: Type[U]) -> U: user = user_class() # (Here we could write the user object to a database) return user joe = new_user(BasicUser) At this point the type checker knows that joe has type BasicUser. """ The provided code snippet includes necessary dependencies for implementing the `unique` function. Write a Python function `def unique(enumeration: Type[Enum]) -> Type[Enum]` to solve the following problem: Class decorator for enumerations ensuring unique member values Here is the function: def unique(enumeration: Type[Enum]) -> Type[Enum]: """ Class decorator for enumerations ensuring unique member values """ duplicates = [] for name, member in enumeration.__members__.items(): if name != member.name: duplicates.append((name, member.name)) if duplicates: raise ValueError(f"duplicate values found in {enumeration!r}") return enumeration
Class decorator for enumerations ensuring unique member values
185,644
from __future__ import annotations import ctypes from typing import Tuple from cinderx.static import ( resolve_primitive_descr, TYPED_BOOL, TYPED_CHAR, TYPED_DOUBLE, TYPED_INT16, TYPED_INT32, TYPED_INT64, TYPED_INT8, TYPED_UINT16, TYPED_UINT32, TYPED_UINT64, TYPED_UINT8, ) def _create_args( signature: Tuple[Tuple[str, ...], ...], args: Tuple[object] ) -> List[object]: Tuple = _TupleType(tuple, -1, inst=False, name='Tuple') Tuple.__doc__ = \ """Tuple type; Tuple[X, Y] is the cross-product type of X and Y. Example: Tuple[T1, T2] is a tuple of two elements corresponding to type variables T1 and T2. Tuple[int, float, str] is a tuple of an int, a float and a string. To specify a variable-length tuple of homogeneous type, use Tuple[T, ...]. """ def invoke_native( libname: str, symbol: str, signature: Tuple[Tuple[str, ...], ...], args: Tuple[object], ) -> int: # This is basically just a `dlopen()` under the hood lib = ctypes.CDLL(libname) # Python wrapper over `dlsym` fn = getattr(lib, symbol) call_args = _create_args(signature, args) res = fn(*call_args) return res
null
185,645
# Opcode numbers claimed by CinderX "shadow" (inline-cache specialized) ops.
shadowop = set()
# Every opcode name registered by this module.
cinderxop = set()


def init(opname, opmap, hasname, hasjrel, hasjabs, hasconst):
    """Register the CinderX opcodes into the caller's opcode tables.

    Mutates the supplied structures in place, mirroring the shape of the
    stdlib ``opcode`` module: ``opname`` (number -> name), ``opmap``
    (name -> number), and the ``hasname``/``hasjrel``/``hasjabs``/``hasconst``
    lists of opcode numbers.
    """

    def def_op(name, op):
        # Base registration: record the mapping in both directions.
        opname[op] = name
        opmap[name] = op
        cinderxop.add(name)

    def name_op(name, op):
        # Opcode whose argument indexes the names table.
        def_op(name, op)
        hasname.append(op)

    def jrel_op(name, op):
        # Opcode whose argument is a relative jump target.
        def_op(name, op)
        hasjrel.append(op)

    def jabs_op(name, op):
        # Opcode whose argument is an absolute jump target.
        def_op(name, op)
        hasjabs.append(op)

    def shadow_op(name, op):
        # Shadow (inline-cache specialized) opcode.
        def_op(name, op)
        shadowop.add(op)

    # --- Static Python opcodes ---
    def_op("INVOKE_METHOD", 158)
    hasconst.append(158)
    def_op("LOAD_FIELD", 159)
    hasconst.append(159)
    def_op("STORE_FIELD", 166)
    hasconst.append(166)
    def_op("BUILD_CHECKED_LIST", 168)
    hasconst.append(168)
    def_op("LOAD_TYPE", 169)
    hasconst.append(169)
    def_op("CAST", 170)
    hasconst.append(170)
    def_op("LOAD_LOCAL", 171)
    hasconst.append(171)
    def_op("STORE_LOCAL", 172)
    hasconst.append(172)
    def_op("PRIMITIVE_BOX", 174)
    jabs_op("POP_JUMP_IF_ZERO", 175)
    jabs_op("POP_JUMP_IF_NONZERO", 176)
    def_op("PRIMITIVE_UNBOX", 177)
    def_op("PRIMITIVE_BINARY_OP", 178)
    def_op("PRIMITIVE_UNARY_OP", 179)
    def_op("PRIMITIVE_COMPARE_OP", 180)
    def_op("LOAD_ITERABLE_ARG", 181)
    def_op("LOAD_MAPPING_ARG", 182)
    def_op("INVOKE_FUNCTION", 183)
    hasconst.append(183)
    jabs_op("JUMP_IF_ZERO_OR_POP", 184)
    jabs_op("JUMP_IF_NONZERO_OR_POP", 185)
    def_op("FAST_LEN", 186)
    def_op("CONVERT_PRIMITIVE", 187)
    # Note: 190 is registered before 189 in the original ordering; kept as-is.
    def_op("LOAD_CLASS", 190)
    hasconst.append(190)
    def_op("INVOKE_NATIVE", 189)
    hasconst.append(189)
    def_op("BUILD_CHECKED_MAP", 191)
    hasconst.append(191)
    def_op("SEQUENCE_GET", 192)
    def_op("SEQUENCE_SET", 193)
    def_op("LIST_DEL", 194)
    def_op("REFINE_TYPE", 195)
    hasconst.append(195)
    def_op("PRIMITIVE_LOAD_CONST", 196)
    hasconst.append(196)
    def_op("RETURN_PRIMITIVE", 197)
    def_op("TP_ALLOC", 200)
    hasconst.append(200)

    # --- Shadow (inline-cache) opcodes, 205..255 ---
    shadow_op("LOAD_METHOD_UNSHADOWED_METHOD", 205)
    shadow_op("LOAD_METHOD_TYPE_METHODLIKE", 206)
    shadow_op("BUILD_CHECKED_LIST_CACHED", 207)
    shadow_op("TP_ALLOC_CACHED", 208)
    shadow_op("LOAD_ATTR_S_MODULE", 209)
    shadow_op("LOAD_METHOD_S_MODULE", 210)
    shadow_op("INVOKE_FUNCTION_CACHED", 211)
    shadow_op("INVOKE_FUNCTION_INDIRECT_CACHED", 212)
    shadow_op("BUILD_CHECKED_MAP_CACHED", 213)
    shadow_op("PRIMITIVE_STORE_FAST", 215)
    shadow_op("CAST_CACHED_OPTIONAL", 216)
    shadow_op("CAST_CACHED", 217)
    shadow_op("CAST_CACHED_EXACT", 218)
    shadow_op("CAST_CACHED_OPTIONAL_EXACT", 219)
    shadow_op("LOAD_PRIMITIVE_FIELD", 220)
    shadow_op("STORE_PRIMITIVE_FIELD", 221)
    shadow_op("LOAD_OBJ_FIELD", 222)
    shadow_op("STORE_OBJ_FIELD", 223)
    shadow_op("INVOKE_METHOD_CACHED", 224)
    shadow_op("BINARY_SUBSCR_TUPLE_CONST_INT", 225)
    shadow_op("BINARY_SUBSCR_DICT_STR", 226)
    shadow_op("BINARY_SUBSCR_LIST", 227)
    shadow_op("BINARY_SUBSCR_TUPLE", 228)
    shadow_op("BINARY_SUBSCR_DICT", 229)
    shadow_op("LOAD_METHOD_UNCACHABLE", 230)
    shadow_op("LOAD_METHOD_MODULE", 231)
    shadow_op("LOAD_METHOD_TYPE", 232)
    shadow_op("LOAD_METHOD_SPLIT_DICT_DESCR", 233)
    shadow_op("LOAD_METHOD_SPLIT_DICT_METHOD", 234)
    shadow_op("LOAD_METHOD_DICT_DESCR", 235)
    shadow_op("LOAD_METHOD_DICT_METHOD", 236)
    shadow_op("LOAD_METHOD_NO_DICT_METHOD", 237)
    shadow_op("LOAD_METHOD_NO_DICT_DESCR", 238)
    shadow_op("STORE_ATTR_SLOT", 239)
    shadow_op("STORE_ATTR_SPLIT_DICT", 240)
    shadow_op("STORE_ATTR_DESCR", 241)
    shadow_op("STORE_ATTR_UNCACHABLE", 242)
    shadow_op("STORE_ATTR_DICT", 243)
    shadow_op("LOAD_ATTR_POLYMORPHIC", 244)
    shadow_op("LOAD_ATTR_SLOT", 245)
    shadow_op("LOAD_ATTR_MODULE", 246)
    shadow_op("LOAD_ATTR_TYPE", 247)
    shadow_op("LOAD_ATTR_SPLIT_DICT_DESCR", 248)
    shadow_op("LOAD_ATTR_SPLIT_DICT", 249)
    shadow_op("LOAD_ATTR_DICT_NO_DESCR", 250)
    shadow_op("LOAD_ATTR_NO_DICT_DESCR", 251)
    shadow_op("LOAD_ATTR_DICT_DESCR", 252)
    shadow_op("LOAD_ATTR_UNCACHABLE", 253)
    shadow_op("LOAD_GLOBAL_CACHED", 254)
    shadow_op("SHADOW_NOP", 255)
null
185,646
from __future__ import print_function import ast import os import sys from .consts import ( CO_FUTURE_ANNOTATIONS, SC_CELL, SC_FREE, SC_GLOBAL_EXPLICIT, SC_GLOBAL_IMPLICIT, SC_LOCAL, SC_UNKNOWN, ) from .misc import mangle from .visitor import ASTVisitor def list_eq(l1, l2): return sorted(l1) == sorted(l2)
null
185,647
import ast from typing import Any, Callable, Dict, List, Optional, Type def _format_name(node: ast.Name, level: int) -> str: return node.id
null
185,648
import ast from typing import Any, Callable, Dict, List, Optional, Type PR_CMP = 5 def get_op(node: ast.cmpop) -> str: if isinstance(node, ast.Is): return " is " elif isinstance(node, ast.IsNot): return " is not " elif isinstance(node, ast.In): return " in " elif isinstance(node, ast.NotIn): return " not in " elif isinstance(node, ast.Lt): return " < " elif isinstance(node, ast.Gt): return " > " elif isinstance(node, ast.LtE): return " <= " elif isinstance(node, ast.GtE): return " >= " elif isinstance(node, ast.Eq): return " == " elif isinstance(node, ast.NotEq): return " != " else: return "unknown op: " + type(node).__name__ def parens(level: int, target_lvl: int, value: str) -> str: if level > target_lvl: return f"({value})" return value def to_expr(node: Optional[ast.AST], level=PR_TEST) -> str: if node is None: return "" formatter = _FORMATTERS.get(type(node)) if formatter is not None: return formatter(node, level) return "<unsupported node: " + type(node).__name__ + ">" def _format_compare(node: ast.Compare, level: int) -> str: return parens( level, PR_CMP, to_expr(node.left, PR_CMP + 1) + "".join( ( get_op(op) + to_expr(comp, PR_CMP + 1) for comp, op in zip(node.comparators, node.ops) ) ), )
null
185,649
import ast from typing import Any, Callable, Dict, List, Optional, Type def _format_nameconstant(node: ast.NameConstant, level: int) -> str: if node.value is None: return "None" elif node.value is True: return "True" elif node.value is False: return "False" return "<unknown constant>"
null
185,650
import ast from typing import Any, Callable, Dict, List, Optional, Type def _format_num(node: ast.Num, level: int) -> str: return repr(node.n)
null
185,651
import ast from typing import Any, Callable, Dict, List, Optional, Type def _format_str(node: ast.Str, level: int) -> str: return repr(node.s)
null