after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
self.lr_step_begin_epoch(epoch)
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
|
def begin_epoch(self, epoch):
"""Called at the beginning of each epoch."""
logger.info("begin training epoch {}".format(epoch))
if self.quantizer is not None:
self.quantizer.begin_epoch(epoch)
# task specific setup per epoch
self.task.begin_epoch(epoch, self.get_model())
if self.tpu:
import torch_xla.core.xla_model as xm
xm.rendezvous("begin_epoch") # wait for all workers
xm.mark_step()
|
https://github.com/pytorch/fairseq/issues/2811
|
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/workspace/fairseq/fairseq/distributed_utils.py", line 283, in distributed_main
main(cfg, **kwargs)
File "/workspace/fairseq/fairseq_cli/train.py", line 124, in main
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
File "/opt/conda/lib/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File "/workspace/fairseq/fairseq_cli/train.py", line 202, in train
log_output = trainer.train_step(samples)
File "/opt/conda/lib/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File "/workspace/fairseq/fairseq/trainer.py", line 459, in train_step
self.zero_grad()
File "/workspace/fairseq/fairseq/trainer.py", line 783, in zero_grad
self.optimizer.zero_grad()
File "/workspace/fairseq/fairseq/optim/fp16_optimizer.py", line 218, in zero_grad
p32.grad.zero_()
AttributeError: 'NoneType' object has no attribute 'zero_'
|
AttributeError
|
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise ("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
|
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise ("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
|
https://github.com/pytorch/fairseq/issues/2811
|
Traceback (most recent call last):
File "/opt/conda/lib/python3.6/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/workspace/fairseq/fairseq/distributed_utils.py", line 283, in distributed_main
main(cfg, **kwargs)
File "/workspace/fairseq/fairseq_cli/train.py", line 124, in main
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
File "/opt/conda/lib/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File "/workspace/fairseq/fairseq_cli/train.py", line 202, in train
log_output = trainer.train_step(samples)
File "/opt/conda/lib/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File "/workspace/fairseq/fairseq/trainer.py", line 459, in train_step
self.zero_grad()
File "/workspace/fairseq/fairseq/trainer.py", line 783, in zero_grad
self.optimizer.zero_grad()
File "/workspace/fairseq/fairseq/optim/fp16_optimizer.py", line 218, in zero_grad
p32.grad.zero_()
AttributeError: 'NoneType' object has no attribute 'zero_'
|
AttributeError
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with (
open(args.tsv, "r") as tsv,
open(os.path.join(args.output_dir, args.output_name + ".ltr"), "w") as ltr_out,
open(os.path.join(args.output_dir, args.output_name + ".wrd"), "w") as wrd_out,
):
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split(os.path.sep)
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
|
def main():
parser = argparse.ArgumentParser()
parser.add_argument("tsv")
parser.add_argument("--output-dir", required=True)
parser.add_argument("--output-name", required=True)
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
transcriptions = {}
with (
open(args.tsv, "r") as tsv,
open(os.path.join(args.output_dir, args.output_name + ".ltr"), "w") as ltr_out,
open(os.path.join(args.output_dir, args.output_name + ".wrd"), "w") as wrd_out,
):
root = next(tsv).strip()
for line in tsv:
line = line.strip()
dir = os.path.dirname(line)
if dir not in transcriptions:
parts = dir.split("/")
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
path = os.path.join(root, dir, trans_path)
assert os.path.exists(path)
texts = {}
with open(path, "r") as trans_f:
for tline in trans_f:
items = tline.strip().split()
texts[items[0]] = " ".join(items[1:])
transcriptions[dir] = texts
part = os.path.basename(line).split(".")[0]
assert part in transcriptions[dir]
print(transcriptions[dir][part], file=wrd_out)
print(
" ".join(list(transcriptions[dir][part].replace(" ", "|"))) + " |",
file=ltr_out,
)
|
https://github.com/pytorch/fairseq/issues/2744
|
Traceback (most recent call last):
File "libri_labels.py", line 56, in <module>
main()
File "libri_labels.py", line 37, in main
trans_path = f"{parts[-2]}-{parts[-1]}.trans.txt"
IndexError: list index out of range
|
IndexError
|
def forward(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
full_context_alignment=full_context_alignment,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
|
def forward(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
features_only: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
src_lengths: Optional[Any] = None,
return_all_hiddens: bool = False,
):
"""
Args:
prev_output_tokens (LongTensor): previous decoder outputs of shape
`(batch, tgt_len)`, for teacher forcing
encoder_out (optional): output from the encoder, used for
encoder-side attention
incremental_state (dict): dictionary used for storing state during
:ref:`Incremental decoding`
features_only (bool, optional): only return features without
applying output layer (default: False).
Returns:
tuple:
- the decoder's output of shape `(batch, tgt_len, vocab)`
- a dictionary with any model-specific outputs
"""
x, extra = self.extract_features(
prev_output_tokens,
encoder_out=encoder_out,
incremental_state=incremental_state,
alignment_layer=alignment_layer,
alignment_heads=alignment_heads,
)
if not features_only:
x = self.output_layer(x)
return x, extra
|
https://github.com/pytorch/fairseq/issues/2673
|
Traceback (most recent call last):
File ".../bin/fairseq-train", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-train')()
File ".../fairseq_cli/train.py", line 351, in cli_main
distributed_utils.call_main(args, main)
File ".../fairseq/distributed_utils.py", line 254, in call_main
main(args, **kwargs)
File ".../fairseq_cli/train.py", line 125, in main
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File ".../fairseq_cli/train.py", line 207, in train
log_output = trainer.train_step(samples)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File ".../fairseq/trainer.py", line 479, in train_step
ignore_grad=is_dummy_batch,
File ".../fairseq/tasks/fairseq_task.py", line 408, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File ".../fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py", line 36, in forward
net_output = model(**sample['net_input'])
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File ".../fairseq/models/transformer_align.py", line 51, in forward
return self.forward_decoder(prev_output_tokens, encoder_out)
File ".../fairseq/models/transformer_align.py", line 75, in forward_decoder
**extra_args,
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() got an unexpected keyword argument 'full_context_alignment'
|
TypeError
|
def add_args(parser):
# fmt: off
super(TransformerAlignModel, TransformerAlignModel).add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='D',
help='Number of cross attention heads per layer to supervised with alignments')
parser.add_argument('--alignment-layer', type=int, metavar='D',
help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.')
parser.add_argument('--full-context-alignment', action='store_true',
help='Whether or not alignment is supervised conditioned on the full target context.')
|
def add_args(parser):
# fmt: off
super(TransformerAlignModel, TransformerAlignModel).add_args(parser)
parser.add_argument('--alignment-heads', type=int, metavar='D',
help='Number of cross attention heads per layer to supervised with alignments')
parser.add_argument('--alignment-layer', type=int, metavar='D',
help='Layer number which has to be supervised. 0 corresponding to the bottommost layer.')
parser.add_argument('--full-context-alignment', type=bool, metavar='D',
help='Whether or not alignment is supervised conditioned on the full target context.')
|
https://github.com/pytorch/fairseq/issues/2673
|
Traceback (most recent call last):
File ".../bin/fairseq-train", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-train')()
File ".../fairseq_cli/train.py", line 351, in cli_main
distributed_utils.call_main(args, main)
File ".../fairseq/distributed_utils.py", line 254, in call_main
main(args, **kwargs)
File ".../fairseq_cli/train.py", line 125, in main
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File ".../fairseq_cli/train.py", line 207, in train
log_output = trainer.train_step(samples)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File ".../fairseq/trainer.py", line 479, in train_step
ignore_grad=is_dummy_batch,
File ".../fairseq/tasks/fairseq_task.py", line 408, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File ".../fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py", line 36, in forward
net_output = model(**sample['net_input'])
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File ".../fairseq/models/transformer_align.py", line 51, in forward
return self.forward_decoder(prev_output_tokens, encoder_out)
File ".../fairseq/models/transformer_align.py", line 75, in forward_decoder
**extra_args,
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() got an unexpected keyword argument 'full_context_alignment'
|
TypeError
|
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"][0]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
|
def forward_align(self, src_tokens, src_lengths, prev_output_tokens):
avg_attn = None
for model in self.models:
decoder_out = model(src_tokens, src_lengths, prev_output_tokens)
attn = decoder_out[1]["attn"]
if avg_attn is None:
avg_attn = attn
else:
avg_attn.add_(attn)
if len(self.models) > 1:
avg_attn.div_(len(self.models))
return avg_attn
|
https://github.com/pytorch/fairseq/issues/2673
|
Traceback (most recent call last):
File ".../bin/fairseq-train", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-train')()
File ".../fairseq_cli/train.py", line 351, in cli_main
distributed_utils.call_main(args, main)
File ".../fairseq/distributed_utils.py", line 254, in call_main
main(args, **kwargs)
File ".../fairseq_cli/train.py", line 125, in main
valid_losses, should_stop = train(args, trainer, task, epoch_itr)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File ".../fairseq_cli/train.py", line 207, in train
log_output = trainer.train_step(samples)
File "/usr/lib64/python3.6/contextlib.py", line 52, in inner
return func(*args, **kwds)
File ".../fairseq/trainer.py", line 479, in train_step
ignore_grad=is_dummy_batch,
File ".../fairseq/tasks/fairseq_task.py", line 408, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File ".../fairseq/criterions/label_smoothed_cross_entropy_with_alignment.py", line 36, in forward
net_output = model(**sample['net_input'])
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
File ".../fairseq/models/transformer_align.py", line 51, in forward
return self.forward_decoder(prev_output_tokens, encoder_out)
File ".../fairseq/models/transformer_align.py", line 75, in forward_decoder
**extra_args,
File ".../lib64/python3.6/site-packages/torch/nn/modules/module.py", line 722, in _call_impl
result = self.forward(*input, **kwargs)
TypeError: forward() got an unexpected keyword argument 'full_context_alignment'
|
TypeError
|
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
|
def add_args(parser):
"""Add model-specific arguments to the parser."""
# fmt: off
parser.add_argument('--activation-fn',
choices=utils.get_available_activation_fns(),
help='activation function to use')
parser.add_argument('--dropout', type=float, metavar='D',
help='dropout probability')
parser.add_argument('--attention-dropout', type=float, metavar='D',
help='dropout probability for attention weights')
parser.add_argument('--activation-dropout', '--relu-dropout', type=float, metavar='D',
help='dropout probability after activation in FFN.')
parser.add_argument('--encoder-embed-path', type=str, metavar='STR',
help='path to pre-trained encoder embedding')
parser.add_argument('--encoder-embed-dim', type=int, metavar='N',
help='encoder embedding dimension')
parser.add_argument('--encoder-ffn-embed-dim', type=int, metavar='N',
help='encoder embedding dimension for FFN')
parser.add_argument('--encoder-layers', type=int, metavar='N',
help='num encoder layers')
parser.add_argument('--encoder-attention-heads', type=int, metavar='N',
help='num encoder attention heads')
parser.add_argument('--encoder-normalize-before', action='store_true',
help='apply layernorm before each encoder block')
parser.add_argument('--encoder-learned-pos', action='store_true',
help='use learned positional embeddings in the encoder')
parser.add_argument('--decoder-embed-path', type=str, metavar='STR',
help='path to pre-trained decoder embedding')
parser.add_argument('--decoder-embed-dim', type=int, metavar='N',
help='decoder embedding dimension')
parser.add_argument('--decoder-ffn-embed-dim', type=int, metavar='N',
help='decoder embedding dimension for FFN')
parser.add_argument('--decoder-layers', type=int, metavar='N',
help='num decoder layers')
parser.add_argument('--decoder-attention-heads', type=int, metavar='N',
help='num decoder attention heads')
parser.add_argument('--decoder-learned-pos', action='store_true',
help='use learned positional embeddings in the decoder')
parser.add_argument('--decoder-normalize-before', action='store_true',
help='apply layernorm before each decoder block')
parser.add_argument('--share-decoder-input-output-embed', action='store_true',
help='share decoder input and output embeddings')
parser.add_argument('--share-all-embeddings', action='store_true',
help='share encoder, decoder and output embeddings'
' (requires shared dictionary and embed dim)')
parser.add_argument('--no-token-positional-embeddings', default=False, action='store_true',
help='if set, disables positional embeddings (outside self attention)')
parser.add_argument('--adaptive-softmax-cutoff', metavar='EXPR',
help='comma separated list of adaptive softmax cutoff points. '
'Must be used with adaptive_loss criterion'),
parser.add_argument('--adaptive-softmax-dropout', type=float, metavar='D',
help='sets adaptive softmax dropout for the tail projections')
parser.add_argument('--layernorm-embedding', action='store_true',
help='add layernorm to embedding')
parser.add_argument('--no-scale-embedding', action='store_true',
help='if True, dont scale embeddings')
# args for "Cross+Self-Attention for Transformer Models" (Peitz et al., 2019)
parser.add_argument('--no-cross-attention', default=False, action='store_true',
help='do not perform cross-attention')
parser.add_argument('--cross-self-attention', default=False, action='store_true',
help='perform cross+self-attention')
parser.add_argument('--layer-wise-attention', default=False, action='store_true',
help='perform layer-wise attention (cross-attention or cross+self-attention)')
# args for "Reducing Transformer Depth on Demand with Structured Dropout" (Fan et al., 2019)
parser.add_argument('--encoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for encoder')
parser.add_argument('--decoder-layerdrop', type=float, metavar='D', default=0,
help='LayerDrop probability for decoder')
parser.add_argument('--encoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
parser.add_argument('--decoder-layers-to-keep', default=None,
help='which layers to *keep* when pruning as a comma-separated list')
# args for Training with Quantization Noise for Extreme Model Compression ({Fan*, Stock*} et al., 2020)
parser.add_argument('--quant-noise-pq', type=float, metavar='D', default=0,
help='iterative PQ quantization noise at training time')
parser.add_argument('--quant-noise-pq-block-size', type=int, metavar='D', default=8,
help='block size of quantization noise at training time')
parser.add_argument('--quant-noise-scalar', type=float, metavar='D', default=0,
help='scalar quantization noise and scalar quantization at training time')
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout = args.dropout
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
if self.encoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.encoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
|
def __init__(self, args, dictionary, embed_tokens):
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self.dropout = args.dropout
self.encoder_layerdrop = args.encoder_layerdrop
embed_dim = embed_tokens.embedding_dim
self.padding_idx = embed_tokens.padding_idx
self.max_source_positions = args.max_source_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
self.embed_positions = (
PositionalEmbedding(
args.max_source_positions,
embed_dim,
self.padding_idx,
learned=args.encoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.layer_wise_attention = getattr(args, "layer_wise_attention", False)
self.layers = nn.ModuleList([])
self.layers.extend(
[self.build_encoder_layer(args) for i in range(args.encoder_layers)]
)
self.num_layers = len(self.layers)
if args.encoder_normalize_before:
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def forward(
self,
src_tokens,
src_lengths,
cls_input: Optional[Tensor] = None,
return_all_hiddens: bool = False,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
x, encoder_embedding = self.forward_embedding(src_tokens)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
|
def forward(
self,
src_tokens,
src_lengths,
cls_input: Optional[Tensor] = None,
return_all_hiddens: bool = False,
):
"""
Args:
src_tokens (LongTensor): tokens in the source language of shape
`(batch, src_len)`
src_lengths (torch.LongTensor): lengths of each source sentence of
shape `(batch)`
return_all_hiddens (bool, optional): also return all of the
intermediate hidden states (default: False).
Returns:
namedtuple:
- **encoder_out** (Tensor): the last encoder layer's output of
shape `(src_len, batch, embed_dim)`
- **encoder_padding_mask** (ByteTensor): the positions of
padding elements of shape `(batch, src_len)`
- **encoder_embedding** (Tensor): the (scaled) embedding lookup
of shape `(batch, src_len, embed_dim)`
- **encoder_states** (List[Tensor]): all intermediate
hidden states of shape `(src_len, batch, embed_dim)`.
Only populated if *return_all_hiddens* is True.
"""
if self.layer_wise_attention:
return_all_hiddens = True
x, encoder_embedding = self.forward_embedding(src_tokens)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
# compute padding mask
encoder_padding_mask = src_tokens.eq(self.padding_idx)
encoder_states = [] if return_all_hiddens else None
# encoder layers
for layer in self.layers:
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = torch.empty(1).uniform_()
if not self.training or (dropout_probability > self.encoder_layerdrop):
x = layer(x, encoder_padding_mask)
if return_all_hiddens:
assert encoder_states is not None
encoder_states.append(x)
if self.layer_norm is not None:
x = self.layer_norm(x)
if return_all_hiddens:
encoder_states[-1] = x
return EncoderOut(
encoder_out=x, # T x B x C
encoder_padding_mask=encoder_padding_mask, # B x T
encoder_embedding=encoder_embedding, # B x T x C
encoder_states=encoder_states, # List[T x B x C]
src_tokens=None,
src_lengths=None,
)
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout = args.dropout
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
if self.decoder_layerdrop > 0.0:
self.layers = LayerDropModuleList(p=self.decoder_layerdrop)
else:
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
self.adaptive_softmax = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim**-0.5
)
|
def __init__(self, args, dictionary, embed_tokens, no_encoder_attn=False):
self.args = args
super().__init__(dictionary)
self.register_buffer("version", torch.Tensor([3]))
self._future_mask = torch.empty(0)
self.dropout = args.dropout
self.decoder_layerdrop = args.decoder_layerdrop
self.share_input_output_embed = args.share_decoder_input_output_embed
input_embed_dim = embed_tokens.embedding_dim
embed_dim = args.decoder_embed_dim
self.embed_dim = embed_dim
self.output_embed_dim = args.decoder_output_dim
self.padding_idx = embed_tokens.padding_idx
self.max_target_positions = args.max_target_positions
self.embed_tokens = embed_tokens
self.embed_scale = 1.0 if args.no_scale_embedding else math.sqrt(embed_dim)
if not args.adaptive_input and args.quant_noise_pq > 0:
self.quant_noise = apply_quant_noise_(
nn.Linear(embed_dim, embed_dim, bias=False),
args.quant_noise_pq,
args.quant_noise_pq_block_size,
)
else:
self.quant_noise = None
self.project_in_dim = (
Linear(input_embed_dim, embed_dim, bias=False)
if embed_dim != input_embed_dim
else None
)
self.embed_positions = (
PositionalEmbedding(
args.max_target_positions,
embed_dim,
self.padding_idx,
learned=args.decoder_learned_pos,
)
if not args.no_token_positional_embeddings
else None
)
self.cross_self_attention = getattr(args, "cross_self_attention", False)
self.layer_wise_attention = getattr(args, "layer_wise_attention", False)
self.layers = nn.ModuleList([])
self.layers.extend(
[
self.build_decoder_layer(args, no_encoder_attn)
for _ in range(args.decoder_layers)
]
)
self.num_layers = len(self.layers)
self.adaptive_softmax = None
self.project_out_dim = (
Linear(embed_dim, self.output_embed_dim, bias=False)
if embed_dim != self.output_embed_dim and not args.tie_adaptive_weights
else None
)
if args.adaptive_softmax_cutoff is not None:
self.adaptive_softmax = AdaptiveSoftmax(
len(dictionary),
self.output_embed_dim,
options.eval_str_list(args.adaptive_softmax_cutoff, type=int),
dropout=args.adaptive_softmax_dropout,
adaptive_inputs=embed_tokens if args.tie_adaptive_weights else None,
factor=args.adaptive_softmax_factor,
tie_proj=args.tie_adaptive_proj,
)
if args.decoder_normalize_before and not getattr(
args, "no_decoder_final_norm", False
):
self.layer_norm = LayerNorm(embed_dim)
else:
self.layer_norm = None
if getattr(args, "layernorm_embedding", False):
self.layernorm_embedding = LayerNorm(embed_dim)
else:
self.layernorm_embedding = None
if self.share_input_output_embed:
self.output_projection = nn.Linear(
self.embed_tokens.weight.shape[1],
self.embed_tokens.weight.shape[0],
bias=False,
)
self.output_projection.weight = self.embed_tokens.weight
else:
self.output_projection = nn.Linear(
self.output_embed_dim, len(dictionary), bias=False
)
nn.init.normal_(
self.output_projection.weight, mean=0, std=self.output_embed_dim**-0.5
)
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(prev_output_tokens, incremental_state=incremental_state)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
x, layer_attn, _ = layer(
x,
encoder_out.encoder_out if encoder_out is not None else None,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
|
def extract_features(
self,
prev_output_tokens,
encoder_out: Optional[EncoderOut] = None,
incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
full_context_alignment: bool = False,
alignment_layer: Optional[int] = None,
alignment_heads: Optional[int] = None,
):
"""
Similar to *forward* but only return features.
Includes several features from "Jointly Learning to Align and
Translate with Transformer Models" (Garg et al., EMNLP 2019).
Args:
full_context_alignment (bool, optional): don't apply
auto-regressive mask to self-attention (default: False).
alignment_layer (int, optional): return mean alignment over
heads at this layer (default: last layer).
alignment_heads (int, optional): only average alignment over
this many heads (default: all heads).
Returns:
tuple:
- the decoder's features of shape `(batch, tgt_len, embed_dim)`
- a dictionary with any model-specific outputs
"""
if alignment_layer is None:
alignment_layer = self.num_layers - 1
# embed positions
positions = (
self.embed_positions(prev_output_tokens, incremental_state=incremental_state)
if self.embed_positions is not None
else None
)
if incremental_state is not None:
prev_output_tokens = prev_output_tokens[:, -1:]
if positions is not None:
positions = positions[:, -1:]
# embed tokens and positions
x = self.embed_scale * self.embed_tokens(prev_output_tokens)
if self.quant_noise is not None:
x = self.quant_noise(x)
if self.project_in_dim is not None:
x = self.project_in_dim(x)
if positions is not None:
x += positions
if self.layernorm_embedding is not None:
x = self.layernorm_embedding(x)
x = F.dropout(x, p=self.dropout, training=self.training)
# B x T x C -> T x B x C
x = x.transpose(0, 1)
self_attn_padding_mask: Optional[Tensor] = None
if self.cross_self_attention or prev_output_tokens.eq(self.padding_idx).any():
self_attn_padding_mask = prev_output_tokens.eq(self.padding_idx)
# decoder layers
attn: Optional[Tensor] = None
inner_states: List[Optional[Tensor]] = [x]
for idx, layer in enumerate(self.layers):
encoder_state: Optional[Tensor] = None
if encoder_out is not None:
if self.layer_wise_attention:
encoder_states = encoder_out.encoder_states
assert encoder_states is not None
encoder_state = encoder_states[idx]
else:
encoder_state = encoder_out.encoder_out
if incremental_state is None and not full_context_alignment:
self_attn_mask = self.buffered_future_mask(x)
else:
self_attn_mask = None
# add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
dropout_probability = torch.empty(1).uniform_()
if not self.training or (dropout_probability > self.decoder_layerdrop):
x, layer_attn, _ = layer(
x,
encoder_state,
encoder_out.encoder_padding_mask if encoder_out is not None else None,
incremental_state,
self_attn_mask=self_attn_mask,
self_attn_padding_mask=self_attn_padding_mask,
need_attn=bool((idx == alignment_layer)),
need_head_weights=bool((idx == alignment_layer)),
)
inner_states.append(x)
if layer_attn is not None and idx == alignment_layer:
attn = layer_attn.float().to(x)
if attn is not None:
if alignment_heads is not None:
attn = attn[:alignment_heads]
# average probabilities over heads
attn = attn.mean(dim=0)
if self.layer_norm is not None:
x = self.layer_norm(x)
# T x B x C -> B x T x C
x = x.transpose(0, 1)
if self.project_out_dim is not None:
x = self.project_out_dim(x)
return x, {"attn": [attn], "inner_states": inner_states}
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
|
def base_architecture(args):
args.encoder_embed_path = getattr(args, "encoder_embed_path", None)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 512)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 2048)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 8)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.encoder_learned_pos = getattr(args, "encoder_learned_pos", False)
args.decoder_embed_path = getattr(args, "decoder_embed_path", None)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 8)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.activation_fn = getattr(args, "activation_fn", "relu")
args.dropout = getattr(args, "dropout", 0.1)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.share_decoder_input_output_embed = getattr(
args, "share_decoder_input_output_embed", False
)
args.share_all_embeddings = getattr(args, "share_all_embeddings", False)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.no_cross_attention = getattr(args, "no_cross_attention", False)
args.cross_self_attention = getattr(args, "cross_self_attention", False)
args.layer_wise_attention = getattr(args, "layer_wise_attention", False)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.no_scale_embedding = getattr(args, "no_scale_embedding", False)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def build_model(cls, args, task):
"""Build a new model instance."""
# make sure all arguments are present in older models
base_architecture(args)
if args.encoder_layers_to_keep:
args.encoder_layers = len(args.encoder_layers_to_keep.split(","))
if args.decoder_layers_to_keep:
args.decoder_layers = len(args.decoder_layers_to_keep.split(","))
if getattr(args, "max_source_positions", None) is None:
args.max_source_positions = DEFAULT_MAX_SOURCE_POSITIONS
if getattr(args, "max_target_positions", None) is None:
args.max_target_positions = DEFAULT_MAX_TARGET_POSITIONS
src_dict, tgt_dict = task.source_dictionary, task.target_dictionary
if args.share_all_embeddings:
if src_dict != tgt_dict:
raise ValueError("--share-all-embeddings requires a joined dictionary")
if args.encoder_embed_dim != args.decoder_embed_dim:
raise ValueError(
"--share-all-embeddings requires --encoder-embed-dim to match --decoder-embed-dim"
)
if args.decoder_embed_path and (
args.decoder_embed_path != args.encoder_embed_path
):
raise ValueError(
"--share-all-embeddings not compatible with --decoder-embed-path"
)
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = encoder_embed_tokens
args.share_decoder_input_output_embed = True
else:
encoder_embed_tokens = cls.build_embedding(
args, src_dict, args.encoder_embed_dim, args.encoder_embed_path
)
decoder_embed_tokens = cls.build_embedding(
args, tgt_dict, args.decoder_embed_dim, args.decoder_embed_path
)
encoder = cls.build_encoder(args, src_dict, encoder_embed_tokens)
decoder = cls.build_decoder(args, tgt_dict, decoder_embed_tokens)
return cls(args, encoder, decoder)
|
def build_model(cls, args, task):
# set any default arguments
transformer_align(args)
transformer_model = TransformerModel.build_model(args, task)
return TransformerAlignModel(
transformer_model.encoder, transformer_model.decoder, args
)
|
https://github.com/pytorch/fairseq/issues/2079
|
bash train1.sh
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:14321
2020-04-29 21:50:54 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:14321
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 1
2020-04-29 21:50:55 | INFO | fairseq.distributed_utils | initialized host zixi-MS-7B79 as rank 0
2020-04-29 21:50:57 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.1, activation_fn='relu', adam_betas='(0.9, 0.999)', adam_eps=1e-08, adaptive_input=False, adaptive_softmax_cutoff=None, adaptive_softmax_dropout=0, all_gather_list_size=16384, arch='transformer', attention_dropout=0.1, best_checkpoint_metric='ppl', bpe='subword_nmt', bpe_codes=None, bpe_separator='@@', broadcast_buffers=False, bucket_cap_mb=25, checkpoint_suffix='', clip_norm=25, cpu=False, criterion='cross_entropy', cross_self_attention=False, curriculum=0, data='temp/bpe/bin', data_buffer_size=0, dataset_impl=None, ddp_backend='no_c10d', decoder_attention_heads=4, decoder_embed_dim=256, decoder_embed_path=None, decoder_ffn_embed_dim=256, decoder_input_dim=256, decoder_layerdrop=0.1, decoder_layers=4, decoder_layers_to_keep=None, decoder_learned_pos=False, decoder_normalize_before=True, decoder_output_dim=256, device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:14321', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=2, distributed_wrapper='DDP', dropout=0.1, empty_cache_freq=0, encoder_attention_heads=4, encoder_embed_dim=256, encoder_embed_path=None, encoder_ffn_embed_dim=256, encoder_layerdrop=0.1, encoder_layers=4, encoder_layers_to_keep=None, encoder_learned_pos=False, encoder_normalize_before=True, eval_bleu=False, eval_bleu_args=None, eval_bleu_detok='space', eval_bleu_detok_args=None, eval_bleu_print_samples=False, eval_bleu_remove_bpe=None, eval_tokenized_bleu=False, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=False, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, keep_best_checkpoints=10, keep_interval_updates=-1, keep_last_epochs=-1, layer_wise_attention=False, layernorm_embedding=False, left_pad_source='True', left_pad_target='False', load_alignments=False, localsgd_frequency=3, log_format=None, log_interval=100, lr=[0.005], lr_scheduler='fixed', lr_shrink=0.5, max_epoch=0, max_sentences=None, max_sentences_valid=None, max_source_positions=1024, max_target_positions=1024, max_tokens=5000, max_tokens_valid=5000, max_update=0, maximize_best_checkpoint_metric=False, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, model_parallel_size=1, no_cross_attention=False, no_epoch_checkpoints=False, no_last_checkpoints=False, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, no_scale_embedding=False, no_token_positional_embeddings=False, nprocs_per_node=2, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=8, quant_noise_pq=0, quant_noise_pq_block_size=8, quant_noise_scalar=0, quantization_config_path=None, required_batch_size_multiple=8, reset_dataloader=False, reset_lr_scheduler=False, reset_meters=False, reset_optimizer=False, restore_file='checkpoint_last.pt', save_dir='checkpoints/transformer', save_interval=1, save_interval_updates=0, seed=1, sentence_avg=False, share_all_embeddings=False, share_decoder_input_output_embed=False, skip_invalid_size_inputs_valid_test=False, slowmo_algorithm='LocalSGD', slowmo_momentum=None, source_lang=None, target_lang=None, task='translation', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, train_subset='train', truncate_source=False, update_freq=[8], upsample_primary=1, use_bmuf=False, use_old_adam=False, user_dir=None, valid_subset='valid', validate_interval=1, warmup_updates=0, weight_decay=0.0)
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [src] dictionary: 48947 types
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | [tgt] dictionary: 48613 types
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.src
2020-04-29 21:50:57 | INFO | fairseq.data.data_utils | loaded 8204 examples from: temp/bpe/bin/valid.src-tgt.tgt
2020-04-29 21:50:57 | INFO | fairseq.tasks.translation | temp/bpe/bin valid src-tgt 8204 examples
2020-04-29 21:50:58 | INFO | fairseq_cli.train | TransformerModel(
(encoder): TransformerEncoder(
(embed_tokens): Embedding(48947, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerEncoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(decoder): TransformerDecoder(
(embed_tokens): Embedding(48613, 256, padding_idx=1)
(embed_positions): SinusoidalPositionalEmbedding()
(layers): ModuleList(
(0): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(1): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(2): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
(3): TransformerDecoderLayer(
(self_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(self_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(encoder_attn): MultiheadAttention(
(k_proj): Linear(in_features=256, out_features=256, bias=True)
(v_proj): Linear(in_features=256, out_features=256, bias=True)
(q_proj): Linear(in_features=256, out_features=256, bias=True)
(out_proj): Linear(in_features=256, out_features=256, bias=True)
)
(encoder_attn_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(fc1): Linear(in_features=256, out_features=256, bias=True)
(fc2): Linear(in_features=256, out_features=256, bias=True)
(final_layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
)
)
(layer_norm): LayerNorm((256,), eps=1e-05, elementwise_affine=True)
(output_projection): Linear(in_features=256, out_features=48613, bias=False)
)
)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | model transformer, criterion CrossEntropyCriterion
2020-04-29 21:50:58 | INFO | fairseq_cli.train | num. model params: 41642240 (num. trained: 41642240)
2020-04-29 21:50:58 | INFO | fairseq_cli.train | training on 2 GPUs
2020-04-29 21:50:58 | INFO | fairseq_cli.train | max tokens per GPU = 5000 and max sentences per GPU = None
2020-04-29 21:50:58 | INFO | fairseq.trainer | no existing checkpoint found checkpoints/transformer/checkpoint_last.pt
2020-04-29 21:50:58 | INFO | fairseq.trainer | loading train data for epoch 1
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.src
2020-04-29 21:50:58 | INFO | fairseq.data.data_utils | loaded 1130841 examples from: temp/bpe/bin/train.src-tgt.tgt
2020-04-29 21:50:58 | INFO | fairseq.tasks.translation | temp/bpe/bin train src-tgt 1130841 examples
2020-04-29 21:51:05 | INFO | fairseq.trainer | NOTE: your device may support faster training with --fp16
epoch 001: 0%| | 0/210 [00:00<?, ?it/s]/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
/opt/conda/conda-bld/pytorch_1587428398394/work/torch/csrc/utils/python_arg_parser.cpp:756: UserWarning: This overload of add_ is deprecated:
add_(Number alpha, Tensor other)
Consider using one of the following signatures instead:
add_(Tensor other, *, Number alpha)
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119770 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | valid on 'valid' subset | loss 4.067 | ppl 16.77 | wps 119685 | wpb 6793.9 | bsz 341.8 | num_updates 210
epoch 001 | loss 6.743 | ppl 107.11 | wps 59759.2 | ups 0.79 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 21:55:35 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint1.pt (epoch 1 @ 210 updates, score 16.77) (writing took 0.49035206900043704 seconds)
epoch 001 | loss 6.743 | ppl 107.11 | wps 59649.1 | ups 0.78 | wpb 76004.5 | bsz 5385 | num_updates 210 | lr 0.005 | gnorm 0.564 | clip 0 | train_wall 236 | wall 276
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 121245 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | valid on 'valid' subset | loss 1.464 | ppl 2.76 | wps 118177 | wpb 6793.9 | bsz 341.8 | num_updates 420 | best_ppl 2.76
epoch 002 | loss 1.986 | ppl 3.96 | wps 57927.6 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 551
epoch 003: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:00:13 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint2.pt (epoch 2 @ 420 updates, score 2.76) (writing took 3.3108816809999553 seconds)
epoch 002 | loss 1.986 | ppl 3.96 | wps 57341.3 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 420 | lr 0.005 | gnorm 0.262 | clip 0 | train_wall 243 | wall 555
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 120108 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | valid on 'valid' subset | loss 1.184 | ppl 2.27 | wps 117797 | wpb 6793.9 | bsz 341.8 | num_updates 630 | best_ppl 2.27
epoch 003 | loss 1.224 | ppl 2.34 | wps 57350.6 | ups 0.75 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 246 | wall 830
epoch 004: 0%| | 0/210 [00:00<?, ?it/s]2020-04-29 22:04:51 | INFO | fairseq.checkpoint_utils | saved checkpoint checkpoints/transformer/checkpoint3.pt (epoch 3 @ 630 updates, score 2.27) (writing took 3.109906989000592 seconds)
epoch 003 | loss 1.224 | ppl 2.34 | wps 57392.5 | ups 0.76 | wpb 76004.5 | bsz 5385 | num_updates 630 | lr 0.005 | gnorm 0.137 | clip 0 | train_wall 243 | wall 833
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 355, in cli_main
nprocs=args.distributed_world_size,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 200, in spawn
return start_processes(fn, args, nprocs, join, daemon, start_method='spawn')
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 158, in start_processes
while not context.join():
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 119, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 20, in _wrap
fn(i, *args)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 324, in distributed_main
main(args, init_distributed=True)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 117, in main
valid_losses = train(args, trainer, task, epoch_itr, max_update)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq_cli/train.py", line 187, in train
log_output = trainer.train_step(samples)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/home/zixi/EE-599/fairseq/fairseq/trainer.py", line 379, in train_step
ignore_grad=is_dummy_batch,
File "/home/zixi/EE-599/fairseq/fairseq/tasks/fairseq_task.py", line 341, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/criterions/cross_entropy.py", line 29, in forward
net_output = model(**sample['net_input'])
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/legacy_distributed_data_parallel.py", line 86, in forward
return self.module(*inputs, **kwargs)
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 272, in forward
return_all_hiddens=return_all_hiddens,
File "/home/zixi/anaconda3/envs/ROC/lib/python3.7/site-packages/torch/nn/modules/module.py", line 550, in __call__
result = self.forward(*input, **kwargs)
File "/home/zixi/EE-599/fairseq/fairseq/models/transformer.py", line 498, in forward
encoder_states[-1] = x
IndexError: list assignment index out of range
|
IndexError
|
def forward(self, x):
with torch.cuda.device(x.device):
return super().forward(x)
|
def forward(self, x):
return super().forward(x)
|
https://github.com/pytorch/fairseq/issues/1860
|
Using cache found in /home/vlad/.cache/torch/hub/pytorch_fairseq_master
Loading codes from /home/vlad/.cache/torch/pytorch_fairseq/0695ef328ddefcb8cbcfabc3196182f59c0e41e0468b10cc0db2ae9c91881fcc.bb1be17de4233e13870bd7d6065bfdb03fca0a51dd0f5d0b7edf5c188eda71f1/bpecodes ...
Read 30000 codes from the codes file.
Traceback (most recent call last):
File "/home/vlad/Documents/coding/experiments/paper-analyzer-Tretyak_Internship/papers/experiments/scientific_generation/scripts/paraphrase.py", line 46, in <module>
en2de.translate(['hello'])
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py", line 126, in translate
return self.sample(sentences, beam, verbose, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py", line 132, in sample
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py", line 165, in generate
translations = self.task.inference_step(generator, self.models, batch)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/tasks/fairseq_task.py", line 351, in inference_step
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 93, in generate
return self._generate(model, sample, **kwargs)
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 276, in _generate
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 549, in forward_decoder
temperature=temperature,
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 568, in _decode_one
tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/fairseq_model.py", line 274, in forward_decoder
return self.decoder(prev_output_tokens, **kwargs)
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/transformer.py", line 704, in forward
alignment_heads=alignment_heads,
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/transformer.py", line 807, in extract_features
need_head_weights=bool((idx == alignment_layer)),
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/modules/transformer_layer.py", line 297, in forward
need_head_weights=need_head_weights,
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/modules/multihead_attention.py", line 325, in forward
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
RuntimeError: arguments are located on different GPUs at /pytorch/aten/src/THC/generic/THCTensorMasked.cu:28
Process finished with exit code 1
|
RuntimeError
|
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - prev_key_padding_mask.size(1)),
device=prev_key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(
(batch_size, src_len - key_padding_mask.size(1)),
device=key_padding_mask.device,
)
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
|
def _append_prev_key_padding_mask(
key_padding_mask: Optional[Tensor],
prev_key_padding_mask: Optional[Tensor],
batch_size: int,
src_len: int,
static_kv: bool,
) -> Optional[Tensor]:
# saved key padding masks have shape (bsz, seq_len)
if prev_key_padding_mask is not None and static_kv:
new_key_padding_mask = prev_key_padding_mask
elif prev_key_padding_mask is not None and key_padding_mask is not None:
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
)
# During incremental decoding, as the padding token enters and
# leaves the frame, there will be a time when prev or current
# is None
elif prev_key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - prev_key_padding_mask.size(1))
if prev_key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[prev_key_padding_mask.float(), filler.float()], dim=1
)
elif key_padding_mask is not None:
filler = torch.zeros(batch_size, src_len - key_padding_mask.size(1))
if key_padding_mask.is_cuda:
filler = filler.cuda()
new_key_padding_mask = torch.cat(
[filler.float(), key_padding_mask.float()], dim=1
)
else:
new_key_padding_mask = prev_key_padding_mask
return new_key_padding_mask
|
https://github.com/pytorch/fairseq/issues/1860
|
Using cache found in /home/vlad/.cache/torch/hub/pytorch_fairseq_master
Loading codes from /home/vlad/.cache/torch/pytorch_fairseq/0695ef328ddefcb8cbcfabc3196182f59c0e41e0468b10cc0db2ae9c91881fcc.bb1be17de4233e13870bd7d6065bfdb03fca0a51dd0f5d0b7edf5c188eda71f1/bpecodes ...
Read 30000 codes from the codes file.
Traceback (most recent call last):
File "/home/vlad/Documents/coding/experiments/paper-analyzer-Tretyak_Internship/papers/experiments/scientific_generation/scripts/paraphrase.py", line 46, in <module>
en2de.translate(['hello'])
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py", line 126, in translate
return self.sample(sentences, beam, verbose, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py", line 132, in sample
batched_hypos = self.generate(tokenized_sentences, beam, verbose, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/hub_utils.py", line 165, in generate
translations = self.task.inference_step(generator, self.models, batch)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/tasks/fairseq_task.py", line 351, in inference_step
return generator.generate(models, sample, prefix_tokens=prefix_tokens)
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 93, in generate
return self._generate(model, sample, **kwargs)
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 276, in _generate
tokens[:, :step + 1], encoder_outs, temperature=self.temperature,
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/autograd/grad_mode.py", line 49, in decorate_no_grad
return func(*args, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 549, in forward_decoder
temperature=temperature,
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/sequence_generator.py", line 568, in _decode_one
tokens, encoder_out=encoder_out, incremental_state=self.incremental_states[model],
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/fairseq_model.py", line 274, in forward_decoder
return self.decoder(prev_output_tokens, **kwargs)
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/transformer.py", line 704, in forward
alignment_heads=alignment_heads,
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/models/transformer.py", line 807, in extract_features
need_head_weights=bool((idx == alignment_layer)),
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/modules/transformer_layer.py", line 297, in forward
need_head_weights=need_head_weights,
File "/home/vlad/Documents/envs/p3.6/lib/python3.6/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/home/vlad/.cache/torch/hub/pytorch_fairseq_master/fairseq/modules/multihead_attention.py", line 325, in forward
key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool), float("-inf")
RuntimeError: arguments are located on different GPUs at /pytorch/aten/src/THC/generic/THCTensorMasked.cu:28
Process finished with exit code 1
|
RuntimeError
|
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.bool)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start : mask_start + mask_size] = 1
return toks, mask
|
def binarize_with_mask(self, txt, prefix, suffix, leading_space, trailing_space):
toks = self.binarize(
prefix + leading_space + txt + trailing_space + suffix,
append_eos=True,
)
mask = torch.zeros_like(toks, dtype=torch.uint8)
mask_start = len(self.binarize(prefix))
mask_size = len(self.binarize(leading_space + txt))
mask[mask_start : mask_start + mask_size] = 1
return toks, mask
|
https://github.com/pytorch/fairseq/issues/1866
|
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | distributed init (rank 0): tcp://localhost:11739
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | distributed init (rank 1): tcp://localhost:11739
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | distributed init (rank 2): tcp://localhost:11739
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | initialized host ubuntu as rank 1
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | initialized host ubuntu as rank 2
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | distributed init (rank 3): tcp://localhost:11739
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | initialized host ubuntu as rank 3
2020-03-19 04:44:55 | INFO | fairseq.distributed_utils | initialized host ubuntu as rank 0
| dictionary: 50265 types
| dictionary: 50265 types
2020-03-19 04:45:03 | INFO | fairseq_cli.train | Namespace(activation_dropout=0.0, activation_fn='gelu', adam_betas='(0.9, 0.98)', adam_eps=1e-06, all_gather_list_size=16384, arch='roberta_base', attention_dropout=0.1, best_checkpoint_metric='accuracy', bpe='gpt2', broadcast_buffers=False, bucket_cap_mb=25, clip_norm=25, cpu=False, criterion='wsc', curriculum=0, data='WSC/', dataset_impl=None, ddp_backend='no_c10d', device_id=0, disable_validation=False, distributed_backend='nccl', distributed_init_method='tcp://localhost:11739', distributed_no_spawn=False, distributed_port=-1, distributed_rank=0, distributed_world_size=4, dropout=0.1, empty_cache_freq=0, encoder_attention_heads=12, encoder_embed_dim=768, encoder_ffn_embed_dim=3072, encoder_layerdrop=0, encoder_layers=12, encoder_layers_to_keep=None, end_learning_rate=0.0, fast_stat_sync=False, find_unused_parameters=False, fix_batches_to_gpus=False, fixed_validation_seed=None, force_anneal=None, fp16=True, fp16_init_scale=128, fp16_no_flatten_grads=False, fp16_scale_tolerance=0.0, fp16_scale_window=None, gpt2_encoder_json='https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/encoder.json', gpt2_vocab_bpe='https://dl.fbaipublicfiles.com/fairseq/gpt2_bpe/vocab.bpe', init_token=None, keep_best_checkpoints=-1, keep_interval_updates=-1, keep_last_epochs=-1, log_format='simple', log_interval=100, lr=[2e-05], lr_scheduler='polynomial_decay', max_epoch=0, max_positions=512, max_sentences=16, max_sentences_valid=16, max_tokens=None, max_tokens_valid=None, max_update=2000, maximize_best_checkpoint_metric=True, memory_efficient_fp16=False, min_loss_scale=0.0001, min_lr=-1, no_epoch_checkpoints=True, no_last_checkpoints=True, no_progress_bar=False, no_save=False, no_save_optimizer_state=True, num_workers=1, optimizer='adam', optimizer_overrides='{}', patience=-1, pooler_activation_fn='tanh', pooler_dropout=0.0, power=1.0, required_batch_size_multiple=8, reset_dataloader=True, reset_lr_scheduler=False, reset_meters=True, reset_optimizer=True, restore_file='/home/xgx/Commonsense/test/model/roberta.base/model.pt', save_dir='checkpoints', save_interval=1, save_interval_updates=0, save_predictions=None, seed=1, sentence_avg=False, skip_invalid_size_inputs_valid_test=False, task='wsc', tensorboard_logdir='', threshold_loss_scale=None, tokenizer=None, total_num_update=2000, train_subset='train', update_freq=[1], use_bmuf=False, use_old_adam=False, user_dir='/home/xgx/fairseq/examples/roberta/wsc', valid_subset='val', validate_interval=1, warmup_updates=250, weight_decay=0.01, wsc_cross_entropy=True, wsc_margin_alpha=1.0, wsc_margin_beta=0.0)
| dictionary: 50265 types
| dictionary: 50265 types
Traceback (most recent call last):
File "/home/xgx/miniconda3/bin/fairseq-train", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-train')()
File "/home/xgx/fairseq/fairseq_cli/train.py", line 317, in cli_main
nprocs=args.distributed_world_size,
File "/home/xgx/miniconda3/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 171, in spawn
while not spawn_context.join():
File "/home/xgx/miniconda3/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 118, in join
raise Exception(msg)
Exception:
-- Process 1 terminated with the following error:
Traceback (most recent call last):
File "/home/xgx/miniconda3/lib/python3.7/site-packages/torch/multiprocessing/spawn.py", line 19, in _wrap
fn(i, *args)
File "/home/xgx/fairseq/fairseq_cli/train.py", line 286, in distributed_main
main(args, init_distributed=True)
File "/home/xgx/fairseq/fairseq_cli/train.py", line 63, in main
criterion = task.build_criterion(args)
File "/home/xgx/fairseq/fairseq/tasks/fairseq_task.py", line 226, in build_criterion
return criterions.build_criterion(args, self)
File "/home/xgx/fairseq/fairseq/registry.py", line 41, in build_x
return builder(args, *extra_args, **extra_kwargs)
File "/home/xgx/fairseq/fairseq/criterions/fairseq_criterion.py", line 56, in build_criterion
'{}.build_criterion'.format(cls.__name__)
NotImplementedError: Unable to infer Criterion arguments, please implement WSCCriterion.build_criterion
|
NotImplementedError
|
def get_perplexity(loss, round=2, base=2):
if loss is None:
return 0.0
try:
return safe_round(base**loss, round)
except OverflowError:
return float("inf")
|
def get_perplexity(loss, round=2, base=2):
if loss is None:
return 0.0
return safe_round(base**loss, round)
|
https://github.com/pytorch/fairseq/issues/1833
|
Traceback (most recent call last):
File "/SFS/user/wp/prihodad/git/oas-training/data/examples/biophysical_properties/condaenv/bin/fairseq-train", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-train')()
File "/SFS/user/wp/prihodad/git/fairseq/fairseq_cli/train.py", line 321, in cli_main
main(args)
File "/SFS/user/wp/prihodad/git/fairseq/fairseq_cli/train.py", line 96, in main
train(args, trainer, task, epoch_itr)
File "/SFS/user/wp/prihodad/git/oas-training/condaenv/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/SFS/user/wp/prihodad/git/fairseq/fairseq_cli/train.py", line 203, in train
stats = get_training_stats(metrics.get_smoothed_values('train'))
File "/SFS/user/wp/prihodad/git/fairseq/fairseq_cli/train.py", line 212, in get_training_stats
stats['ppl'] = utils.get_perplexity(stats['nll_loss'])
File "/SFS/user/wp/prihodad/git/fairseq/fairseq/utils.py", line 349, in get_perplexity
return safe_round(base**loss, round)
OverflowError: (34, 'Numerical result out of range')
|
OverflowError
|
def __init__(self, task, classification_head_name, regression_target):
super().__init__(task)
self.classification_head_name = classification_head_name
self.regression_target = regression_target
|
def __init__(self, task, classification_head_name):
super().__init__(task)
self.classification_head_name = classification_head_name
|
https://github.com/pytorch/fairseq/issues/1802
|
2020-03-08 13:40:11 | INFO | fairseq_cli.train | model roberta_large, criterion SentencePredictionCriterion
2020-03-08 13:40:11 | INFO | fairseq_cli.train | num. model params: 357499983 (num. trained: 357499983)
2020-03-08 13:40:11 | INFO | fairseq_cli.train | training on 1 GPUs
2020-03-08 13:40:11 | INFO | fairseq_cli.train | max tokens per GPU = 4 and max sentences per GPU = 2
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.dense.weight
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.dense.bias
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.out_proj.weight
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.out_proj.bias
2020-03-08 13:40:11 | INFO | fairseq.trainer | loaded checkpoint roberta.large/model.pt (epoch 1 @ 0 updates)
2020-03-08 13:40:11 | INFO | fairseq.trainer | loading train data for epoch 1
2020-03-08 13:40:11 | INFO | fairseq.data.data_utils | loaded 160600 examples from: ../data/input0/train
2020-03-08 13:40:11 | INFO | fairseq.data.data_utils | loaded 160600 examples from: ../data/label/train
2020-03-08 13:40:11 | INFO | fairseq.tasks.sentence_prediction | Loaded train with #samples: 160600
2020-03-08 13:40:11 | WARNING | fairseq.data.data_utils | 160580 samples have invalid sizes and will be skipped, max_positions=4, first few sample ids=[141335, 12811, 122058, 104432, 1925, 141500, 65517, 143950, 59283, 155828]
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/nas/home/thawani/MCS/fairseq/fairseq_cli/train.py", line 322, in cli_main
main(args)
File "/nas/home/thawani/MCS/fairseq/fairseq_cli/train.py", line 100, in main
train(args, trainer, task, epoch_itr)
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nas/home/thawani/MCS/fairseq/fairseq_cli/train.py", line 177, in train
log_output = trainer.train_step(samples)
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nas/home/thawani/MCS/fairseq/fairseq/trainer.py", line 319, in train_step
ignore_grad=is_dummy_batch,
File "/nas/home/thawani/MCS/fairseq/fairseq/tasks/fairseq_task.py", line 337, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/nas/home/thawani/MCS/fairseq/fairseq/criterions/sentence_prediction.py", line 40, in forward
and self.args.classification_head_name in model.classification_heads
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/site-packages/torch/nn/modules/module.py", line 576, in __getattr__
type(self).__name__, name))
AttributeError: 'SentencePredictionCriterion' object has no attribute 'args'
|
AttributeError
|
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.classification_head_name in model.classification_heads
), (
"model must provide sentence classification head for --criterion=sentence_prediction"
)
logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.classification_head_name,
)
targets = model.get_targets(sample, [logits]).view(-1)
sample_size = targets.numel()
if not self.regression_target:
loss = F.nll_loss(
F.log_softmax(logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
else:
logits = logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(
logits,
targets,
reduction="sum",
)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.regression_target:
preds = logits.argmax(dim=1)
logging_output["ncorrect"] = utils.item((preds == targets).sum())
return loss, sample_size, logging_output
|
def forward(self, model, sample, reduce=True):
"""Compute the loss for the given sample.
Returns a tuple with three elements:
1) the loss
2) the sample size, which is used as the denominator for the gradient
3) logging outputs to display while training
"""
assert (
hasattr(model, "classification_heads")
and self.args.classification_head_name in model.classification_heads
), (
"model must provide sentence classification head for --criterion=sentence_prediction"
)
logits, _ = model(
**sample["net_input"],
features_only=True,
classification_head_name=self.args.classification_head_name,
)
targets = model.get_targets(sample, [logits]).view(-1)
sample_size = targets.numel()
if not self.args.regression_target:
loss = F.nll_loss(
F.log_softmax(logits, dim=-1, dtype=torch.float32),
targets,
reduction="sum",
)
else:
logits = logits.squeeze().float()
targets = targets.float()
loss = F.mse_loss(
logits,
targets,
reduction="sum",
)
logging_output = {
"loss": loss.data,
"ntokens": sample["ntokens"],
"nsentences": sample_size,
"sample_size": sample_size,
}
if not self.args.regression_target:
preds = logits.argmax(dim=1)
logging_output["ncorrect"] = utils.item((preds == targets).sum())
return loss, sample_size, logging_output
|
https://github.com/pytorch/fairseq/issues/1802
|
2020-03-08 13:40:11 | INFO | fairseq_cli.train | model roberta_large, criterion SentencePredictionCriterion
2020-03-08 13:40:11 | INFO | fairseq_cli.train | num. model params: 357499983 (num. trained: 357499983)
2020-03-08 13:40:11 | INFO | fairseq_cli.train | training on 1 GPUs
2020-03-08 13:40:11 | INFO | fairseq_cli.train | max tokens per GPU = 4 and max sentences per GPU = 2
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.dense.weight
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.dense.bias
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.out_proj.weight
2020-03-08 13:40:11 | INFO | fairseq.models.roberta.model | Overwriting classification_heads.head_name.out_proj.bias
2020-03-08 13:40:11 | INFO | fairseq.trainer | loaded checkpoint roberta.large/model.pt (epoch 1 @ 0 updates)
2020-03-08 13:40:11 | INFO | fairseq.trainer | loading train data for epoch 1
2020-03-08 13:40:11 | INFO | fairseq.data.data_utils | loaded 160600 examples from: ../data/input0/train
2020-03-08 13:40:11 | INFO | fairseq.data.data_utils | loaded 160600 examples from: ../data/label/train
2020-03-08 13:40:11 | INFO | fairseq.tasks.sentence_prediction | Loaded train with #samples: 160600
2020-03-08 13:40:11 | WARNING | fairseq.data.data_utils | 160580 samples have invalid sizes and will be skipped, max_positions=4, first few sample ids=[141335, 12811, 122058, 104432, 1925, 141500, 65517, 143950, 59283, 155828]
Traceback (most recent call last):
File "train.py", line 11, in <module>
cli_main()
File "/nas/home/thawani/MCS/fairseq/fairseq_cli/train.py", line 322, in cli_main
main(args)
File "/nas/home/thawani/MCS/fairseq/fairseq_cli/train.py", line 100, in main
train(args, trainer, task, epoch_itr)
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nas/home/thawani/MCS/fairseq/fairseq_cli/train.py", line 177, in train
log_output = trainer.train_step(samples)
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/contextlib.py", line 74, in inner
return func(*args, **kwds)
File "/nas/home/thawani/MCS/fairseq/fairseq/trainer.py", line 319, in train_step
ignore_grad=is_dummy_batch,
File "/nas/home/thawani/MCS/fairseq/fairseq/tasks/fairseq_task.py", line 337, in train_step
loss, sample_size, logging_output = criterion(model, sample)
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/site-packages/torch/nn/modules/module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "/nas/home/thawani/MCS/fairseq/fairseq/criterions/sentence_prediction.py", line 40, in forward
and self.args.classification_head_name in model.classification_heads
File "/nas/home/thawani/anaconda3/envs/env/lib/python3.7/site-packages/torch/nn/modules/module.py", line 576, in __getattr__
type(self).__name__, name))
AttributeError: 'SentencePredictionCriterion' object has no attribute 'args'
|
AttributeError
|
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(args.kenlm_model)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert tgt_dict.unk() not in spelling_idxs, (
f"{spelling} {spelling_idxs}"
)
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
[],
self.unit_lm,
)
else:
assert args.unit_lm, (
"lexicon free decoding can only be done with a unit language model"
)
from flashlight.lib.text.decoder import (
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
)
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
|
def __init__(self, args, tgt_dict):
super().__init__(args, tgt_dict)
self.unit_lm = getattr(args, "unit_lm", False)
self.lexicon = load_words(args.lexicon) if args.lexicon else None
self.idx_to_wrd = {}
checkpoint = torch.load(args.kenlm_model, map_location="cpu")
if "cfg" in checkpoint and checkpoint["cfg"] is not None:
lm_args = checkpoint["cfg"]
else:
lm_args = convert_namespace_to_omegaconf(checkpoint["args"])
with open_dict(lm_args.task):
lm_args.task.data = osp.dirname(args.kenlm_model)
task = tasks.setup_task(lm_args.task)
model = task.build_model(lm_args.model)
model.load_state_dict(checkpoint["model"], strict=False)
self.trie = Trie(self.vocab_size, self.silence)
self.word_dict = task.dictionary
self.unk_word = self.word_dict.unk()
self.lm = FairseqLM(self.word_dict, model)
if self.lexicon:
start_state = self.lm.start(False)
for i, (word, spellings) in enumerate(self.lexicon.items()):
if self.unit_lm:
word_idx = i
self.idx_to_wrd[i] = word
score = 0
else:
word_idx = self.word_dict.index(word)
_, score = self.lm.score(start_state, word_idx, no_cache=True)
for spelling in spellings:
spelling_idxs = [tgt_dict.index(token) for token in spelling]
assert tgt_dict.unk() not in spelling_idxs, (
f"{spelling} {spelling_idxs}"
)
self.trie.insert(spelling_idxs, word_idx, score)
self.trie.smear(SmearingMode.MAX)
self.decoder_opts = LexiconDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
word_score=args.word_score,
unk_score=args.unk_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconDecoder(
self.decoder_opts,
self.trie,
self.lm,
self.silence,
self.blank,
self.unk_word,
self.asg_transitions,
self.unit_lm,
)
else:
assert args.unit_lm, (
"lexicon free decoding can only be done with a unit language model"
)
from flashlight.lib.text.decoder import (
LexiconFreeDecoder,
LexiconFreeDecoderOptions,
)
d = {w: [[w]] for w in tgt_dict.symbols}
self.word_dict = create_word_dict(d)
self.lm = KenLM(args.kenlm_model, self.word_dict)
self.decoder_opts = LexiconFreeDecoderOptions(
beam_size=args.beam,
beam_size_token=int(getattr(args, "beam_size_token", len(tgt_dict))),
beam_threshold=args.beam_threshold,
lm_weight=args.lm_weight,
sil_score=args.sil_weight,
log_add=False,
criterion_type=self.criterion_type,
)
self.decoder = LexiconFreeDecoder(
self.decoder_opts, self.lm, self.silence, self.blank, []
)
|
https://github.com/pytorch/fairseq/issues/1617
|
Traceback (most recent call last):
File "attention_layers.py", line 80, in <module>
tokens = roberta.encode(' '.join(list(s)))
File "/home/jmorton/software/fairseq/fairseq/models/roberta/hub_interface.py", line 57, in encode
bpe_sentence = '<s> ' + self.bpe.encode(sentence) + ' </s>'
File "/home/jmorton/software/fairseq/fairseq/data/encoders/gpt2_bpe.py", line 40, in encode
return ' '.join(map(str, self.bpe.encode(x)))
File "/home/jmorton/software/fairseq/fairseq/data/encoders/gpt2_bpe_utils.py", line 110, in encode
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
File "/home/jmorton/software/fairseq/fairseq/data/encoders/gpt2_bpe_utils.py", line 110, in <genexpr>
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
KeyError: 'Ġ'
|
KeyError
|
def collater(self, samples):
samples = self.dataset.collater(samples)
if self.new_src_eos is not None:
if self.dataset.left_pad_source:
assert (
samples["net_input"]["src_tokens"][:, -1] != self.src_eos
).sum() == 0
samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos
else:
eos_idx = samples["net_input"]["src_lengths"] - 1
assert (
samples["net_input"]["src_tokens"][
torch.arange(eos_idx.size(0)), eos_idx
]
!= self.src_eos
).sum() == 0
eos_idx = eos_idx.resize_(len(samples["net_input"]["src_lengths"]), 1)
samples["net_input"]["src_tokens"].scatter_(1, eos_idx, self.new_src_eos)
if self.new_tgt_bos is not None and "prev_output_tokens" in samples["net_input"]:
if self.dataset.left_pad_target:
# TODO: support different padding direction on target side
raise NotImplementedError(
"TransformEosLangPairDataset does not implement --left-pad-target True option"
)
else:
assert (
samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos
).sum() == 0
samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos
return samples
|
def collater(self, samples):
samples = self.dataset.collater(samples)
# TODO: support different padding direction
if self.new_src_eos is not None:
assert (samples["net_input"]["src_tokens"][:, -1] != self.src_eos).sum() == 0
samples["net_input"]["src_tokens"][:, -1] = self.new_src_eos
if self.new_tgt_bos is not None:
assert (
samples["net_input"]["prev_output_tokens"][:, 0] != self.tgt_bos
).sum() == 0
samples["net_input"]["prev_output_tokens"][:, 0] = self.new_tgt_bos
return samples
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def load_state_dict(self, state_dict, strict=True, args=None):
state_dict_subset = state_dict.copy()
for k, _ in state_dict.items():
assert k.startswith("models.")
lang_pair = k.split(".")[1]
if lang_pair not in self.models:
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict, args=args)
|
def load_state_dict(self, state_dict, strict=True):
state_dict_subset = state_dict.copy()
for k, _ in state_dict.items():
assert k.startswith("models.")
lang_pair = k.split(".")[1]
if lang_pair not in self.models:
del state_dict_subset[k]
super().load_state_dict(state_dict_subset, strict=strict)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def prepare(cls, args, **kargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
if args.lang_pairs is None:
raise ValueError(
"--lang-pairs is required. List all the language pairs in the training objective."
)
if isinstance(args.lang_pairs, str):
args.lang_pairs = args.lang_pairs.split(",")
sorted_langs = sorted(
list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")})
)
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
# load dictionaries
dicts = OrderedDict()
for lang in sorted_langs:
paths = args.data.split(os.pathsep)
assert len(paths) > 0
dicts[lang] = Dictionary.load(
os.path.join(paths[0], "dict.{}.txt".format(lang))
)
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
if args.encoder_langtok is not None or args.decoder_langtok:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(_lang_token(lang_to_add))
print("| [{}] dictionary: {} types".format(lang, len(dicts[lang])))
return dicts, training
|
def prepare(cls, args, **kargs):
args.left_pad_source = options.eval_bool(args.left_pad_source)
args.left_pad_target = options.eval_bool(args.left_pad_target)
if args.lang_pairs is None:
raise ValueError(
"--lang-pairs is required. List all the language pairs in the training objective."
)
args.lang_pairs = args.lang_pairs.split(",")
sorted_langs = sorted(
list({x for lang_pair in args.lang_pairs for x in lang_pair.split("-")})
)
if args.source_lang is not None or args.target_lang is not None:
training = False
else:
training = True
# load dictionaries
dicts = OrderedDict()
for lang in sorted_langs:
paths = args.data.split(os.pathsep)
assert len(paths) > 0
dicts[lang] = Dictionary.load(
os.path.join(paths[0], "dict.{}.txt".format(lang))
)
if len(dicts) > 0:
assert dicts[lang].pad() == dicts[sorted_langs[0]].pad()
assert dicts[lang].eos() == dicts[sorted_langs[0]].eos()
assert dicts[lang].unk() == dicts[sorted_langs[0]].unk()
if args.encoder_langtok is not None or args.decoder_langtok:
for lang_to_add in sorted_langs:
dicts[lang].add_symbol(_lang_token(lang_to_add))
print("| [{}] dictionary: {} types".format(lang, len(dicts[lang])))
return dicts, training
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def _match_types(arg1, arg2):
"""Convert the numerical argument to the same type as the other argument"""
def upgrade(arg_number, arg_structure):
if isinstance(arg_structure, tuple):
return (arg_number, arg_number)
elif isinstance(arg_structure, dict):
arg = copy.deepcopy(arg_structure)
for k in arg:
arg[k] = upgrade(arg_number, arg_structure[k])
return arg
else:
return arg_number
if isinstance(arg1, float) or isinstance(arg1, int):
return upgrade(arg1, arg2), arg2
elif isinstance(arg2, float) or isinstance(arg2, int):
return arg1, upgrade(arg2, arg1)
return arg1, arg2
|
def _match_types(arg1, arg2):
if (isinstance(arg1, float) or isinstance(arg1, int)) and isinstance(arg2, tuple):
arg1_tuple = (arg1, arg1)
return arg1_tuple, arg2
if (isinstance(arg2, float) or isinstance(arg2, int)) and isinstance(arg1, tuple):
arg2_tuple = (arg2, arg2)
return arg1, arg2_tuple
return arg1, arg2
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def hydra_init(cfg_name="config") -> None:
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=FairseqConfig)
for k in FairseqConfig.__dataclass_fields__:
v = FairseqConfig.__dataclass_fields__[k].default
try:
cs.store(name=k, node=v)
except BaseException:
logger.error(f"{k} - {v}")
raise
register_module_dataclass(cs, TASK_DATACLASS_REGISTRY, "task")
register_module_dataclass(cs, MODEL_DATACLASS_REGISTRY, "model")
for k, v in REGISTRIES.items():
register_module_dataclass(cs, v["dataclass_registry"], k)
|
def hydra_init() -> None:
cs = ConfigStore.instance()
for k in FairseqConfig.__dataclass_fields__:
v = FairseqConfig.__dataclass_fields__[k].default
try:
cs.store(name=k, node=v)
except BaseException:
logger.error(f"{k} - {v}")
raise
register_module_dataclass(cs, TASK_DATACLASS_REGISTRY, "task")
register_module_dataclass(cs, MODEL_DATACLASS_REGISTRY, "model")
for k, v in REGISTRIES.items():
register_module_dataclass(cs, v["dataclass_registry"], k)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def _override_attr(
sub_node: str, data_class: Type[FairseqDataclass], args: Namespace
) -> List[str]:
overrides = []
if not inspect.isclass(data_class) or not issubclass(data_class, FairseqDataclass):
return overrides
def get_default(f):
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
for k, v in data_class.__dataclass_fields__.items():
if k.startswith("_"):
# private member, skip
continue
val = get_default(v) if not hasattr(args, k) else getattr(args, k)
if getattr(v.type, "__origin__", None) is List:
# if type is int but val is float, then we will crash later - try to convert here
t_args = v.type.__args__
if len(t_args) == 1:
val = list(map(t_args[0], val))
if val is None:
overrides.append("{}.{}=null".format(sub_node, k))
elif val == "":
overrides.append("{}.{}=''".format(sub_node, k))
elif isinstance(val, str):
overrides.append("{}.{}='{}'".format(sub_node, k, val))
else:
overrides.append("{}.{}={}".format(sub_node, k, val))
return overrides
|
def _override_attr(
sub_node: str, data_class: Type[FairseqDataclass], args: Namespace
) -> List[str]:
overrides = []
def get_default(f):
if not isinstance(f.default_factory, _MISSING_TYPE):
return f.default_factory()
return f.default
for k, v in data_class.__dataclass_fields__.items():
if k.startswith("_"):
# private member, skip
continue
val = get_default(v) if not hasattr(args, k) else getattr(args, k)
if val is None:
overrides.append("{}.{}=null".format(sub_node, k))
elif val == "":
overrides.append("{}.{}=''".format(sub_node, k))
elif isinstance(val, str):
overrides.append("{}.{}='{}'".format(sub_node, k, val))
else:
overrides.append("{}.{}={}".format(sub_node, k, val))
return overrides
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]:
"""use the field in args to overrides those in cfg"""
overrides = []
deletes = []
for k in FairseqConfig.__dataclass_fields__.keys():
overrides.extend(
_override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args)
)
if args is not None:
if hasattr(args, "task"):
from fairseq.tasks import TASK_DATACLASS_REGISTRY
migrate_registry(
"task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes
)
else:
deletes.append("task")
# these options will be set to "None" if they have not yet been migrated
# so we can populate them with the entire flat args
CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"}
from fairseq.registry import REGISTRIES
for k, v in REGISTRIES.items():
if hasattr(args, k):
migrate_registry(
k,
getattr(args, k),
v["dataclass_registry"],
args,
overrides,
deletes,
use_name_as_val=k not in CORE_REGISTRIES,
)
else:
deletes.append(k)
no_dc = True
if hasattr(args, "arch"):
from fairseq.models import ARCH_MODEL_REGISTRY, ARCH_MODEL_NAME_REGISTRY
if args.arch in ARCH_MODEL_REGISTRY:
m_cls = ARCH_MODEL_REGISTRY[args.arch]
dc = getattr(m_cls, "__dataclass", None)
if dc is not None:
m_name = ARCH_MODEL_NAME_REGISTRY[args.arch]
overrides.append("model={}".format(m_name))
overrides.append("model._name={}".format(args.arch))
# override model params with those exist in args
overrides.extend(_override_attr("model", dc, args))
no_dc = False
if no_dc:
deletes.append("model")
return overrides, deletes
|
def override_module_args(args: Namespace) -> Tuple[List[str], List[str]]:
"""use the field in args to overrides those in cfg"""
overrides = []
deletes = []
for k in FairseqConfig.__dataclass_fields__.keys():
overrides.extend(
_override_attr(k, FairseqConfig.__dataclass_fields__[k].type, args)
)
if args is not None:
if hasattr(args, "task"):
from fairseq.tasks import TASK_DATACLASS_REGISTRY
migrate_registry(
"task", args.task, TASK_DATACLASS_REGISTRY, args, overrides, deletes
)
else:
deletes.append("task")
# these options will be set to "None" if they have not yet been migrated
# so we can populate them with the entire flat args
CORE_REGISTRIES = {"criterion", "optimizer", "lr_scheduler"}
from fairseq.registry import REGISTRIES
for k, v in REGISTRIES.items():
if hasattr(args, k):
migrate_registry(
k,
getattr(args, k),
v["dataclass_registry"],
args,
overrides,
deletes,
use_name_as_val=k not in CORE_REGISTRIES,
)
else:
deletes.append(k)
no_dc = True
if hasattr(args, "arch"):
from fairseq.models import ARCH_MODEL_REGISTRY
if args.arch in ARCH_MODEL_REGISTRY:
m_cls = ARCH_MODEL_REGISTRY[args.arch]
dc = getattr(m_cls, "__dataclass", None)
if dc is not None:
overrides.append("model={}".format(args.arch))
overrides.append("model._name={}".format(args.arch))
# override model params with those exist in args
overrides.extend(_override_attr("model", dc, args))
no_dc = False
if no_dc:
deletes.append("model")
return overrides, deletes
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None or cfg.tpu:
return
if cfg.pipeline_model_parallel:
balance_exists = (
cfg.pipeline_balance is not None
or cfg.pipeline_encoder_balance is not None
or cfg.pipeline_decoder_balance is not None
)
devices_exist = (
cfg.pipeline_devices is not None
or cfg.pipeline_encoder_devices is not None
or cfg.pipeline_decoder_devices is not None
)
if not balance_exists:
raise ValueError(
"--pipeline-balance is currently required for pipeline model parallelism"
)
if not devices_exist:
raise ValueError(
"--pipeline-devices is currently required for pipeline model parallelism"
)
cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int)
if cfg.pipeline_devices is not None:
cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int)
num_pipeline_devices = len(set(cfg.pipeline_devices))
else:
cfg.pipeline_encoder_devices = utils.eval_str_list(
cfg.pipeline_encoder_devices, type=int
)
cfg.pipeline_decoder_devices = utils.eval_str_list(
cfg.pipeline_decoder_devices, type=int
)
num_pipeline_devices = len(
set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices)
)
gpus_per_node = torch.cuda.device_count()
assert (
gpus_per_node >= num_pipeline_devices
and gpus_per_node % num_pipeline_devices == 0
), (
"the number of unique device IDs in --pipeline-devices must evenly divide "
"the number of GPUs per node (multi-node pipelining is not yet supported)"
)
num_pipelines_per_node = gpus_per_node // num_pipeline_devices
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
# we can determine the init method automatically for Slurm
elif cfg.distributed_port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
cfg.distributed_init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=cfg.distributed_port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
cfg.distributed_rank = node_id * gpus_per_node
cfg.distributed_world_size = nnodes * gpus_per_node
elif cfg.pipeline_model_parallel:
assert ntasks_per_node == num_pipelines_per_node, (
"SLURM --ntasks-per-node must match number of pipelines per "
"node (={})".format(num_pipelines_per_node)
)
cfg.distributed_no_spawn = True
# For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on
# the first node, [1, 2] on the second node, etc. This
# matches torch.distributed.launch.
node_id = int(os.environ.get("SLURM_NODEID"))
local_id = int(os.environ.get("SLURM_LOCALID"))
cfg.distributed_rank = node_id * num_pipelines_per_node + local_id
# In the above example, device_id will always be in [0, 1],
# which also matches torch.distributed.launch.
cfg.device_id = local_id
# We also want to set distributed_world_size to be the total
# number of pipelines across all nodes.
cfg.distributed_world_size = nnodes * num_pipelines_per_node
else:
assert ntasks_per_node == cfg.distributed_world_size // nnodes
cfg.distributed_no_spawn = True
cfg.distributed_rank = int(os.environ.get("SLURM_PROCID"))
cfg.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
assert cfg.distributed_world_size <= torch.cuda.device_count(), (
f"world size is {cfg.distributed_world_size} but have {torch.cuda.device_count()} available devices"
)
port = random.randint(10000, 20000)
cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port)
if cfg.pipeline_model_parallel:
if not cfg.distributed_no_spawn:
# When distributed_no_spawn is False, we expect distributed_rank and
# distributed_world_size to be based on the total number of GPUs, so
# we need to correct them to be based on the number of pipelines.
assert cfg.distributed_world_size % num_pipeline_devices == 0
cfg.distributed_world_size = (
cfg.distributed_world_size // num_pipeline_devices
)
# In the case of 4-way MP on nodes with 8 GPUs, we want
# distributed_rank to be the starting GPU index for each pipeline
# i.e., 0, 2, ...
assert cfg.distributed_rank % gpus_per_node == 0
assert cfg.distributed_rank % num_pipeline_devices == 0
with open_dict(cfg):
cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices
# launch one process per pipeline
cfg.distributed_num_procs = num_pipelines_per_node
# if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0
# and 4, indicating the starting device IDs for each pipeline
cfg.device_id *= num_pipeline_devices
if cfg.device_id > 0:
# if there's multiple pipelines on a node (e.g., 4-way MP on an 8
# GPU node), we need to adjust pipeline_devices accordingly
logger.debug(
"setting CUDA device={} on rank {}".format(
cfg.device_id, cfg.distributed_rank
)
)
torch.cuda.set_device(cfg.device_id)
with open_dict(cfg):
cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices]
logger.info(
"setting pipeline_devices={} on rank {}".format(
cfg.pipeline_devices, cfg.distributed_rank
)
)
elif not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
|
def infer_init_method(cfg: DistributedTrainingConfig, force_distributed=False):
if cfg.distributed_init_method is not None or cfg.tpu:
return
if cfg.pipeline_model_parallel:
balance_exists = (
cfg.pipeline_balance is not None
or cfg.pipeline_encoder_balance is not None
or cfg.pipeline_decoder_balance is not None
)
devices_exist = (
cfg.pipeline_devices is not None
or cfg.pipeline_encoder_devices is not None
or cfg.pipeline_decoder_devices is not None
)
if not balance_exists:
raise ValueError(
"--pipeline-balance is currently required for pipeline model parallelism"
)
if not devices_exist:
raise ValueError(
"--pipeline-devices is currently required for pipeline model parallelism"
)
cfg.pipeline_balance = utils.eval_str_list(cfg.pipeline_balance, type=int)
if cfg.pipeline_devices is not None:
cfg.pipeline_devices = utils.eval_str_list(cfg.pipeline_devices, type=int)
num_pipeline_devices = len(set(cfg.pipeline_devices))
else:
cfg.pipeline_encoder_devices = utils.eval_str_list(
cfg.pipeline_encoder_devices, type=int
)
cfg.pipeline_decoder_devices = utils.eval_str_list(
cfg.pipeline_decoder_devices, type=int
)
num_pipeline_devices = len(
set(cfg.pipeline_encoder_devices + cfg.pipeline_decoder_devices)
)
gpus_per_node = torch.cuda.device_count()
assert (
gpus_per_node >= num_pipeline_devices
and gpus_per_node % num_pipeline_devices == 0
), (
"the number of unique device IDs in --pipeline-devices must evenly divide "
"the number of GPUs per node (multi-node pipelining is not yet supported)"
)
num_pipelines_per_node = gpus_per_node // num_pipeline_devices
# support torch.distributed.launch
if all(
key in os.environ
for key in ["MASTER_ADDR", "MASTER_PORT", "WORLD_SIZE", "RANK"]
):
cfg.distributed_init_method = "env://"
cfg.distributed_world_size = int(os.environ["WORLD_SIZE"])
cfg.distributed_rank = int(os.environ["RANK"])
# processes are created by torch.distributed.launch
cfg.distributed_no_spawn = True
# we can determine the init method automatically for Slurm
elif cfg.distributed_port > 0:
node_list = os.environ.get("SLURM_STEP_NODELIST")
if node_list is None:
node_list = os.environ.get("SLURM_JOB_NODELIST")
if node_list is not None:
try:
hostnames = subprocess.check_output(
["scontrol", "show", "hostnames", node_list]
)
cfg.distributed_init_method = "tcp://{host}:{port}".format(
host=hostnames.split()[0].decode("utf-8"),
port=cfg.distributed_port,
)
nnodes = int(os.environ.get("SLURM_NNODES"))
ntasks_per_node = os.environ.get("SLURM_NTASKS_PER_NODE")
if ntasks_per_node is not None:
ntasks_per_node = int(ntasks_per_node)
else:
ntasks = int(os.environ.get("SLURM_NTASKS"))
nnodes = int(os.environ.get("SLURM_NNODES"))
assert ntasks % nnodes == 0
ntasks_per_node = int(ntasks / nnodes)
if ntasks_per_node == 1:
gpus_per_node = torch.cuda.device_count()
node_id = int(os.environ.get("SLURM_NODEID"))
cfg.distributed_rank = node_id * gpus_per_node
cfg.distributed_world_size = nnodes * gpus_per_node
elif cfg.pipeline_model_parallel:
assert ntasks_per_node == num_pipelines_per_node, (
"SLURM --ntasks-per-node must match number of pipelines per "
"node (={})".format(num_pipelines_per_node)
)
cfg.distributed_no_spawn = True
# For 4-way MP on nodes with 8 GPUs, ranks will be [0, 1] on
# the first node, [1, 2] on the second node, etc. This
# matches torch.distributed.launch.
node_id = int(os.environ.get("SLURM_NODEID"))
local_id = int(os.environ.get("SLURM_LOCALID"))
cfg.distributed_rank = node_id * num_pipelines_per_node + local_id
# In the above example, device_id will always be in [0, 1],
# which also matches torch.distributed.launch.
cfg.device_id = local_id
# We also want to set distributed_world_size to be the total
# number of pipelines across all nodes.
cfg.distributed_world_size = nnodes * num_pipelines_per_node
else:
assert ntasks_per_node == cfg.distributed_world_size // nnodes
cfg.distributed_no_spawn = True
cfg.distributed_rank = int(os.environ.get("SLURM_PROCID"))
cfg.device_id = int(os.environ.get("SLURM_LOCALID"))
except subprocess.CalledProcessError as e: # scontrol failed
raise e
except FileNotFoundError: # Slurm is not installed
pass
elif cfg.distributed_world_size > 1 or force_distributed:
# fallback for single node with multiple GPUs
assert cfg.distributed_world_size <= torch.cuda.device_count()
port = random.randint(10000, 20000)
cfg.distributed_init_method = "tcp://localhost:{port}".format(port=port)
if cfg.pipeline_model_parallel:
if not cfg.distributed_no_spawn:
# When distributed_no_spawn is False, we expect distributed_rank and
# distributed_world_size to be based on the total number of GPUs, so
# we need to correct them to be based on the number of pipelines.
assert cfg.distributed_world_size % num_pipeline_devices == 0
cfg.distributed_world_size = (
cfg.distributed_world_size // num_pipeline_devices
)
# In the case of 4-way MP on nodes with 8 GPUs, we want
# distributed_rank to be the starting GPU index for each pipeline
# i.e., 0, 2, ...
assert cfg.distributed_rank % gpus_per_node == 0
assert cfg.distributed_rank % num_pipeline_devices == 0
with open_dict(cfg):
cfg.distributed_rank = cfg.distributed_rank // num_pipeline_devices
# launch one process per pipeline
cfg.distributed_num_procs = num_pipelines_per_node
# if we have 4-way MP on a node with 8 GPUs, we want device_ids to be 0
# and 4, indicating the starting device IDs for each pipeline
cfg.device_id *= num_pipeline_devices
if cfg.device_id > 0:
# if there's multiple pipelines on a node (e.g., 4-way MP on an 8
# GPU node), we need to adjust pipeline_devices accordingly
logger.debug(
"setting CUDA device={} on rank {}".format(
cfg.device_id, cfg.distributed_rank
)
)
torch.cuda.set_device(cfg.device_id)
with open_dict(cfg):
cfg.pipeline_devices = [cfg.device_id + d for d in cfg.pipeline_devices]
logger.info(
"setting pipeline_devices={} on rank {}".format(
cfg.pipeline_devices, cfg.distributed_rank
)
)
elif not cfg.distributed_no_spawn:
with open_dict(cfg):
cfg.distributed_num_procs = min(
torch.cuda.device_count(), cfg.distributed_world_size
)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def build_model(cfg: DictConfig, task):
model = None
model_type = getattr(cfg, "_name", None) or getattr(cfg, "arch", None)
if not model_type and len(cfg) == 1:
# this is hit if config object is nested in directory that is named after model type
model_type = next(iter(cfg))
if model_type in MODEL_DATACLASS_REGISTRY:
cfg = cfg[model_type]
else:
raise Exception(
"Could not infer model type from directory. Please add _name field to indicate model type"
)
if model_type in ARCH_MODEL_REGISTRY:
# case 1: legacy models
model = ARCH_MODEL_REGISTRY[model_type]
elif model_type in MODEL_DATACLASS_REGISTRY:
# case 2: config-driven models
model = MODEL_REGISTRY[model_type]
if model_type in MODEL_DATACLASS_REGISTRY:
# set defaults from dataclass. note that arch name and model name can be the same
dc = MODEL_DATACLASS_REGISTRY[model_type]
cfg = merge_with_parent(dc(), cfg)
assert model is not None, f"Could not infer model type from {cfg}"
return model.build_model(cfg, task)
|
def build_model(cfg: DictConfig, task):
if isinstance(cfg, DictConfig):
return ARCH_MODEL_REGISTRY[cfg._name].build_model(cfg, task)
return ARCH_MODEL_REGISTRY[cfg.arch].build_model(cfg, task)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad is not None:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
|
def zero_grad(self):
"""Clears the gradients of all optimized parameters."""
for p in self.fp16_params:
p.grad = None
if self.has_flat_params:
if torch.is_tensor(self.fp32_params):
self.fp32_params.grad.zero_()
elif isinstance(self.fp32_params, dict):
for fp32_params in self.fp32_params.values():
fp32_params.grad.zero_()
else:
raise RuntimeError("self.fp32_params must be a tensor or dict")
else:
for p32 in self.fp32_params:
if p32.grad:
p32.grad.zero_()
self._needs_sync = False
if self.scaler is not None:
self._multiply_factor = 1.0 / float(self.scaler.loss_scale)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def __init__(self, cfg: DictConfig, fairseq_optimizer):
super().__init__(cfg, fairseq_optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with cosine."
f" Consider --lr-scheduler=fixed instead. ({cfg.lr})"
)
warmup_end_lr = cfg.max_lr
lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = lr
self.min_lr = lr
self.max_lr = cfg.max_lr
assert self.max_lr > self.min_lr, "max_lr must be more than lr"
self.t_mult = cfg.t_mult
self.period = cfg.lr_period_updates
if self.period <= 0:
assert cfg.max_update >= 0, (
"Either --max_update or --lr-period-updates must be set"
)
self.period = cfg.max_update - cfg.warmup_updates
if cfg.warmup_updates > 0:
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
else:
self.lr_step = 1
self.warmup_updates = cfg.warmup_updates
self.lr_shrink = cfg.lr_shrink
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
|
def __init__(self, cfg: DictConfig, fairseq_optimizer):
super().__init__(cfg, fairseq_optimizer)
if isinstance(cfg.lr, Collection) and len(cfg.lr) > 1:
raise ValueError(
"Cannot use a fixed learning rate schedule with cosine."
" Consider --lr-scheduler=fixed instead."
)
warmup_end_lr = cfg.max_lr
lr = cfg.lr[0] if isinstance(cfg.lr, Collection) else cfg.lr
if cfg.warmup_init_lr < 0:
cfg.warmup_init_lr = lr
self.min_lr = lr
self.max_lr = cfg.max_lr
assert self.max_lr > self.min_lr, "max_lr must be more than lr"
self.t_mult = cfg.t_mult
self.period = cfg.lr_period_updates
if self.period <= 0:
assert cfg.max_update >= 0, (
"Either --max_update or --lr-period-updates must be set"
)
self.period = cfg.max_update - cfg.warmup_updates
if cfg.warmup_updates > 0:
# linearly warmup for the first args.warmup_updates
self.lr_step = (warmup_end_lr - cfg.warmup_init_lr) / cfg.warmup_updates
else:
self.lr_step = 1
self.warmup_updates = cfg.warmup_updates
self.lr_shrink = cfg.lr_shrink
# initial learning rate
self.lr = cfg.warmup_init_lr
self.optimizer.set_lr(self.lr)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(cfg, DATACLASS_REGISTRY[choice]())
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
REGISTRY[name] = cls
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
|
def setup_registry(registry_name: str, base_class=None, default=None, required=False):
assert registry_name.startswith("--")
registry_name = registry_name[2:].replace("-", "_")
REGISTRY = {}
REGISTRY_CLASS_NAMES = set()
DATACLASS_REGISTRY = {}
# maintain a registry of all registries
if registry_name in REGISTRIES:
return # registry already exists
REGISTRIES[registry_name] = {
"registry": REGISTRY,
"default": default,
"dataclass_registry": DATACLASS_REGISTRY,
}
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(cfg, DATACLASS_REGISTRY[choice]())
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
def register_x(name, dataclass=None):
def register_x_cls(cls):
if name in REGISTRY:
raise ValueError(
"Cannot register duplicate {} ({})".format(registry_name, name)
)
if cls.__name__ in REGISTRY_CLASS_NAMES:
raise ValueError(
"Cannot register {} with duplicate class name ({})".format(
registry_name, cls.__name__
)
)
if base_class is not None and not issubclass(cls, base_class):
raise ValueError(
"{} must extend {}".format(cls.__name__, base_class.__name__)
)
if dataclass is not None and not issubclass(dataclass, FairseqDataclass):
raise ValueError(
"Dataclass {} must extend FairseqDataclass".format(dataclass)
)
cls.__dataclass = dataclass
REGISTRY[name] = cls
if cls.__dataclass is not None:
DATACLASS_REGISTRY[name] = cls.__dataclass
return cls
return register_x_cls
return build_x, register_x, REGISTRY, DATACLASS_REGISTRY
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
if choice and choice in DATACLASS_REGISTRY:
dc = DATACLASS_REGISTRY[choice]
cfg = merge_with_parent(dc(), cfg)
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(cfg, DATACLASS_REGISTRY[choice]())
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
|
def build_x(cfg: Union[DictConfig, str, Namespace], *extra_args, **extra_kwargs):
if isinstance(cfg, DictConfig):
choice = cfg._name
elif isinstance(cfg, str):
choice = cfg
if choice in DATACLASS_REGISTRY:
cfg = DATACLASS_REGISTRY[choice]()
else:
choice = getattr(cfg, registry_name, None)
if choice in DATACLASS_REGISTRY:
cfg = populate_dataclass(cfg, DATACLASS_REGISTRY[choice]())
if choice is None:
if required:
raise ValueError("{} is required!".format(registry_name))
return None
cls = REGISTRY[choice]
if hasattr(cls, "build_" + registry_name):
builder = getattr(cls, "build_" + registry_name)
else:
builder = cls
return builder(cfg, *extra_args, **extra_kwargs)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def setup_task(cfg: DictConfig, **kwargs):
task = None
task_name = getattr(cfg, "task", None)
if isinstance(task_name, str):
# legacy tasks
task = TASK_REGISTRY[task_name]
else:
task_name = getattr(cfg, "_name", None)
if task_name and task_name in TASK_DATACLASS_REGISTRY:
dc = TASK_DATACLASS_REGISTRY[task_name]
cfg = merge_with_parent(dc(), cfg)
task = TASK_REGISTRY[task_name]
assert task is not None, f"Could not infer task type from {cfg}"
return task.setup_task(cfg, **kwargs)
|
def setup_task(cfg: DictConfig, **kwargs):
if isinstance(cfg, DictConfig):
return TASK_REGISTRY[cfg._name].setup_task(cfg, **kwargs)
return TASK_REGISTRY[cfg.task].setup_task(cfg, **kwargs)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if getattr(args, "exclude_self_target", False):
args.self_target = False
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
|
def setup_task(cls, args, **kwargs):
"""Setup the task (e.g., load dictionaries).
Args:
args (argparse.Namespace): parsed command-line arguments
"""
dictionary, output_dictionary = cls.setup_dictionary(args, **kwargs)
# upgrade old checkpoints
if hasattr(args, "exclude_self_target"):
args.self_target = not args.exclude_self_target
targets = []
if getattr(args, "self_target", False):
targets.append("self")
if getattr(args, "future_target", False):
targets.append("future")
if getattr(args, "past_target", False):
targets.append("past")
if len(targets) == 0:
# standard language modeling
targets = ["future"]
return cls(args, dictionary, output_dictionary, targets=targets)
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def main(cfg: DictConfig) -> None:
if isinstance(cfg, argparse.Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
assert cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None, (
"Must specify batch size either with --max-tokens or --batch-size"
)
metrics.reset()
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
if distributed_utils.is_master(cfg.distributed_training):
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
# Print args
logger.info(cfg)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
assert cfg.criterion, "Please specify criterion to train a model"
# Build model and criterion
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {}".format(task.__class__.__name__))
logger.info("model: {}".format(model.__class__.__name__))
logger.info("criterion: {})".format(criterion.__class__.__name__))
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if cfg.common.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=cfg.common.quantization_config_path,
max_epoch=cfg.optimization.max_epoch,
max_update=cfg.optimization.max_update,
)
else:
quantizer = None
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per GPU = {} and batch size per GPU = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
max_epoch = cfg.optimization.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > cfg.optimization.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
|
def main(cfg: DictConfig) -> None:
if isinstance(cfg, argparse.Namespace):
cfg = convert_namespace_to_omegaconf(cfg)
utils.import_user_module(cfg.common)
assert cfg.dataset.max_tokens is not None or cfg.dataset.batch_size is not None, (
"Must specify batch size either with --max-tokens or --batch-size"
)
metrics.reset()
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
if distributed_utils.is_master(cfg.distributed_training):
checkpoint_utils.verify_checkpoint_directory(cfg.checkpoint.save_dir)
# Print args
logger.info(cfg)
# Setup task, e.g., translation, language modeling, etc.
task = tasks.setup_task(cfg.task)
# Load valid dataset (we load training data below, based on the latest checkpoint)
for valid_sub_split in cfg.dataset.valid_subset.split(","):
task.load_dataset(valid_sub_split, combine=False, epoch=1)
# Build model and criterion
model = task.build_model(cfg.model)
criterion = task.build_criterion(cfg.criterion)
logger.info(model)
logger.info("task: {} ({})".format(cfg.task._name, task.__class__.__name__))
logger.info("model: {} ({})".format(cfg.model._name, model.__class__.__name__))
logger.info(
"criterion: {} ({})".format(cfg.criterion._name, criterion.__class__.__name__)
)
logger.info(
"num. model params: {} (num. trained: {})".format(
sum(p.numel() for p in model.parameters()),
sum(p.numel() for p in model.parameters() if p.requires_grad),
)
)
# (optionally) Configure quantization
if cfg.common.quantization_config_path is not None:
quantizer = quantization_utils.Quantizer(
config_path=cfg.common.quantization_config_path,
max_epoch=cfg.optimization.max_epoch,
max_update=cfg.optimization.max_update,
)
else:
quantizer = None
# Build trainer
if cfg.common.model_parallel_size == 1:
trainer = Trainer(cfg, task, model, criterion, quantizer)
else:
trainer = MegatronTrainer(cfg, task, model, criterion)
logger.info(
"training on {} devices (GPUs/TPUs)".format(
cfg.distributed_training.distributed_world_size
)
)
logger.info(
"max tokens per GPU = {} and batch size per GPU = {}".format(
cfg.dataset.max_tokens,
cfg.dataset.batch_size,
)
)
# Load the latest checkpoint if one is available and restore the
# corresponding train iterator
extra_state, epoch_itr = checkpoint_utils.load_checkpoint(
cfg.checkpoint,
trainer,
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
max_epoch = cfg.optimization.max_epoch or math.inf
lr = trainer.get_lr()
train_meter = meters.StopwatchMeter()
train_meter.start()
while lr > cfg.optimization.min_lr and epoch_itr.next_epoch_idx <= max_epoch:
# train for one epoch
valid_losses, should_stop = train(cfg, trainer, task, epoch_itr)
if should_stop:
break
# only use first validation loss to update the learning rate
lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])
epoch_itr = trainer.get_train_iterator(
epoch_itr.next_epoch_idx,
# sharded data: get train iterator for next epoch
load_dataset=task.has_sharded_data("train"),
# don't cache epoch iterators for sharded datasets
disable_iterator_cache=task.has_sharded_data("train"),
)
train_meter.stop()
logger.info("done training in {:.1f} seconds".format(train_meter.sum))
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
|
def should_stop_early(cfg: DictConfig, valid_loss: float) -> bool:
# skip check if no validation was done in the current epoch
if valid_loss is None:
return False
if cfg.checkpoint.patience <= 0:
return False
def is_better(a, b):
return a > b if cfg.checkpoint.maximize_best_checkpoint_metric else a < b
prev_best = getattr(should_stop_early, "best", None)
if prev_best is None or is_better(valid_loss, prev_best):
should_stop_early.best = valid_loss
should_stop_early.num_runs = 0
return False
else:
should_stop_early.num_runs += 1
if should_stop_early.num_runs >= cfg.checkpoint.patience:
logger.info(
"early stop since valid performance hasn't improved for last {} runs".format(
cfg.checkpoint.patience
)
)
return True
else:
return False
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(cfg.common, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with (
metrics.aggregate("train_inner"),
torch.autograd.profiler.record_function("train_step-%d" % i),
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
|
def train(
cfg: DictConfig, trainer: Trainer, task: tasks.FairseqTask, epoch_itr
) -> Tuple[List[Optional[float]], bool]:
"""Train the model for one epoch and return validation losses."""
# Initialize data iterator
itr = epoch_itr.next_epoch_itr(
fix_batches_to_gpus=cfg.distributed_training.fix_batches_to_gpus,
shuffle=(epoch_itr.next_epoch_idx > cfg.dataset.curriculum),
)
update_freq = (
cfg.optimization.update_freq[epoch_itr.epoch - 1]
if epoch_itr.epoch <= len(cfg.optimization.update_freq)
else cfg.optimization.update_freq[-1]
)
itr = iterators.GroupedIterator(itr, update_freq)
if getattr(cfg.common, "tpu", False):
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
)
trainer.begin_epoch(epoch_itr.epoch)
valid_subsets = cfg.dataset.valid_subset.split(",")
should_stop = False
num_updates = trainer.get_num_updates()
for i, samples in enumerate(progress):
with (
metrics.aggregate("train_inner"),
torch.autograd.profiler.record_function("train_step-%d" % i),
):
log_output = trainer.train_step(samples)
if log_output is not None: # not OOM, overflow, ...
# log mid-epoch stats
num_updates = trainer.get_num_updates()
if num_updates % cfg.common.log_interval == 0:
stats = get_training_stats(metrics.get_smoothed_values("train_inner"))
progress.log(stats, tag="train_inner", step=num_updates)
# reset mid-epoch stats after each log interval
# the end-of-epoch stats will still be preserved
metrics.reset_meters("train_inner")
end_of_epoch = not itr.has_next()
valid_losses, should_stop = validate_and_save(
cfg, trainer, task, epoch_itr, valid_subsets, end_of_epoch
)
if should_stop:
break
# log end-of-epoch stats
logger.info("end of epoch {} (average epoch stats below)".format(epoch_itr.epoch))
stats = get_training_stats(metrics.get_smoothed_values("train"))
progress.print(stats, tag="train", step=num_updates)
# reset epoch-level meters
metrics.reset_meters("train")
return valid_losses, should_stop
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or num_updates >= max_update
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or num_updates >= max_update
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
) and not cfg.dataset.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(cfg, valid_losses[0])
or num_updates >= max_update
or (
cfg.optimization.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60)
> cfg.optimization.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
)
return valid_losses, should_stop
|
def validate_and_save(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
valid_subsets: List[str],
end_of_epoch: bool,
) -> Tuple[List[Optional[float]], bool]:
num_updates = trainer.get_num_updates()
max_update = cfg.optimization.max_update or math.inf
do_save = (
(end_of_epoch and epoch_itr.epoch % cfg.checkpoint.save_interval == 0)
or num_updates >= max_update
or (
cfg.checkpoint.save_interval_updates > 0
and num_updates > 0
and num_updates % cfg.checkpoint.save_interval_updates == 0
and num_updates >= cfg.dataset.validate_after_updates
)
)
do_validate = (
(not end_of_epoch and do_save) # validate during mid-epoch saves
or (end_of_epoch and epoch_itr.epoch % cfg.dataset.validate_interval == 0)
or num_updates >= max_update
or (
cfg.dataset.validate_interval_updates > 0
and num_updates > 0
and num_updates % cfg.dataset.validate_interval_updates == 0
)
) and not cfg.dataset.disable_validation
# Validate
valid_losses = [None]
if do_validate:
valid_losses = validate(cfg, trainer, task, epoch_itr, valid_subsets)
# Stopping conditions
should_stop = (
should_stop_early(cfg, valid_losses[0])
or num_updates >= max_update
or (
cfg.optimization.stop_time_hours > 0
and trainer.cumulative_training_time() / (60 * 60)
> cfg.optimization.stop_time_hours
)
)
# Save checkpoint
if do_save or should_stop:
logger.info("begin save checkpoint")
checkpoint_utils.save_checkpoint(
cfg.checkpoint, trainer, epoch_itr, valid_losses[0]
)
return valid_losses, should_stop
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
return valid_losses
|
def validate(
cfg: DictConfig,
trainer: Trainer,
task: tasks.FairseqTask,
epoch_itr,
subsets: List[str],
) -> List[Optional[float]]:
"""Evaluate the model on the validation set(s) and return the losses."""
if cfg.dataset.fixed_validation_seed is not None:
# set fixed seed for every validation
utils.set_torch_seed(cfg.dataset.fixed_validation_seed)
trainer.begin_valid_epoch(epoch_itr.epoch)
valid_losses = []
for subset in subsets:
logger.info('begin validation on "{}" subset'.format(subset))
# Initialize data iterator
itr = trainer.get_valid_iterator(subset).next_epoch_itr(shuffle=False)
if cfg.common.tpu:
itr = utils.tpu_data_loader(itr)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
epoch=epoch_itr.epoch,
prefix=f"valid on '{subset}' subset",
tensorboard_logdir=(
cfg.common.tensorboard_logdir
if distributed_utils.is_master(cfg.distributed_training)
else None
),
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
wandb_project=(
cfg.common.wandb_project
if distributed_utils.is_master(cfg.distributed_training)
else None
),
)
# create a new root metrics aggregator so validation metrics
# don't pollute other aggregators (e.g., train meters)
with metrics.aggregate(new_root=True) as agg:
for sample in progress:
trainer.valid_step(sample)
# log validation stats
stats = get_valid_stats(cfg, trainer, agg.get_smoothed_values())
progress.print(stats, tag=subset, step=trainer.get_num_updates())
valid_losses.append(stats[cfg.checkpoint.best_checkpoint_metric])
return valid_losses
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
|
def get_valid_stats(
cfg: DictConfig, trainer: Trainer, stats: Dict[str, Any]
) -> Dict[str, Any]:
stats["num_updates"] = trainer.get_num_updates()
if hasattr(checkpoint_utils.save_checkpoint, "best"):
key = "best_{0}".format(cfg.checkpoint.best_checkpoint_metric)
best_function = max if cfg.checkpoint.maximize_best_checkpoint_metric else min
stats[key] = best_function(
checkpoint_utils.save_checkpoint.best,
stats[cfg.checkpoint.best_checkpoint_metric],
)
return stats
|
https://github.com/pytorch/fairseq/issues/1393
|
/experiments/falva/tools/fairseq/fairseq/models/fairseq_model.py:280: UserWarning: FairseqModel is deprecated, please use FairseqEncoderDecoderModel or BaseFairseqModel instead
for key in self.keys
Traceback (most recent call last):
File "/home/falva/anaconda3/envs/mtl4ts/bin/fairseq-generate", line 11, in <module>
load_entry_point('fairseq', 'console_scripts', 'fairseq-generate')()
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 190, in cli_main
main(args)
File "/experiments/falva/tools/fairseq/fairseq_cli/generate.py", line 47, in main
task=task,
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 167, in load_model_ensemble
ensemble, args, _task = load_model_ensemble_and_task(filenames, arg_overrides, task)
File "/experiments/falva/tools/fairseq/fairseq/checkpoint_utils.py", line 186, in load_model_ensemble_and_task
model.load_state_dict(state['model'], strict=True, args=args)
TypeError: load_state_dict() got an unexpected keyword argument 'args'
|
TypeError
|
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
if self.out_proj.bias is not None:
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
|
def reset_parameters(self):
if self.qkv_same_dim:
# Empirically observed the convergence to be much better with
# the scaled initialization
nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
else:
nn.init.xavier_uniform_(self.k_proj.weight)
nn.init.xavier_uniform_(self.v_proj.weight)
nn.init.xavier_uniform_(self.q_proj.weight)
nn.init.xavier_uniform_(self.out_proj.weight)
nn.init.constant_(self.out_proj.bias, 0.0)
if self.bias_k is not None:
nn.init.xavier_normal_(self.bias_k)
if self.bias_v is not None:
nn.init.xavier_normal_(self.bias_v)
|
https://github.com/pytorch/fairseq/issues/1527
|
Traceback (most recent call last):
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 56, in __init__
self.reset_parameters()
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 82, in reset_parameters
nn.init.constant_(self.out_proj.bias, 0.)
File "xxx/torch/nn/init.py", line 120, in constant_
return _no_grad_fill_(tensor, val)
File "xxx/torch/nn/init.py", line 24, in _no_grad_fill_
return tensor.fill_(val)
AttributeError: 'NoneType' object has no attribute 'fill_'
|
AttributeError
|
def DistributedFairseqModel(args, model, process_group):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
"""
# determine which DDP class to extend
assert isinstance(model, nn.Module)
if args.tpu:
ddp_class = TPUDistributedDataParallel
init_kwargs = dict(
module=model,
process_group=process_group,
)
elif args.distributed_wrapper == "DDP" and args.ddp_backend == "c10d":
ddp_class = nn.parallel.DistributedDataParallel
init_kwargs = dict(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
)
# Maintain backward compatibility
if "check_reduction" in inspect.getargspec(ddp_class)[0]:
init_kwargs["check_reduction"] = True
if "find_unused_parameters" in inspect.getargspec(ddp_class)[0]:
init_kwargs["find_unused_parameters"] = args.find_unused_parameters
elif args.distributed_wrapper == "DDP" and args.ddp_backend == "no_c10d":
ddp_class = LegacyDistributedDataParallel
init_kwargs = dict(
module=model,
buffer_size=2**28,
process_group=process_group,
)
elif args.distributed_wrapper == "SlowMo":
if _GOSSIP_DISABLED:
raise ImportError(
"Cannot find gossip library. Please install from: "
"github.com/facebookresearch/stochastic_gradient_push"
)
ddp_class = gossip.GossipDataParallel
# The values of slowmo_momentum below were obtained by tuning on the
# En-De 16 dataset by training the transformer_wmt_en_de_large model
if args.slowmo_momentum is None:
if args.distributed_world_size <= 16:
args.slowmo_momentum = 0.0
elif args.distributed_world_size <= 32:
args.slowmo_momentum = 0.2
elif args.distributed_world_size <= 64:
args.slowmo_momentum = 0.5
else:
args.slowmo_momentum = 0.6
init_kwargs = dict(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
nprocs_per_node=args.nprocs_per_node,
slowmo_momentum=args.slowmo_momentum,
localsgd=(args.slowmo_algorithm == "LocalSGD"),
localsgd_frequency=args.localsgd_frequency,
)
else:
raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)
heartbeat_timeout = getattr(args, "heartbeat_timeout", -1)
class _DistributedFairseqModel(ddp_class):
"""
Extend DistributedDataParallel to check for missing attributes in the
wrapped module and to add a timeout to kill the job if no progress is
made (--heartbeat-timeout).
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._heartbeat_timeout = heartbeat_timeout
if self._heartbeat_timeout > 0:
self._heartbeat = threading.Event()
self._heartbeat_thread = threading.Thread(
target=self._check_heartbeat,
args=(os.getpid(),),
daemon=True,
)
self._heartbeat_thread.start()
else:
self._heartbeat = None
def _check_heartbeat(self, parent_pid):
self._heartbeat.wait() # wait for the first forward pass
while True:
self._heartbeat.clear()
success = self._heartbeat.wait(timeout=self._heartbeat_timeout)
if not success:
logger.error(
(
"Killing job for not making progress in {} seconds. "
"Set --heartbeat-timeout=-1 to disable this timeout."
).format(int(self._heartbeat_timeout))
)
os.kill(parent_pid, signal.SIGKILL)
return
def __getattr__(self, name):
wrapped_module = super().__getattr__("module")
if hasattr(wrapped_module, name):
return getattr(wrapped_module, name)
return super().__getattr__(name)
def forward(self, *args, **kwargs):
if self._heartbeat is not None:
self._heartbeat.set()
return super().forward(*args, **kwargs)
return _DistributedFairseqModel(**init_kwargs)
|
def DistributedFairseqModel(args, model, process_group):
"""
Wrap a *model* to support distributed data parallel training.
This is similar to the built-in DistributedDataParallel, but allows
additional configuration of the DistributedDataParallel class to
use, and also provides easier access to the wrapped model by
forwarding requests for missing attributes to the wrapped model.
Args:
args (argparse.Namespace): fairseq args
model (BaseFairseqModel): model to wrap
process_group: the c10d process group to be used for distributed data
parallel all-reduction.
"""
# determine which DDP class to extend
assert isinstance(model, nn.Module)
if args.tpu:
ddp_class = TPUDistributedDataParallel
init_kwargs = dict(
module=model,
process_group=process_group,
)
elif args.distributed_wrapper == "DDP" and args.ddp_backend == "c10d":
ddp_class = nn.parallel.DistributedDataParallel
init_kwargs = dict(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
bucket_cap_mb=args.bucket_cap_mb,
process_group=process_group,
)
# Maintain backward compatibility
if "check_reduction" in inspect.getargspec(ddp_class)[0]:
init_kwargs["check_reduction"] = True
if "find_unused_parameters" in inspect.getargspec(ddp_class)[0]:
init_kwargs["find_unused_parameters"] = args.find_unused_parameters
elif args.distributed_wrapper == "DDP" and args.ddp_backend == "no_c10d":
ddp_class = LegacyDistributedDataParallel
init_kwargs = dict(
module=model,
buffer_size=2**28,
process_group=process_group,
)
elif args.distributed_wrapper == "SlowMo":
if _GOSSIP_DISABLED:
raise ImportError(
"Cannot find gossip library. Please install from: "
"github.com/facebookresearch/stochastic_gradient_push"
)
ddp_class = gossip.GossipDataParallel
# The values of slowmo_momentum below were obtained by tuning on the
# En-De 16 dataset by training the transformer_wmt_en_de_large model
if args.slowmo_momentum is None:
if args.distributed_world_size <= 16:
args.slowmo_momentum = 0.0
elif args.distributed_world_size <= 32:
args.slowmo_momentum = 0.2
elif args.distributed_world_size <= 64:
args.slowmo_momentum = 0.5
else:
args.slowmo_momentum = 0.6
init_kwargs = dict(
module=model,
device_ids=[args.device_id],
output_device=args.device_id,
broadcast_buffers=args.broadcast_buffers,
nprocs_per_node=args.nprocs_per_node,
slowmo_momentum=args.slowmo_momentum,
localsgd=(args.slowmo_algorithm == "LocalSGD"),
localsgd_frequency=args.localsgd_frequency,
)
else:
raise ValueError("Unknown --ddp-backend: " + args.ddp_backend)
class _DistributedFairseqModel(ddp_class):
"""Extend DistributedDataParallel to check for missing
attributes in the wrapped module."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __getattr__(self, name):
wrapped_module = super().__getattr__("module")
if hasattr(wrapped_module, name):
return getattr(wrapped_module, name)
return super().__getattr__(name)
return _DistributedFairseqModel(**init_kwargs)
|
https://github.com/pytorch/fairseq/issues/1527
|
Traceback (most recent call last):
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 56, in __init__
self.reset_parameters()
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 82, in reset_parameters
nn.init.constant_(self.out_proj.bias, 0.)
File "xxx/torch/nn/init.py", line 120, in constant_
return _no_grad_fill_(tensor, val)
File "xxx/torch/nn/init.py", line 24, in _no_grad_fill_
return tensor.fill_(val)
AttributeError: 'NoneType' object has no attribute 'fill_'
|
AttributeError
|
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._heartbeat_timeout = heartbeat_timeout
if self._heartbeat_timeout > 0:
self._heartbeat = threading.Event()
self._heartbeat_thread = threading.Thread(
target=self._check_heartbeat,
args=(os.getpid(),),
daemon=True,
)
self._heartbeat_thread.start()
else:
self._heartbeat = None
|
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
|
https://github.com/pytorch/fairseq/issues/1527
|
Traceback (most recent call last):
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 56, in __init__
self.reset_parameters()
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 82, in reset_parameters
nn.init.constant_(self.out_proj.bias, 0.)
File "xxx/torch/nn/init.py", line 120, in constant_
return _no_grad_fill_(tensor, val)
File "xxx/torch/nn/init.py", line 24, in _no_grad_fill_
return tensor.fill_(val)
AttributeError: 'NoneType' object has no attribute 'fill_'
|
AttributeError
|
def forward(self, *args, **kwargs):
if self._heartbeat is not None:
self._heartbeat.set()
return super().forward(*args, **kwargs)
|
def forward(self, *inputs, **kwargs):
return self.module(*inputs, **kwargs)
|
https://github.com/pytorch/fairseq/issues/1527
|
Traceback (most recent call last):
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 56, in __init__
self.reset_parameters()
File "xxx/fairseq/fairseq/modules/multihead_attention.py", line 82, in reset_parameters
nn.init.constant_(self.out_proj.bias, 0.)
File "xxx/torch/nn/init.py", line 120, in constant_
return _no_grad_fill_(tensor, val)
File "xxx/torch/nn/init.py", line 24, in _no_grad_fill_
return tensor.fill_(val)
AttributeError: 'NoneType' object has no attribute 'fill_'
|
AttributeError
|
def execute(self):
try:
logger.debug("Passive hunter is attempting to get server certificate")
addr = (str(self.event.host), self.event.port)
cert = ssl.get_server_certificate(addr)
except ssl.SSLError:
# If the server doesn't offer SSL on this port we won't get a certificate
return
self.examine_certificate(cert)
|
def execute(self):
try:
logger.debug("Passive hunter is attempting to get server certificate")
addr = (str(self.event.host), self.event.port)
cert = ssl.get_server_certificate(addr)
except ssl.SSLError:
# If the server doesn't offer SSL on this port we won't get a certificate
return
c = cert.strip(ssl.PEM_HEADER).strip(ssl.PEM_FOOTER)
certdata = base64.decodebytes(c)
emails = re.findall(email_pattern, certdata)
for email in emails:
self.publish_event(CertificateEmail(email=email))
|
https://github.com/aquasecurity/kube-hunter/issues/349
|
2020-04-29 15:18:23,356 DEBUG kube_hunter.core.events.handler expected bytes-like object, not str
Traceback (most recent call last):
File "/usr/lib/python3.6/base64.py", line 510, in _input_type_check
m = memoryview(s)
TypeError: memoryview: a bytes-like object is required, not 'str'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/mac/kube-hunter/kube_hunter/core/events/handler.py", line 137, in worker
hook.execute()
File "/mac/kube-hunter/kube_hunter/modules/hunting/certificates.py", line 43, in execute
certdata = base64.decodebytes(c)
File "/usr/lib/python3.6/base64.py", line 545, in decodebytes
_input_type_check(s)
File "/usr/lib/python3.6/base64.py", line 513, in _input_type_check
raise TypeError(msg) from err
TypeError: expected bytes-like object, not str
|
TypeError
|
def extra_attributes(self):
attributes = {}
for listener in self.listeners:
attributes.update(listener.extra_attributes)
return attributes
|
def extra_attributes(self):
attributes = {}
for listener in self.listeners:
attributes.update(listener)
return attributes
|
https://github.com/agronholm/anyio/issues/157
|
import anyio
listener = await anyio.create_tcp_listener(local_port=33333)
listener.extra(anyio.abc.SocketAttribute.local_address)
Traceback (most recent call last):
File ".../lib/python3.8/concurrent/futures/_base.py", line 439, in result
return self.__get_result()
File ".../lib/python3.8/concurrent/futures/_base.py", line 388, in __get_result
raise self._exception
File ".../lib/python3.8/asyncio/__main__.py", line 34, in callback
coro = func()
File "<console>", line 1, in <module>
File ".../python3.8/site-packages/anyio/_core/_typedattr.py", line 74, in extra
return self.extra_attributes[attribute]()
File ".../lib/python3.8/site-packages/anyio/streams/stapled.py", line 123, in extra_attributes
attributes.update(listener)
TypeError: 'SocketListener' object is not iterable
|
TypeError
|
def load_ldconfig_cache():
"""
Create a cache of the `ldconfig`-output to call it only once.
It contains thousands of libraries and running it on every dylib
is expensive.
"""
global LDCONFIG_CACHE
if LDCONFIG_CACHE is not None:
return
from distutils.spawn import find_executable
ldconfig = find_executable("ldconfig")
if ldconfig is None:
# If `lsconfig` is not found in $PATH, search it in some fixed
# directories. Simply use a second call instead of fiddling
# around with checks for empty env-vars and string-concat.
ldconfig = find_executable("ldconfig", "/usr/sbin:/sbin:/usr/bin:/usr/sbin")
# if we still couldn't find 'ldconfig' command
if ldconfig is None:
LDCONFIG_CACHE = {}
return
if is_freebsd or is_openbsd:
# This has a quite different format than other Unixes
# [vagrant@freebsd-10 ~]$ ldconfig -r
# /var/run/ld-elf.so.hints:
# search directories: /lib:/usr/lib:/usr/lib/compat:...
# 0:-lgeom.5 => /lib/libgeom.so.5
# 184:-lpython2.7.1 => /usr/local/lib/libpython2.7.so.1
ldconfig_arg = "-r"
splitlines_count = 2
pattern = re.compile(r"^\s+\d+:-l(\S+)(\s.*)? => (\S+)")
else:
# Skip first line of the library list because it is just
# an informative line and might contain localized characters.
# Example of first line with local cs_CZ.UTF-8:
# $ /sbin/ldconfig -p
# V keši „/etc/ld.so.cache“ nalezeno knihoven: 2799
# libzvbi.so.0 (libc6,x86-64) => /lib64/libzvbi.so.0
# libzvbi-chains.so.0 (libc6,x86-64) => /lib64/libzvbi-chains.so.0
ldconfig_arg = "-p"
splitlines_count = 1
pattern = re.compile(r"^\s+(\S+)(\s.*)? => (\S+)")
try:
text = compat.exec_command(ldconfig, ldconfig_arg)
except ExecCommandFailed:
logger.warning("Failed to execute ldconfig. Disabling LD cache.")
LDCONFIG_CACHE = {}
return
text = text.strip().splitlines()[splitlines_count:]
LDCONFIG_CACHE = {}
for line in text:
# :fixme: this assumes libary names do not contain whitespace
m = pattern.match(line)
# Sanitize away any abnormal lines of output.
if m is None:
# Warn about it then skip the rest of this iteration.
if re.search("Cache generated by:", line):
# See #5540. This particular line is harmless.
pass
else:
logger.warning("Unrecognised line of output %r from ldconfig", line)
continue
path = m.groups()[-1]
if is_freebsd or is_openbsd:
# Insert `.so` at the end of the lib's basename. soname
# and filename may have (different) trailing versions. We
# assume the `.so` in the filename to mark the end of the
# lib's basename.
bname = os.path.basename(path).split(".so", 1)[0]
name = "lib" + m.group(1)
assert name.startswith(bname)
name = bname + ".so" + name[len(bname) :]
else:
name = m.group(1)
# ldconfig may know about several versions of the same lib,
# e.g. differents arch, different libc, etc. Use the first
# entry.
if not name in LDCONFIG_CACHE:
LDCONFIG_CACHE[name] = path
|
def load_ldconfig_cache():
"""
Create a cache of the `ldconfig`-output to call it only once.
It contains thousands of libraries and running it on every dylib
is expensive.
"""
global LDCONFIG_CACHE
if LDCONFIG_CACHE is not None:
return
from distutils.spawn import find_executable
ldconfig = find_executable("ldconfig")
if ldconfig is None:
# If `lsconfig` is not found in $PATH, search it in some fixed
# directories. Simply use a second call instead of fiddling
# around with checks for empty env-vars and string-concat.
ldconfig = find_executable("ldconfig", "/usr/sbin:/sbin:/usr/bin:/usr/sbin")
# if we still couldn't find 'ldconfig' command
if ldconfig is None:
LDCONFIG_CACHE = {}
return
if is_freebsd or is_openbsd:
# This has a quite different format than other Unixes
# [vagrant@freebsd-10 ~]$ ldconfig -r
# /var/run/ld-elf.so.hints:
# search directories: /lib:/usr/lib:/usr/lib/compat:...
# 0:-lgeom.5 => /lib/libgeom.so.5
# 184:-lpython2.7.1 => /usr/local/lib/libpython2.7.so.1
ldconfig_arg = "-r"
splitlines_count = 2
pattern = re.compile(r"^\s+\d+:-l(\S+)(\s.*)? => (\S+)")
else:
# Skip first line of the library list because it is just
# an informative line and might contain localized characters.
# Example of first line with local cs_CZ.UTF-8:
# $ /sbin/ldconfig -p
# V keši „/etc/ld.so.cache“ nalezeno knihoven: 2799
# libzvbi.so.0 (libc6,x86-64) => /lib64/libzvbi.so.0
# libzvbi-chains.so.0 (libc6,x86-64) => /lib64/libzvbi-chains.so.0
ldconfig_arg = "-p"
splitlines_count = 1
pattern = re.compile(r"^\s+(\S+)(\s.*)? => (\S+)")
try:
text = compat.exec_command(ldconfig, ldconfig_arg)
except ExecCommandFailed:
logger.warning("Failed to execute ldconfig. Disabling LD cache.")
LDCONFIG_CACHE = {}
return
text = text.strip().splitlines()[splitlines_count:]
LDCONFIG_CACHE = {}
for line in text:
# :fixme: this assumes libary names do not contain whitespace
m = pattern.match(line)
path = m.groups()[-1]
if is_freebsd or is_openbsd:
# Insert `.so` at the end of the lib's basename. soname
# and filename may have (different) trailing versions. We
# assume the `.so` in the filename to mark the end of the
# lib's basename.
bname = os.path.basename(path).split(".so", 1)[0]
name = "lib" + m.group(1)
assert name.startswith(bname)
name = bname + ".so" + name[len(bname) :]
else:
name = m.group(1)
# ldconfig may know about several versions of the same lib,
# e.g. differents arch, different libc, etc. Use the first
# entry.
if not name in LDCONFIG_CACHE:
LDCONFIG_CACHE[name] = path
|
https://github.com/pyinstaller/pyinstaller/issues/5540
|
Traceback (most recent call last):
File "/somewhere_on_my_system/venv/bin/pyinstaller", line 8, in <module>
sys.exit(run())
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/__main__.py", line 114, in run
run_build(pyi_config, spec_file, **vars(args))
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/__main__.py", line 65, in run_build
PyInstaller.building.build_main.main(pyi_config, spec_file, **kwargs)
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/building/build_main.py", line 725, in main
build(specfile, kw.get('distpath'), kw.get('workpath'), kw.get('clean_build'))
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/building/build_main.py", line 672, in build
exec(code, spec_namespace)
File "/somewhere_on_my_system/web_ui.spec", line 11, in <module>
a = Analysis(
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/building/build_main.py", line 242, in __init__
self.__postinit__()
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/building/datastruct.py", line 160, in __postinit__
self.assemble()
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/building/build_main.py", line 438, in assemble
ctypes_binaries = scan_code_for_ctypes(co)
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/depend/utils.py", line 145, in scan_code_for_ctypes
binaries = _resolveCtypesImports(binaries)
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/depend/utils.py", line 319, in _resolveCtypesImports
load_ldconfig_cache()
File "/somewhere_on_my_system/venv/lib/python3.9/site-packages/PyInstaller/depend/utils.py", line 402, in load_ldconfig_cache
path = m.groups()[-1]
AttributeError: 'NoneType' object has no attribute 'groups'
|
AttributeError
|
def __init__(self):
if sys.stdin.isatty():
self.read_handle = GetStdHandle(STD_INPUT_HANDLE)
self.read_handle.SetConsoleMode(
ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT
)
self.cur_event_length = 0
self.cur_keys_length = 0
self.captured_chars = []
else:
raise InitError("Terminal was not a tty. Keyboard input disabled")
|
def __init__(self):
self.read_handle = GetStdHandle(STD_INPUT_HANDLE)
self.read_handle.SetConsoleMode(
ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT
)
self.cur_event_length = 0
self.cur_keys_length = 0
self.captured_chars = []
|
https://github.com/locustio/locust/issues/1654
|
[Locust_test] $ powershell.exe -NonInteractive -ExecutionPolicy Bypass -File C:\Users\locust\AppData\Local\Temp\jenkins11138277147510956709.ps1
[2020-12-10 19:13:09,846] robot-vm/INFO/locust.main: Run time limit set to 3 seconds
[2020-12-10 19:13:09,847] robot-vm/INFO/locust.main: Starting Locust 1.4.1
[2020-12-10 19:13:09,847] robot-vm/INFO/locust.runners: Spawning 1 users at the rate 1 users/s (0 users already running)...
[2020-12-10 19:13:09,847] robot-vm/INFO/locust.runners: All users spawned: MyUser: 1 (1 total running)
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._gevent_cgreenlet.Greenlet.run
File "c:\python39\lib\site-packages\locust\input_events.py", line 89, in input_listener_func
poller = get_poller()
File "c:\python39\lib\site-packages\locust\input_events.py", line 81, in get_poller
return WindowsKeyPoller()
File "c:\python39\lib\site-packages\locust\input_events.py", line 47, in __init__
self.read_handle.SetConsoleMode(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT)
pywintypes.error: (6, 'SetConsoleMode', 'The handle is invalid.')
2020-12-10T17:13:09Z <Greenlet at 0x19066ffdd00: input_listener_func> failed with error
Name # reqs # fails | Avg Min Max Median | req/s failures/s
--------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------
Aggregated 0 0(0.00%) | 0 0 0 0 | 0.00 0.00
[2020-12-10 19:13:09,855] robot-vm/CRITICAL/locust.main: Unhandled exception in greenlet: <Greenlet at 0x19066ffdd00: input_listener_func>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._gevent_cgreenlet.Greenlet.run
File "c:\python39\lib\site-packages\locust\input_events.py", line 89, in input_listener_func
poller = get_poller()
File "c:\python39\lib\site-packages\locust\input_events.py", line 81, in get_poller
return WindowsKeyPoller()
File "c:\python39\lib\site-packages\locust\input_events.py", line 47, in __init__
self.read_handle.SetConsoleMode(ENABLE_LINE_INPUT | ENABLE_ECHO_INPUT | ENABLE_PROCESSED_INPUT)
pywintypes.error: (6, 'SetConsoleMode', 'The handle is invalid.')
Name # reqs # fails | Avg Min Max Median | req/s failures/s
--------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------
Aggregated 0 0(0.00%) | 0 0 0 0 | 0.00 0.00
[2020-12-10 19:13:12,486] robot-vm/INFO/locust.main: Time limit reached. Stopping Locust.
[2020-12-10 19:13:12,486] robot-vm/INFO/locust.runners: Stopping 1 users
[2020-12-10 19:13:12,487] robot-vm/INFO/locust.runners: 1 Users have been stopped, 0 still running
[2020-12-10 19:13:12,487] robot-vm/INFO/locust.main: Running teardowns...
[2020-12-10 19:13:12,487] robot-vm/INFO/locust.main: Shutting down (exit code 2), bye.
[2020-12-10 19:13:12,487] robot-vm/INFO/locust.main: Cleaning up runner...
Name # reqs # fails | Avg Min Max Median | req/s failures/s
--------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------
Aggregated 0 0(0.00%) | 0 0 0 0 | 0.00 0.00
Response time percentiles (approximated)
Type Name 50% 66% 75% 80% 90% 95% 98% 99% 99.9% 99.99% 100% # reqs
--------|------------------------------------------------------------|---------|------|------|------|------|------|------|------|------|------|------|------|
--------|------------------------------------------------------------|---------|------|------|------|------|------|------|------|------|------|------|------|
executing my_task
Build step 'PowerShell' marked build as failure
Archiving artifacts
Finished: FAILURE
REST API
Jenkins 2.249.3
|
pywintypes.error
|
def stats_history(runner):
"""Save current stats info to history for charts of report."""
while True:
stats = runner.stats
if not stats.total.use_response_times_cache:
break
r = {
"time": datetime.datetime.now().strftime("%H:%M:%S"),
"current_rps": stats.total.current_rps or 0,
"current_fail_per_sec": stats.total.current_fail_per_sec or 0,
"response_time_percentile_95": stats.total.get_current_response_time_percentile(
0.95
)
or 0,
"response_time_percentile_50": stats.total.get_current_response_time_percentile(
0.5
)
or 0,
"user_count": runner.user_count or 0,
}
stats.history.append(r)
gevent.sleep(HISTORY_STATS_INTERVAL_SEC)
|
def stats_history(runner):
"""Save current stats info to history for charts of report."""
while True:
stats = runner.stats
r = {
"time": datetime.datetime.now().strftime("%H:%M:%S"),
"current_rps": stats.total.current_rps or 0,
"current_fail_per_sec": stats.total.current_fail_per_sec or 0,
"response_time_percentile_95": stats.total.get_current_response_time_percentile(
0.95
)
or 0,
"response_time_percentile_50": stats.total.get_current_response_time_percentile(
0.5
)
or 0,
"user_count": runner.user_count or 0,
}
stats.history.append(r)
gevent.sleep(HISTORY_STATS_INTERVAL_SEC)
|
https://github.com/locustio/locust/issues/1531
|
$ locust --worker --locustfile=main.py
[2020-08-20 11:02:50,637] C02TD0F6GTDX/INFO/locust.main: Starting Locust 1.2
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._gevent_cgreenlet.Greenlet.run
File "/usr/local/lib/python3.8/site-packages/locust/stats.py", line 766, in stats_history
'response_time_percentile_95': stats.total.get_current_response_time_percentile(0.95) or 0,
File "/usr/local/lib/python3.8/site-packages/locust/stats.py", line 553, in get_current_response_time_percentile
raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
ValueError: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
2020-08-20T09:02:50Z <Greenlet at 0x10803f6a0: stats_history(<locust.runners.WorkerRunner object at 0x10806da60)> failed with ValueError
|
ValueError
|
def fire(self, *, reverse=False, **kwargs):
if reverse:
handlers = reversed(self._handlers)
else:
handlers = self._handlers
for handler in handlers:
try:
handler(**kwargs)
except Exception as e:
logging.error("Uncaught exception in event handler: %s", e)
unhandled_greenlet_exception = True
|
def fire(self, *, reverse=False, **kwargs):
if reverse:
handlers = reversed(self._handlers)
else:
handlers = self._handlers
for handler in handlers:
handler(**kwargs)
|
https://github.com/locustio/locust/issues/1461
|
[2020-07-03 03:37:18,086] instance-1/INFO/locust.main: Time limit reached. Stopping Locust.
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._gevent_cgreenlet.Greenlet.run
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/main.py", line 231, in timelimit_stop
runner.quit()
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/runners.py", line 294, in quit
self.stop()
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/runners.py", line 340, in stop
self.environment.events.test_stop.fire(environment=self.environment)
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/event.py", line 33, in fire
handler(**kwargs)
File "/home/camilojimenez/locust/locustfile.py", line 6, in on_test_stop
1/0
ZeroDivisionError: division by zero
2020-07-03T03:37:18Z <Greenlet at 0x7fcad134b9d8: timelimit_stop> failed with ZeroDivisionError
[2020-07-03 03:37:18,091] instance-1/CRITICAL/locust.main: Unhandled exception in greenlet: <Greenlet at 0x7fcad134b9d8: timelimit_stop>
Traceback (most recent call last):
File "src/gevent/greenlet.py", line 854, in gevent._gevent_cgreenlet.Greenlet.run
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/main.py", line 231, in timelimit_stop
runner.quit()
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/runners.py", line 294, in quit
self.stop()
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/runners.py", line 340, in stop
self.environment.events.test_stop.fire(environment=self.environment)
File "/home/camilojimenez/locust/venv/lib/python3.7/site-packages/locust/event.py", line 33, in fire
handler(**kwargs)
File "/home/camilojimenez/locust/locustfile.py", line 6, in on_test_stop
1/0
ZeroDivisionError: division by zero
Name # reqs # fails Avg Min Max | Median req/s failures/s
--------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------
Aggregated 0 0(0.00%) 0 0 0 | 0 0.00 0.00
|
ZeroDivisionError
|
def __init__(self, *args, master_host, master_port, **kwargs):
# Create a new RequestStats with use_response_times_cache set to False to save some memory
# and CPU cycles. We need to create the new RequestStats before we call super() (since int's
# used in the constructor of DistributedLocustRunner)
self.stats = RequestStats(use_response_times_cache=False)
super().__init__(*args, **kwargs)
self.client_id = socket.gethostname() + "_" + uuid4().hex
self.master_host = master_host
self.master_port = master_port
self.client = rpc.Client(master_host, master_port, self.client_id)
self.greenlet.spawn(self.heartbeat)
self.greenlet.spawn(self.worker)
self.client.send(Message("client_ready", None, self.client_id))
self.worker_state = STATE_INIT
self.greenlet.spawn(self.stats_reporter)
# register listener for when all locust users have hatched, and report it to the master node
def on_hatch_complete(user_count):
self.client.send(
Message("hatch_complete", {"count": user_count}, self.client_id)
)
self.worker_state = STATE_RUNNING
self.environment.events.hatch_complete.add_listener(on_hatch_complete)
# register listener that adds the current number of spawned locusts to the report that is sent to the master node
def on_report_to_master(client_id, data):
data["user_count"] = self.user_count
self.environment.events.report_to_master.add_listener(on_report_to_master)
# register listener that sends quit message to master
def on_quitting():
self.client.send(Message("quit", None, self.client_id))
self.environment.events.quitting.add_listener(on_quitting)
# register listener thats sends locust exceptions to master
def on_locust_error(locust_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.client.send(
Message(
"exception",
{"msg": str(exception), "traceback": formatted_tb},
self.client_id,
)
)
self.environment.events.locust_error.add_listener(on_locust_error)
|
def __init__(self, *args, master_host, master_port, **kwargs):
super().__init__(*args, **kwargs)
self.client_id = socket.gethostname() + "_" + uuid4().hex
self.master_host = master_host
self.master_port = master_port
self.client = rpc.Client(master_host, master_port, self.client_id)
self.greenlet.spawn(self.heartbeat)
self.greenlet.spawn(self.worker)
self.client.send(Message("client_ready", None, self.client_id))
self.worker_state = STATE_INIT
self.greenlet.spawn(self.stats_reporter)
# register listener for when all locust users have hatched, and report it to the master node
def on_hatch_complete(user_count):
self.client.send(
Message("hatch_complete", {"count": user_count}, self.client_id)
)
self.worker_state = STATE_RUNNING
self.environment.events.hatch_complete.add_listener(on_hatch_complete)
# register listener that adds the current number of spawned locusts to the report that is sent to the master node
def on_report_to_master(client_id, data):
data["user_count"] = self.user_count
self.environment.events.report_to_master.add_listener(on_report_to_master)
# register listener that sends quit message to master
def on_quitting():
self.client.send(Message("quit", None, self.client_id))
self.environment.events.quitting.add_listener(on_quitting)
# register listener thats sends locust exceptions to master
def on_locust_error(locust_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.client.send(
Message(
"exception",
{"msg": str(exception), "traceback": formatted_tb},
self.client_id,
)
)
self.environment.events.locust_error.add_listener(on_locust_error)
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def __init__(self, use_response_times_cache=True):
"""
The value of use_response_times_cache will be set for each StatsEntry() when they are created.
Settings it to False saves some memory and CPU cycles which we can do on worker nodes where
the response_times_cache is not needed.
"""
self.use_response_times_cache = use_response_times_cache
self.entries = {}
self.errors = {}
self.total = StatsEntry(
self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache
)
|
def __init__(self):
self.entries = {}
self.errors = {}
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=True)
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def get(self, name, method):
"""
Retrieve a StatsEntry instance by name and method
"""
entry = self.entries.get((name, method))
if not entry:
entry = StatsEntry(
self, name, method, use_response_times_cache=self.use_response_times_cache
)
self.entries[(name, method)] = entry
return entry
|
def get(self, name, method):
"""
Retrieve a StatsEntry instance by name and method
"""
entry = self.entries.get((name, method))
if not entry:
entry = StatsEntry(self, name, method, True)
self.entries[(name, method)] = entry
return entry
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def clear_all(self):
"""
Remove all stats entries and errors
"""
self.total = StatsEntry(
self, "Aggregated", None, use_response_times_cache=self.use_response_times_cache
)
self.entries = {}
self.errors = {}
|
def clear_all(self):
"""
Remove all stats entries and errors
"""
self.total = StatsEntry(self, "Aggregated", None, use_response_times_cache=True)
self.entries = {}
self.errors = {}
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def extend(self, other):
"""
Extend the data from the current StatsEntry with the stats from another
StatsEntry instance.
"""
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = self.last_request_timestamp
if (
self.last_request_timestamp is not None
and other.last_request_timestamp is not None
):
self.last_request_timestamp = max(
self.last_request_timestamp, other.last_request_timestamp
)
elif other.last_request_timestamp is not None:
self.last_request_timestamp = other.last_request_timestamp
self.start_time = min(self.start_time, other.start_time)
self.num_requests = self.num_requests + other.num_requests
self.num_none_requests = self.num_none_requests + other.num_none_requests
self.num_failures = self.num_failures + other.num_failures
self.total_response_time = self.total_response_time + other.total_response_time
self.max_response_time = max(self.max_response_time, other.max_response_time)
if self.min_response_time is not None and other.min_response_time is not None:
self.min_response_time = min(self.min_response_time, other.min_response_time)
elif other.min_response_time is not None:
# this means self.min_response_time is None, so we can safely replace it
self.min_response_time = other.min_response_time
self.total_content_length = self.total_content_length + other.total_content_length
for key in other.response_times:
self.response_times[key] = (
self.response_times.get(key, 0) + other.response_times[key]
)
for key in other.num_reqs_per_sec:
self.num_reqs_per_sec[key] = (
self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
)
for key in other.num_fail_per_sec:
self.num_fail_per_sec[key] = (
self.num_fail_per_sec.get(key, 0) + other.num_fail_per_sec[key]
)
if self.use_response_times_cache:
# If we've entered a new second, we'll cache the response times. Note that there
# might still be reports from other worker nodes - that contains requests for the same
# time periods - that hasn't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
last_time = (
self.last_request_timestamp and int(self.last_request_timestamp) or None
)
if last_time and last_time > (
old_last_request_timestamp and int(old_last_request_timestamp) or 0
):
self._cache_response_times(last_time)
|
def extend(self, other):
"""
Extend the data from the current StatsEntry with the stats from another
StatsEntry instance.
"""
if (
self.last_request_timestamp is not None
and other.last_request_timestamp is not None
):
self.last_request_timestamp = max(
self.last_request_timestamp, other.last_request_timestamp
)
elif other.last_request_timestamp is not None:
self.last_request_timestamp = other.last_request_timestamp
self.start_time = min(self.start_time, other.start_time)
self.num_requests = self.num_requests + other.num_requests
self.num_none_requests = self.num_none_requests + other.num_none_requests
self.num_failures = self.num_failures + other.num_failures
self.total_response_time = self.total_response_time + other.total_response_time
self.max_response_time = max(self.max_response_time, other.max_response_time)
if self.min_response_time is not None and other.min_response_time is not None:
self.min_response_time = min(self.min_response_time, other.min_response_time)
elif other.min_response_time is not None:
# this means self.min_response_time is None, so we can safely replace it
self.min_response_time = other.min_response_time
self.total_content_length = self.total_content_length + other.total_content_length
for key in other.response_times:
self.response_times[key] = (
self.response_times.get(key, 0) + other.response_times[key]
)
for key in other.num_reqs_per_sec:
self.num_reqs_per_sec[key] = (
self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]
)
for key in other.num_fail_per_sec:
self.num_fail_per_sec[key] = (
self.num_fail_per_sec.get(key, 0) + other.num_fail_per_sec[key]
)
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def setup_distributed_stats_event_listeners(events, stats):
def on_report_to_master(client_id, data):
data["stats"] = stats.serialize_stats()
data["stats_total"] = stats.total.get_stripped_report()
data["errors"] = stats.serialize_errors()
stats.errors = {}
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if not request_key in stats.entries:
stats.entries[request_key] = StatsEntry(
stats, entry.name, entry.method, use_response_times_cache=True
)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
events.report_to_master.add_listener(on_report_to_master)
events.worker_report.add_listener(on_worker_report)
|
def setup_distributed_stats_event_listeners(events, stats):
def on_report_to_master(client_id, data):
data["stats"] = stats.serialize_stats()
data["stats_total"] = stats.total.get_stripped_report()
data["errors"] = stats.serialize_errors()
stats.errors = {}
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if not request_key in stats.entries:
stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = stats.total.last_request_timestamp
# update the total StatsEntry
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
if stats.total.last_request_timestamp and stats.total.last_request_timestamp > (
old_last_request_timestamp or 0
):
# If we've entered a new second, we'll cache the response times. Note that there
# might still be reports from other worker nodes - that contains requests for the same
# time periods - that hasn't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
stats.total._cache_response_times(int(stats.total.last_request_timestamp))
events.report_to_master.add_listener(on_report_to_master)
events.worker_report.add_listener(on_worker_report)
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if not request_key in stats.entries:
stats.entries[request_key] = StatsEntry(
stats, entry.name, entry.method, use_response_times_cache=True
)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
|
def on_worker_report(client_id, data):
for stats_data in data["stats"]:
entry = StatsEntry.unserialize(stats_data)
request_key = (entry.name, entry.method)
if not request_key in stats.entries:
stats.entries[request_key] = StatsEntry(stats, entry.name, entry.method)
stats.entries[request_key].extend(entry)
for error_key, error in data["errors"].items():
if error_key not in stats.errors:
stats.errors[error_key] = StatsError.from_dict(error)
else:
stats.errors[error_key].occurrences += error["occurrences"]
# save the old last_request_timestamp, to see if we should store a new copy
# of the response times in the response times cache
old_last_request_timestamp = stats.total.last_request_timestamp
# update the total StatsEntry
stats.total.extend(StatsEntry.unserialize(data["stats_total"]))
if stats.total.last_request_timestamp and stats.total.last_request_timestamp > (
old_last_request_timestamp or 0
):
# If we've entered a new second, we'll cache the response times. Note that there
# might still be reports from other worker nodes - that contains requests for the same
# time periods - that hasn't been received/accounted for yet. This will cause the cache to
# lag behind a second or two, but since StatsEntry.current_response_time_percentile()
# (which is what the response times cache is used for) uses an approximation of the
# last 10 seconds anyway, it should be fine to ignore this.
stats.total._cache_response_times(int(stats.total.last_request_timestamp))
|
https://github.com/locustio/locust/issues/1315
|
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: Traceback (most recent call last):
[2020-04-06 13:01:45,700] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/virtualenvs/locust/bin/locust", line 11, in <module>
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: load_entry_point('locustio', 'console_scripts', 'locust')()
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,702] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 297, in main
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr: shutdown(code=code)
[2020-04-06 13:01:45,703] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,704] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/main.py", line 281, in shutdown
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr: write_stat_csvs(runner.stats, options.csvfilebase, options.stats_history_enabled)
[2020-04-06 13:01:45,705] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 757, in write_stat_csvs
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,706] Jonatans-Air.localdomain/ERROR/stderr: f.write(stats_history_csv(stats, stats_history_enabled) + "\n")
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,707] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 871, in stats_history_csv
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: percentile_str = ','.join([
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,708] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 872, in <listcomp>
[2020-04-06 13:01:45,709] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: str(int(s.get_current_response_time_percentile(x) or 0)) for x in PERCENTILES_TO_REPORT])
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,710] Jonatans-Air.localdomain/ERROR/stderr: File "/Users/heyman/projects/locust/locust/stats.py", line 508, in get_current_response_time_percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: ValueError
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: :
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr: StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile
[2020-04-06 13:01:45,711] Jonatans-Air.localdomain/ERROR/stderr:
|
ValueError
|
def start_hatching(self, locust_count=None, hatch_rate=None, wait=False):
if hatch_rate > 100:
logger.warning(
"Your selected hatch rate is very high (>100), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
)
self.hatching_greenlet = gevent.spawn(
lambda: super(LocalLocustRunner, self).start_hatching(
locust_count, hatch_rate, wait=wait
)
)
self.greenlet = self.hatching_greenlet
|
def start_hatching(self, locust_count=None, hatch_rate=None, wait=False):
self.hatching_greenlet = gevent.spawn(
lambda: super(LocalLocustRunner, self).start_hatching(
locust_count, hatch_rate, wait=wait
)
)
self.greenlet = self.hatching_greenlet
|
https://github.com/locustio/locust/issues/1174
|
Traceback (most recent call last):
File "gevent/pywsgi.py", line 964, in handle_one_response
self.run_application()
File "gevent/pywsgi.py", line 911, in run_application
self.result = self.application(self.environ, self.start_response)
File "flask/app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "flask/app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "flask/app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "flask/_compat.py", line 39, in reraise
raise value
File "flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "flask/_compat.py", line 39, in reraise
raise value
File "flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "locust/web.py", line 85, in stop
runners.locust_runner.stop()
File "locust/runners.py", line 196, in stop
for locust_greenlet in self.locusts:
RuntimeError: Set changed size during iteration
2019-11-27T18:41:36Z
{'REMOTE_..., (hidden keys: 26)} failed with RuntimeError
|
RuntimeError
|
def start_hatching(self, locust_count, hatch_rate):
num_slaves = (
len(self.clients.ready) + len(self.clients.running) + len(self.clients.hatching)
)
if not num_slaves:
logger.warning(
"You are running in distributed mode but have no slave servers connected. "
"Please connect slaves prior to swarming."
)
return
self.num_clients = locust_count
self.hatch_rate = hatch_rate
slave_num_clients = locust_count // (num_slaves or 1)
slave_hatch_rate = float(hatch_rate) / (num_slaves or 1)
remaining = locust_count % num_slaves
logger.info(
"Sending hatch jobs of %d locusts and %.2f hatch rate to %d ready clients"
% (slave_num_clients, slave_hatch_rate, num_slaves)
)
if slave_hatch_rate > 100:
logger.warning(
"Your selected hatch rate is very high (>100/slave), and this is known to sometimes cause issues. Do you really need to ramp up that fast?"
)
if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
self.stats.clear_all()
self.exceptions = {}
events.master_start_hatching.fire()
for client in self.clients.ready + self.clients.running + self.clients.hatching:
data = {
"hatch_rate": slave_hatch_rate,
"num_clients": slave_num_clients,
"host": self.host,
"stop_timeout": self.options.stop_timeout,
}
if remaining > 0:
data["num_clients"] += 1
remaining -= 1
self.server.send_to_client(Message("hatch", data, client.id))
self.state = STATE_HATCHING
|
def start_hatching(self, locust_count, hatch_rate):
num_slaves = (
len(self.clients.ready) + len(self.clients.running) + len(self.clients.hatching)
)
if not num_slaves:
logger.warning(
"You are running in distributed mode but have no slave servers connected. "
"Please connect slaves prior to swarming."
)
return
self.num_clients = locust_count
self.hatch_rate = hatch_rate
slave_num_clients = locust_count // (num_slaves or 1)
slave_hatch_rate = float(hatch_rate) / (num_slaves or 1)
remaining = locust_count % num_slaves
logger.info(
"Sending hatch jobs of %d locusts and %.2f hatch rate to %d ready clients"
% (slave_num_clients, slave_hatch_rate, num_slaves)
)
if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
self.stats.clear_all()
self.exceptions = {}
events.master_start_hatching.fire()
for client in self.clients.ready + self.clients.running + self.clients.hatching:
data = {
"hatch_rate": slave_hatch_rate,
"num_clients": slave_num_clients,
"host": self.host,
"stop_timeout": self.options.stop_timeout,
}
if remaining > 0:
data["num_clients"] += 1
remaining -= 1
self.server.send_to_client(Message("hatch", data, client.id))
self.state = STATE_HATCHING
|
https://github.com/locustio/locust/issues/1174
|
Traceback (most recent call last):
File "gevent/pywsgi.py", line 964, in handle_one_response
self.run_application()
File "gevent/pywsgi.py", line 911, in run_application
self.result = self.application(self.environ, self.start_response)
File "flask/app.py", line 2463, in __call__
return self.wsgi_app(environ, start_response)
File "flask/app.py", line 2449, in wsgi_app
response = self.handle_exception(e)
File "flask/app.py", line 1866, in handle_exception
reraise(exc_type, exc_value, tb)
File "flask/_compat.py", line 39, in reraise
raise value
File "flask/app.py", line 2446, in wsgi_app
response = self.full_dispatch_request()
File "flask/app.py", line 1951, in full_dispatch_request
rv = self.handle_user_exception(e)
File "flask/app.py", line 1820, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "flask/_compat.py", line 39, in reraise
raise value
File "flask/app.py", line 1949, in full_dispatch_request
rv = self.dispatch_request()
File "flask/app.py", line 1935, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "locust/web.py", line 85, in stop
runners.locust_runner.stop()
File "locust/runners.py", line 196, in stop
for locust_greenlet in self.locusts:
RuntimeError: Set changed size during iteration
2019-11-27T18:41:36Z
{'REMOTE_..., (hidden keys: 26)} failed with RuntimeError
|
RuntimeError
|
def load_locustfile(path):
"""
Import given locustfile path and return (docstring, callables).
Specifically, the locustfile's ``__doc__`` attribute (a string) and a
dictionary of ``{'name': callable}`` containing all callables which pass
the "is a Locust" test.
"""
def __import_locustfile__(filename, path):
"""
Loads the locust file as a module, similar to performing `import`
"""
try:
# Python 3 compatible
source = importlib.machinery.SourceFileLoader(
os.path.splitext(locustfile)[0], path
)
imported = source.load_module()
except AttributeError:
# Python 2.7 compatible
import imp
imported = imp.load_source(os.path.splitext(locustfile)[0], path)
return imported
# Get directory and locustfile name
directory, locustfile = os.path.split(path)
# If the directory isn't in the PYTHONPATH, add it so our import will work
added_to_path = False
index = None
if directory not in sys.path:
sys.path.insert(0, directory)
added_to_path = True
# If the directory IS in the PYTHONPATH, move it to the front temporarily,
# otherwise other locustfiles -- like Locusts's own -- may scoop the intended
# one.
else:
i = sys.path.index(directory)
if i != 0:
# Store index for later restoration
index = i
# Add to front, then remove from original position
sys.path.insert(0, directory)
del sys.path[i + 1]
# Perform the import
imported = __import_locustfile__(locustfile, path)
# Remove directory from path if we added it ourselves (just to be neat)
if added_to_path:
del sys.path[0]
# Put back in original index if we moved it
if index is not None:
sys.path.insert(index + 1, directory)
del sys.path[0]
# Return our two-tuple
locusts = dict(filter(is_locust, vars(imported).items()))
return imported.__doc__, locusts
|
def load_locustfile(path):
"""
Import given locustfile path and return (docstring, callables).
Specifically, the locustfile's ``__doc__`` attribute (a string) and a
dictionary of ``{'name': callable}`` containing all callables which pass
the "is a Locust" test.
"""
# Get directory and locustfile name
directory, locustfile = os.path.split(path)
# If the directory isn't in the PYTHONPATH, add it so our import will work
added_to_path = False
index = None
if directory not in sys.path:
sys.path.insert(0, directory)
added_to_path = True
# If the directory IS in the PYTHONPATH, move it to the front temporarily,
# otherwise other locustfiles -- like Locusts's own -- may scoop the intended
# one.
else:
i = sys.path.index(directory)
if i != 0:
# Store index for later restoration
index = i
# Add to front, then remove from original position
sys.path.insert(0, directory)
del sys.path[i + 1]
# Perform the import (trimming off the .py)
imported = __import__(os.path.splitext(locustfile)[0])
# Remove directory from path if we added it ourselves (just to be neat)
if added_to_path:
del sys.path[0]
# Put back in original index if we moved it
if index is not None:
sys.path.insert(index + 1, directory)
del sys.path[0]
# Return our two-tuple
locusts = dict(filter(is_locust, vars(imported).items()))
return imported.__doc__, locusts
|
https://github.com/locustio/locust/issues/940
|
unuser@miguel-pc:/$ locust -f /tests/login_and_minimal_navigation.test.py -H http://127.0.0.1:10000 --no-web -c 10 -r 2 -t 30s --only-summary
[2019-01-04 09:32:05,137] miguel-pc/ERROR/stderr: Traceback (most recent call last):
[2019-01-04 09:32:05,137] miguel-pc/ERROR/stderr: File "/usr/local/bin/locust", line 11, in <module>
[2019-01-04 09:32:05,137] miguel-pc/ERROR/stderr:
[2019-01-04 09:32:05,137] miguel-pc/ERROR/stderr: sys.exit(main())
[2019-01-04 09:32:05,137] miguel-pc/ERROR/stderr:
[2019-01-04 09:32:05,137] miguel-pc/ERROR/stderr: File "/usr/local/lib/python3.5/site-packages/locust/main.py", line 391, in main
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr:
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr: docstring, locusts = load_locustfile(locustfile)
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr:
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr: File "/usr/local/lib/python3.5/site-packages/locust/main.py", line 358, in load_locustfile
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr:
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr: imported = __import__(os.path.splitext(locustfile)[0])
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr:
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr: ImportError
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr: :
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr: No module named 'login_and_minimal_navigation'
[2019-01-04 09:32:05,138] miguel-pc/ERROR/stderr:
unuser@miguel-pc:/$
|
ImportError
|
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
weights: Optional[Union[Sequence, Series]] = None,
random_state=None,
):
"""
Return a random sample of items from each group.
You can use `random_state` for reproducibility.
.. versionadded:: 1.1.0
Parameters
----------
n : int, optional
Number of items to return for each group. Cannot be used with
`frac` and must be no larger than the smallest group unless
`replace` is True. Default is one if `frac` is None.
frac : float, optional
Fraction of items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : list-like, optional
Default None results in equal probability weighting.
If passed a list-like then values must have the same length as
the underlying DataFrame or Series object and will be used as
sampling probabilities after normalization within each group.
Values must be non-negative with at least one positive element
within each group.
random_state : int, array-like, BitGenerator, np.random.RandomState, optional
If int, array-like, or BitGenerator (NumPy>=1.17), seed for
random number generator
If np.random.RandomState, use as numpy RandomState object.
Returns
-------
Series or DataFrame
A new object of same type as caller containing items randomly
sampled within each group from the caller object.
See Also
--------
DataFrame.sample: Generate random samples from a DataFrame object.
numpy.random.choice: Generate a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame(
... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
... )
>>> df
a b
0 red 0
1 red 1
2 blue 2
3 blue 3
4 black 4
5 black 5
Select one row at random for each distinct value in column a. The
`random_state` argument can be used to guarantee reproducibility:
>>> df.groupby("a").sample(n=1, random_state=1)
a b
4 black 4
2 blue 2
1 red 1
Set `frac` to sample fixed proportions rather than counts:
>>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
5 5
2 2
0 0
Name: b, dtype: int64
Control sample probabilities within groups by setting weights:
>>> df.groupby("a").sample(
... n=1,
... weights=[1, 1, 1, 0, 0, 1],
... random_state=1,
... )
a b
5 black 5
2 blue 2
0 red 0
"""
from pandas.core.reshape.concat import concat
if weights is not None:
weights = Series(weights, index=self._selected_obj.index)
ws = [weights.iloc[idx] for idx in self.indices.values()]
else:
ws = [None] * self.ngroups
if random_state is not None:
random_state = com.random_state(random_state)
samples = [
obj.sample(
n=n, frac=frac, replace=replace, weights=w, random_state=random_state
)
for (_, obj), w in zip(self, ws)
]
return concat(samples, axis=self.axis)
|
def sample(
self,
n: Optional[int] = None,
frac: Optional[float] = None,
replace: bool = False,
weights: Optional[Union[Sequence, Series]] = None,
random_state=None,
):
"""
Return a random sample of items from each group.
You can use `random_state` for reproducibility.
.. versionadded:: 1.1.0
Parameters
----------
n : int, optional
Number of items to return for each group. Cannot be used with
`frac` and must be no larger than the smallest group unless
`replace` is True. Default is one if `frac` is None.
frac : float, optional
Fraction of items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : list-like, optional
Default None results in equal probability weighting.
If passed a list-like then values must have the same length as
the underlying DataFrame or Series object and will be used as
sampling probabilities after normalization within each group.
Values must be non-negative with at least one positive element
within each group.
random_state : int, array-like, BitGenerator, np.random.RandomState, optional
If int, array-like, or BitGenerator (NumPy>=1.17), seed for
random number generator
If np.random.RandomState, use as numpy RandomState object.
Returns
-------
Series or DataFrame
A new object of same type as caller containing items randomly
sampled within each group from the caller object.
See Also
--------
DataFrame.sample: Generate random samples from a DataFrame object.
numpy.random.choice: Generate a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame(
... {"a": ["red"] * 2 + ["blue"] * 2 + ["black"] * 2, "b": range(6)}
... )
>>> df
a b
0 red 0
1 red 1
2 blue 2
3 blue 3
4 black 4
5 black 5
Select one row at random for each distinct value in column a. The
`random_state` argument can be used to guarantee reproducibility:
>>> df.groupby("a").sample(n=1, random_state=1)
a b
4 black 4
2 blue 2
1 red 1
Set `frac` to sample fixed proportions rather than counts:
>>> df.groupby("a")["b"].sample(frac=0.5, random_state=2)
5 5
2 2
0 0
Name: b, dtype: int64
Control sample probabilities within groups by setting weights:
>>> df.groupby("a").sample(
... n=1,
... weights=[1, 1, 1, 0, 0, 1],
... random_state=1,
... )
a b
5 black 5
2 blue 2
0 red 0
"""
from pandas.core.reshape.concat import concat
if weights is not None:
weights = Series(weights, index=self._selected_obj.index)
ws = [weights[idx] for idx in self.indices.values()]
else:
ws = [None] * self.ngroups
if random_state is not None:
random_state = com.random_state(random_state)
samples = [
obj.sample(
n=n, frac=frac, replace=replace, weights=w, random_state=random_state
)
for (_, obj), w in zip(self, ws)
]
return concat(samples, axis=self.axis)
|
https://github.com/pandas-dev/pandas/issues/39927
|
Traceback (most recent call last):
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/IPython/core/interactiveshell.py", line 3417, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-4-1fc31a504740>", line 1, in <module>
df1.groupby('c').sample(1, weights=df1['d'].to_numpy())
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 3024, in sample
ws = [weights[idx] for idx in self.indices.values()]
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/groupby/groupby.py", line 3024, in <listcomp>
ws = [weights[idx] for idx in self.indices.values()]
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/series.py", line 875, in __getitem__
return self._get_with(key)
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/series.py", line 910, in _get_with
return self.loc[key]
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 895, in __getitem__
return self._getitem_axis(maybe_callable, axis=axis)
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1113, in _getitem_axis
return self._getitem_iterable(key, axis=axis)
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1053, in _getitem_iterable
keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1266, in _get_listlike_indexer
self._validate_read_indexer(keyarr, indexer, axis, raise_missing=raise_missing)
File "/Users/wenjun/miniconda3/lib/python3.8/site-packages/pandas/core/indexing.py", line 1308, in _validate_read_indexer
raise KeyError(f"None of [{key}] are in the [{axis_name}]")
KeyError: "None of [Int64Index([6, 7], dtype='int64')] are in the [index]"
|
KeyError
|
def _wrap_applied_output(
self,
data: Series,
keys: Index,
values: Optional[List[Any]],
not_indexed_same: bool = False,
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self._selection_name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result: FrameOrSeriesUnion = self._reindex_output(
self.obj._constructor_expanddim(values, index=index)
)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self._selection_name
)
return self._reindex_output(result)
|
def _wrap_applied_output(
self, keys: Index, values: Optional[List[Any]], not_indexed_same: bool = False
) -> FrameOrSeriesUnion:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
keys : Index
Keys of groups that Series was grouped by.
values : Optional[List[Any]]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(keys) == 0:
# GH #6265
return self.obj._constructor(
[], name=self._selection_name, index=keys, dtype=np.float64
)
assert values is not None
def _get_index() -> Index:
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823 #24880
index = _get_index()
result: FrameOrSeriesUnion = self._reindex_output(
self.obj._constructor_expanddim(values, index=index)
)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
result = result.stack(dropna=self.observed)
result.name = self._selection_name
return result
elif isinstance(values[0], (Series, DataFrame)):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=_get_index(), name=self._selection_name
)
return self._reindex_output(result)
|
https://github.com/pandas-dev/pandas/issues/26411
|
Empty DataFrame
Columns: []
Index: []
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 2927, in __getitem__
indexer = self.columns.get_loc(key)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/indexes/base.py", line 2659, in get_lo
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 1601, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 1608, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'b'
|
KeyError
|
def _wrap_applied_output(self, data, keys, values, not_indexed_same=False):
if len(keys) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.astype(data.dtypes.to_dict(), copy=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection_name
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = DataFrame(values, index=key_index, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
|
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
return self.obj._constructor(index=keys)
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(keys, values, not_indexed_same=not_indexed_same)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection_name
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = DataFrame(values, index=key_index, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
keys, values, not_indexed_same, first_not_none, key_index
)
|
https://github.com/pandas-dev/pandas/issues/26411
|
Empty DataFrame
Columns: []
Index: []
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 2927, in __getitem__
indexer = self.columns.get_loc(key)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/indexes/base.py", line 2659, in get_lo
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 1601, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 1608, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'b'
|
KeyError
|
def _python_apply_general(self, f: F, data: FrameOrSeriesUnion) -> FrameOrSeriesUnion:
"""
Apply function f in python space
Parameters
----------
f : callable
Function to apply
data : Series or DataFrame
Data to apply f to
Returns
-------
Series or DataFrame
data after applying f
"""
keys, values, mutated = self.grouper.apply(f, data, self.axis)
return self._wrap_applied_output(
data, keys, values, not_indexed_same=mutated or self.mutated
)
|
def _python_apply_general(self, f: F, data: FrameOrSeriesUnion) -> FrameOrSeriesUnion:
"""
Apply function f in python space
Parameters
----------
f : callable
Function to apply
data : Series or DataFrame
Data to apply f to
Returns
-------
Series or DataFrame
data after applying f
"""
keys, values, mutated = self.grouper.apply(f, data, self.axis)
return self._wrap_applied_output(
keys, values, not_indexed_same=mutated or self.mutated
)
|
https://github.com/pandas-dev/pandas/issues/26411
|
Empty DataFrame
Columns: []
Index: []
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 2927, in __getitem__
indexer = self.columns.get_loc(key)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/indexes/base.py", line 2659, in get_lo
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 1601, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 1608, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'b'
|
KeyError
|
def _wrap_applied_output(self, data, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
|
def _wrap_applied_output(self, keys, values, not_indexed_same: bool = False):
raise AbstractMethodError(self)
|
https://github.com/pandas-dev/pandas/issues/26411
|
Empty DataFrame
Columns: []
Index: []
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 2927, in __getitem__
indexer = self.columns.get_loc(key)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/indexes/base.py", line 2659, in get_lo
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 1601, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 1608, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'b'
|
KeyError
|
def __internal_pivot_table(
data: DataFrame,
values,
index,
columns,
aggfunc: Union[AggFuncTypeBase, AggFuncTypeDict],
fill_value,
margins: bool,
dropna: bool,
margins_name: str,
observed: bool,
) -> DataFrame:
"""
Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``.
"""
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys, observed=observed)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how="all")
# gh-21133
# we want to down cast if
# the original values are ints
# as we grouped with a NaN value
# and then dropped, coercing to floats
for v in values:
if (
v in data
and is_integer_dtype(data[v])
and v in agged
and not is_integer_dtype(agged[v])
):
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
# GH17038, this check should only happen if index is defined (not None)
if table.index.nlevels > 1 and index:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
if isinstance(table.index, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
if isinstance(table.columns, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
table = table.reindex(m, axis=1)
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
_table = table.fillna(fill_value, downcast="infer")
assert _table is not None # needed for mypy
table = _table
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(
table,
data,
values,
rows=index,
cols=columns,
aggfunc=aggfunc,
observed=dropna,
margins_name=margins_name,
fill_value=fill_value,
)
# discard the top level
if values_passed and not values_multi and table.columns.nlevels > 1:
table = table.droplevel(0, axis=1)
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how="all", axis=1)
return table
|
def __internal_pivot_table(
data: DataFrame,
values,
index,
columns,
aggfunc: Union[AggFuncTypeBase, AggFuncTypeDict],
fill_value,
margins: bool,
dropna: bool,
margins_name: str,
observed: bool,
) -> DataFrame:
"""
Helper of :func:`pandas.pivot_table` for any non-list ``aggfunc``.
"""
keys = index + columns
values_passed = values is not None
if values_passed:
if is_list_like(values):
values_multi = True
values = list(values)
else:
values_multi = False
values = [values]
# GH14938 Make sure value labels are in data
for i in values:
if i not in data:
raise KeyError(i)
to_filter = []
for x in keys + values:
if isinstance(x, Grouper):
x = x.key
try:
if x in data:
to_filter.append(x)
except TypeError:
pass
if len(to_filter) < len(data.columns):
data = data[to_filter]
else:
values = data.columns
for key in keys:
try:
values = values.drop(key)
except (TypeError, ValueError, KeyError):
pass
values = list(values)
grouped = data.groupby(keys, observed=observed)
agged = grouped.agg(aggfunc)
if dropna and isinstance(agged, ABCDataFrame) and len(agged.columns):
agged = agged.dropna(how="all")
# gh-21133
# we want to down cast if
# the original values are ints
# as we grouped with a NaN value
# and then dropped, coercing to floats
for v in values:
if (
v in data
and is_integer_dtype(data[v])
and v in agged
and not is_integer_dtype(agged[v])
):
agged[v] = maybe_downcast_to_dtype(agged[v], data[v].dtype)
table = agged
# GH17038, this check should only happen if index is defined (not None)
if table.index.nlevels > 1 and index:
# Related GH #17123
# If index_names are integers, determine whether the integers refer
# to the level position or name.
index_names = agged.index.names[: len(index)]
to_unstack = []
for i in range(len(index), len(keys)):
name = agged.index.names[i]
if name is None or name in index_names:
to_unstack.append(i)
else:
to_unstack.append(name)
table = agged.unstack(to_unstack)
if not dropna:
if isinstance(table.index, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.index.levels), names=table.index.names
)
table = table.reindex(m, axis=0)
if isinstance(table.columns, MultiIndex):
m = MultiIndex.from_arrays(
cartesian_product(table.columns.levels), names=table.columns.names
)
table = table.reindex(m, axis=1)
if isinstance(table, ABCDataFrame):
table = table.sort_index(axis=1)
if fill_value is not None:
_table = table.fillna(fill_value, downcast="infer")
assert _table is not None # needed for mypy
table = _table
if margins:
if dropna:
data = data[data.notna().all(axis=1)]
table = _add_margins(
table,
data,
values,
rows=index,
cols=columns,
aggfunc=aggfunc,
observed=dropna,
margins_name=margins_name,
fill_value=fill_value,
)
# discard the top level
if (
values_passed
and not values_multi
and not table.empty
and (table.columns.nlevels > 1)
):
table = table[values[0]]
if len(index) == 0 and len(columns) > 0:
table = table.T
# GH 15193 Make sure empty columns are removed if dropna=True
if isinstance(table, ABCDataFrame) and dropna:
table = table.dropna(how="all", axis=1)
return table
|
https://github.com/pandas-dev/pandas/issues/26411
|
Empty DataFrame
Columns: []
Index: []
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 2927, in __getitem__
indexer = self.columns.get_loc(key)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/indexes/base.py", line 2659, in get_lo
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 1601, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 1608, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'b'
|
KeyError
|
def crosstab(
index,
columns,
values=None,
rownames=None,
colnames=None,
aggfunc=None,
margins=False,
margins_name: str = "All",
dropna: bool = True,
normalize=False,
) -> DataFrame:
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
common_idx = None
pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))]
if pass_objs:
common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False)
rownames = _get_names(index, rownames, prefix="row")
colnames = _get_names(columns, colnames, prefix="col")
# duplicate names mapped to unique names for pivot op
(
rownames_mapper,
unique_rownames,
colnames_mapper,
unique_colnames,
) = _build_names_mapper(rownames, colnames)
from pandas import DataFrame
data = {
**dict(zip(unique_rownames, index)),
**dict(zip(unique_colnames, columns)),
}
df = DataFrame(data, index=common_idx)
if values is None:
df["__dummy__"] = 0
kwargs = {"aggfunc": len, "fill_value": 0}
else:
df["__dummy__"] = values
kwargs = {"aggfunc": aggfunc}
table = df.pivot_table(
"__dummy__",
index=unique_rownames,
columns=unique_colnames,
margins=margins,
margins_name=margins_name,
dropna=dropna,
**kwargs,
)
# Post-process
if normalize is not False:
table = _normalize(
table, normalize=normalize, margins=margins, margins_name=margins_name
)
table = table.rename_axis(index=rownames_mapper, axis=0)
table = table.rename_axis(columns=colnames_mapper, axis=1)
return table
|
def crosstab(
index,
columns,
values=None,
rownames=None,
colnames=None,
aggfunc=None,
margins=False,
margins_name: str = "All",
dropna: bool = True,
normalize=False,
) -> DataFrame:
"""
Compute a simple cross tabulation of two (or more) factors. By default
computes a frequency table of the factors unless an array of values and an
aggregation function are passed.
Parameters
----------
index : array-like, Series, or list of arrays/Series
Values to group by in the rows.
columns : array-like, Series, or list of arrays/Series
Values to group by in the columns.
values : array-like, optional
Array of values to aggregate according to the factors.
Requires `aggfunc` be specified.
rownames : sequence, default None
If passed, must match number of row arrays passed.
colnames : sequence, default None
If passed, must match number of column arrays passed.
aggfunc : function, optional
If specified, requires `values` be specified as well.
margins : bool, default False
Add row/column margins (subtotals).
margins_name : str, default 'All'
Name of the row/column that will contain the totals
when margins is True.
dropna : bool, default True
Do not include columns whose entries are all NaN.
normalize : bool, {'all', 'index', 'columns'}, or {0,1}, default False
Normalize by dividing all values by the sum of values.
- If passed 'all' or `True`, will normalize over all values.
- If passed 'index' will normalize over each row.
- If passed 'columns' will normalize over each column.
- If margins is `True`, will also normalize margin values.
Returns
-------
DataFrame
Cross tabulation of the data.
See Also
--------
DataFrame.pivot : Reshape data based on column values.
pivot_table : Create a pivot table as a DataFrame.
Notes
-----
Any Series passed will have their name attributes used unless row or column
names for the cross-tabulation are specified.
Any input passed containing Categorical data will have **all** of its
categories included in the cross-tabulation, even if the actual data does
not contain any instances of a particular category.
In the event that there aren't overlapping indexes an empty DataFrame will
be returned.
Examples
--------
>>> a = np.array(["foo", "foo", "foo", "foo", "bar", "bar",
... "bar", "bar", "foo", "foo", "foo"], dtype=object)
>>> b = np.array(["one", "one", "one", "two", "one", "one",
... "one", "two", "two", "two", "one"], dtype=object)
>>> c = np.array(["dull", "dull", "shiny", "dull", "dull", "shiny",
... "shiny", "dull", "shiny", "shiny", "shiny"],
... dtype=object)
>>> pd.crosstab(a, [b, c], rownames=['a'], colnames=['b', 'c'])
b one two
c dull shiny dull shiny
a
bar 1 2 1 0
foo 2 2 1 2
Here 'c' and 'f' are not represented in the data and will not be
shown in the output because dropna is True by default. Set
dropna=False to preserve categories with no data.
>>> foo = pd.Categorical(['a', 'b'], categories=['a', 'b', 'c'])
>>> bar = pd.Categorical(['d', 'e'], categories=['d', 'e', 'f'])
>>> pd.crosstab(foo, bar)
col_0 d e
row_0
a 1 0
b 0 1
>>> pd.crosstab(foo, bar, dropna=False)
col_0 d e f
row_0
a 1 0 0
b 0 1 0
c 0 0 0
"""
if values is None and aggfunc is not None:
raise ValueError("aggfunc cannot be used without values.")
if values is not None and aggfunc is None:
raise ValueError("values cannot be used without an aggfunc.")
index = com.maybe_make_list(index)
columns = com.maybe_make_list(columns)
common_idx = None
pass_objs = [x for x in index + columns if isinstance(x, (ABCSeries, ABCDataFrame))]
if pass_objs:
common_idx = get_objs_combined_axis(pass_objs, intersect=True, sort=False)
rownames = _get_names(index, rownames, prefix="row")
colnames = _get_names(columns, colnames, prefix="col")
# duplicate names mapped to unique names for pivot op
(
rownames_mapper,
unique_rownames,
colnames_mapper,
unique_colnames,
) = _build_names_mapper(rownames, colnames)
from pandas import DataFrame
data = {
**dict(zip(unique_rownames, index)),
**dict(zip(unique_colnames, columns)),
}
df = DataFrame(data, index=common_idx)
original_df_cols = df.columns
if values is None:
df["__dummy__"] = 0
kwargs = {"aggfunc": len, "fill_value": 0}
else:
df["__dummy__"] = values
kwargs = {"aggfunc": aggfunc}
table = df.pivot_table(
["__dummy__"],
index=unique_rownames,
columns=unique_colnames,
margins=margins,
margins_name=margins_name,
dropna=dropna,
**kwargs,
)
# GH18321, after pivoting, an extra top level of column index of `__dummy__` is
# created, and this extra level should not be included in the further steps
if not table.empty:
cols_diff = df.columns.difference(original_df_cols)[0]
table = table[cols_diff]
# Post-process
if normalize is not False:
table = _normalize(
table, normalize=normalize, margins=margins, margins_name=margins_name
)
table = table.rename_axis(index=rownames_mapper, axis=0)
table = table.rename_axis(columns=colnames_mapper, axis=1)
return table
|
https://github.com/pandas-dev/pandas/issues/26411
|
Empty DataFrame
Columns: []
Index: []
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/lib/python2.7/dist-packages/pandas/core/frame.py", line 2927, in __getitem__
indexer = self.columns.get_loc(key)
File "/usr/local/lib/python2.7/dist-packages/pandas/core/indexes/base.py", line 2659, in get_lo
return self._engine.get_loc(self._maybe_cast_indexer(key))
File "pandas/_libs/index.pyx", line 108, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/index.pyx", line 132, in pandas._libs.index.IndexEngine.get_loc
File "pandas/_libs/hashtable_class_helper.pxi", line 1601, in pandas._libs.hashtable.PyObjectHashTable.get_item
File "pandas/_libs/hashtable_class_helper.pxi", line 1608, in pandas._libs.hashtable.PyObjectHashTable.get_item
KeyError: 'b'
|
KeyError
|
def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
"""
Convert to Timestamp if possible, otherwise to datetime.datetime.
SAS float64 lacks precision for more than ms resolution so the fit
to datetime.datetime is ok.
Parameters
----------
sas_datetimes : {Series, Sequence[float]}
Dates or datetimes in SAS
unit : {str}
"d" if the floats represent dates, "s" for datetimes
Returns
-------
Series
Series of datetime64 dtype or datetime.datetime.
"""
try:
return pd.to_datetime(sas_datetimes, unit=unit, origin="1960-01-01")
except OutOfBoundsDatetime:
s_series = sas_datetimes.apply(_parse_datetime, unit=unit)
s_series = cast(pd.Series, s_series)
return s_series
|
def _convert_datetimes(sas_datetimes: pd.Series, unit: str) -> pd.Series:
"""
Convert to Timestamp if possible, otherwise to datetime.datetime.
SAS float64 lacks precision for more than ms resolution so the fit
to datetime.datetime is ok.
Parameters
----------
sas_datetimes : {Series, Sequence[float]}
Dates or datetimes in SAS
unit : {str}
"d" if the floats represent dates, "s" for datetimes
Returns
-------
Series
Series of datetime64 dtype or datetime.datetime.
"""
try:
return pd.to_datetime(sas_datetimes, unit=unit, origin="1960-01-01")
except OutOfBoundsDatetime:
if unit == "s":
s_series = sas_datetimes.apply(
lambda sas_float: datetime(1960, 1, 1) + timedelta(seconds=sas_float)
)
s_series = cast(pd.Series, s_series)
return s_series
elif unit == "d":
d_series = sas_datetimes.apply(
lambda sas_float: datetime(1960, 1, 1) + timedelta(days=sas_float)
)
d_series = cast(pd.Series, d_series)
return d_series
else:
raise ValueError("unit must be 'd' or 's'")
|
https://github.com/pandas-dev/pandas/issues/39725
|
Traceback (most recent call last):
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py", line 52, in _convert_datetimes
return pd.to_datetime(sas_datetimes, unit=unit, origin="1960-01-01")
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/core/tools/datetimes.py", line 805, in to_datetime
values = convert_listlike(arg._values, format)
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/core/tools/datetimes.py", line 345, in _convert_listlike_datetimes
result, tz_parsed = tslib.array_with_unit_to_datetime(
File "pandas/_libs/tslib.pyx", line 249, in pandas._libs.tslib.array_with_unit_to_datetime
pandas._libs.tslibs.np_datetime.OutOfBoundsDatetime: cannot convert input with unit 'd'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/io/sas/sasreader.py", line 152, in read_sas
return reader.read()
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py", line 723, in read
rslt = self._chunk_to_dataframe()
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py", line 771, in _chunk_to_dataframe
rslt[name] = _convert_datetimes(rslt[name], "d")
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py", line 59, in _convert_datetimes
return sas_datetimes.apply(
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/core/series.py", line 4135, in apply
mapped = lib.map_infer(values, f, convert=convert_dtype)
File "pandas/_libs/lib.pyx", line 2467, in pandas._libs.lib.map_infer
File "/home/wertha/source/pandas/pandas/tests/io/sas/data/.test/lib/python3.9/site-packages/pandas/io/sas/sas7bdat.py", line 60, in <lambda>
lambda sas_float: datetime(1960, 1, 1) + timedelta(days=sas_float)
ValueError: cannot convert float NaN to integer
|
ValueError
|
def sort_values( # type: ignore[override]
self,
by,
axis: Axis = 0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.set_axis(
self._get_block_manager_axis(axis), ibase.default_index(len(indexer))
)
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
|
def sort_values( # type: ignore[override]
self,
by,
axis: Axis = 0,
ascending=True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
ignore_index: bool = False,
key: ValueKeyFunc = None,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
# need to rewrap columns in Series to apply key function
if key is not None:
keys = [Series(k, name=name) for (k, name) in zip(keys, by)]
indexer = lexsort_indexer(
keys, orders=ascending, na_position=na_position, key=key
)
indexer = ensure_platform_int(indexer)
else:
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
# need to rewrap column in Series to apply key function
if key is not None:
k = Series(k, name=by)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position, key=key
)
new_data = self._mgr.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.set_axis(1, ibase.default_index(len(indexer)))
result = self._constructor(new_data)
if inplace:
return self._update_inplace(result)
else:
return result.__finalize__(self, method="sort_values")
|
https://github.com/pandas-dev/pandas/issues/39426
|
Traceback (most recent call last):
File "sort_values_test6.py", line 19, in <module>
print(df2)
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/frame.py", line 803, in __repr__
self.to_string(
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/frame.py", line 939, in to_string
return fmt.DataFrameRenderer(formatter).to_string(
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/io/formats/format.py", line 1031, in to_string
string = string_formatter.to_string()
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/io/formats/string.py", line 23, in to_string
text = self._get_string_representation()
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/io/formats/string.py", line 47, in _get_string_representation
return self._fit_strcols_to_terminal_width(strcols)
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/io/formats/string.py", line 179, in _fit_strcols_to_terminal_width
self.fmt.truncate()
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/io/formats/format.py", line 700, in truncate
self._truncate_horizontally()
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/io/formats/format.py", line 718, in _truncate_horizontally
self.tr_frame = concat((left, right), axis=1)
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/reshape/concat.py", line 298, in concat
return op.get_result()
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/reshape/concat.py", line 520, in get_result
new_data = concatenate_block_managers(
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/internals/concat.py", line 89, in concatenate_block_managers
return BlockManager(blocks, axes)
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/internals/managers.py", line 143, in __init__
self._verify_integrity()
File "/localdisk/gashiman/miniconda3/lib/python3.8/site-packages/pandas/core/internals/managers.py", line 323, in _verify_integrity
raise construction_error(tot_items, block.shape[1:], self.axes)
ValueError: Shape of passed values is (4, 16), indices imply (32, 16)
|
ValueError
|
def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False):
"""
Parameters
----------
indexer : tuple, slice, scalar
Indexer used to get the locations that will be set to `ser`.
ser : pd.Series
Values to assign to the locations specified by `indexer`.
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = (indexer,)
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
def ravel(i):
return i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.ndim == 2
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather that find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
len_indexer = len(indexer[1])
ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and com.is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
elif is_integer(indexer) and self.ndim == 1:
if is_object_dtype(self.obj):
return ser
ax = self.obj._get_axis(0)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values[indexer]
elif is_integer(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError("Incompatible indexer with Series")
|
def _align_series(self, indexer, ser: Series, multiindex_indexer: bool = False):
"""
Parameters
----------
indexer : tuple, slice, scalar
Indexer used to get the locations that will be set to `ser`.
ser : pd.Series
Values to assign to the locations specified by `indexer`.
multiindex_indexer : boolean, optional
Defaults to False. Should be set to True if `indexer` was from
a `pd.MultiIndex`, to avoid unnecessary broadcasting.
Returns
-------
`np.array` of `ser` broadcast to the appropriate shape for assignment
to the locations selected by `indexer`
"""
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = (indexer,)
if isinstance(indexer, tuple):
# flatten np.ndarray indexers
def ravel(i):
return i.ravel() if isinstance(i, np.ndarray) else i
indexer = tuple(map(ravel, indexer))
aligners = [not com.is_null_slice(idx) for idx in indexer]
sum_aligners = sum(aligners)
single_aligner = sum_aligners == 1
is_frame = self.ndim == 2
obj = self.obj
# are we a single alignable value on a non-primary
# dim (e.g. panel: 1,2, or frame: 0) ?
# hence need to align to a single axis dimension
# rather that find all valid dims
# frame
if is_frame:
single_aligner = single_aligner and aligners[0]
# we have a frame, with multiple indexers on both axes; and a
# series, so need to broadcast (see GH5206)
if sum_aligners == self.ndim and all(is_sequence(_) for _ in indexer):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
if len(indexer) > 1 and not multiindex_indexer:
len_indexer = len(indexer[1])
ser = np.tile(ser, len_indexer).reshape(len_indexer, -1).T
return ser
for i, idx in enumerate(indexer):
ax = obj.axes[i]
# multiple aligners (or null slices)
if is_sequence(idx) or isinstance(idx, slice):
if single_aligner and com.is_null_slice(idx):
continue
new_ix = ax[idx]
if not is_list_like_indexer(new_ix):
new_ix = Index([new_ix])
else:
new_ix = Index(new_ix)
if ser.index.equals(new_ix) or not len(new_ix):
return ser._values.copy()
return ser.reindex(new_ix)._values
# 2 dims
elif single_aligner:
# reindex along index
ax = self.obj.axes[1]
if ser.index.equals(ax) or not len(ax):
return ser._values.copy()
return ser.reindex(ax)._values
elif is_scalar(indexer):
ax = self.obj._get_axis(1)
if ser.index.equals(ax):
return ser._values.copy()
return ser.reindex(ax)._values
raise ValueError("Incompatible indexer with Series")
|
https://github.com/pandas-dev/pandas/issues/38303
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
~/sources/official.clone/pandas/pandas/core/generic.py in _get_axis_number(cls, axis)
456 try:
--> 457 return cls._AXIS_TO_AXIS_NUMBER[axis]
458 except KeyError:
KeyError: 1
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
<ipython-input-24-7c4b5d72ca25> in <module>
----> 1 ser.loc[0] = pd.Series([0])
~/sources/official.clone/pandas/pandas/core/indexing.py in __setitem__(self, key, value)
689
690 iloc = self if self.name == "iloc" else self.obj.iloc
--> 691 iloc._setitem_with_indexer(indexer, value, self.name)
692
693 def _validate_key(self, key, axis: int):
~/sources/official.clone/pandas/pandas/core/indexing.py in _setitem_with_indexer(self, indexer, value, name)
1634 self._setitem_with_indexer_split_path(indexer, value, name)
1635 else:
-> 1636 self._setitem_single_block(indexer, value, name)
1637
1638 def _setitem_with_indexer_split_path(self, indexer, value, name: str):
~/sources/official.clone/pandas/pandas/core/indexing.py in _setitem_single_block(self, indexer, value, name)
1848 # setting for extensionarrays that store dicts. Need to decide
1849 # if it's worth supporting that.
-> 1850 value = self._align_series(indexer, Series(value))
1851
1852 elif isinstance(value, ABCDataFrame) and name != "iloc":
~/sources/official.clone/pandas/pandas/core/indexing.py in _align_series(self, indexer, ser, multiindex_indexer)
2018
2019 elif is_scalar(indexer):
-> 2020 ax = self.obj._get_axis(1)
2021
2022 if ser.index.equals(ax):
~/sources/official.clone/pandas/pandas/core/generic.py in _get_axis(self, axis)
467 @final
468 def _get_axis(self, axis: Axis) -> Index:
--> 469 axis_number = self._get_axis_number(axis)
470 assert axis_number in {0, 1}
471 return self.index if axis_number == 0 else self.columns
~/sources/official.clone/pandas/pandas/core/generic.py in _get_axis_number(cls, axis)
457 return cls._AXIS_TO_AXIS_NUMBER[axis]
458 except KeyError:
--> 459 raise ValueError(f"No axis named {axis} for object type {cls.__name__}")
460
461 @final
ValueError: No axis named 1 for object type Series
|
KeyError
|
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : str
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
assert isinstance(reso, Resolution), (type(reso), reso)
valid_resos = {
"year",
"month",
"quarter",
"day",
"hour",
"minute",
"second",
"minute",
"second",
"millisecond",
"microsecond",
}
if reso.attrname not in valid_resos:
raise KeyError
grp = reso.freq_group
per = Period(parsed, freq=grp.value)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
# timezone
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
elif self.tz is not None:
start = start.tz_localize(self.tz)
end = end.tz_localize(self.tz)
return start, end
|
def _parsed_string_to_bounds(self, reso: Resolution, parsed: datetime):
"""
Calculate datetime bounds for parsed time string and its resolution.
Parameters
----------
reso : str
Resolution provided by parsed string.
parsed : datetime
Datetime from parsed string.
Returns
-------
lower, upper: pd.Timestamp
"""
assert isinstance(reso, Resolution), (type(reso), reso)
valid_resos = {
"year",
"month",
"quarter",
"day",
"hour",
"minute",
"second",
"minute",
"second",
"microsecond",
}
if reso.attrname not in valid_resos:
raise KeyError
grp = reso.freq_group
per = Period(parsed, freq=grp.value)
start, end = per.start_time, per.end_time
# GH 24076
# If an incoming date string contained a UTC offset, need to localize
# the parsed date to this offset first before aligning with the index's
# timezone
if parsed.tzinfo is not None:
if self.tz is None:
raise ValueError(
"The index must be timezone aware when indexing "
"with a date string with a UTC offset"
)
start = start.tz_localize(parsed.tzinfo).tz_convert(self.tz)
end = end.tz_localize(parsed.tzinfo).tz_convert(self.tz)
elif self.tz is not None:
start = start.tz_localize(self.tz)
end = end.tz_localize(self.tz)
return start, end
|
https://github.com/pandas-dev/pandas/issues/33589
|
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-20-482a9c5e8c58> in <module>
----> 1 df['2017-10-25T16:25:04.252':'2017-10-25T16:50:05.237']
/opt/conda/lib/python3.7/site-packages/pandas/core/frame.py in __getitem__(self, key)
2777
2778 # Do we have a slicer (on rows)?
-> 2779 indexer = convert_to_index_sliceable(self, key)
2780 if indexer is not None:
2781 # either we have a slice or we have a string that can be converted
/opt/conda/lib/python3.7/site-packages/pandas/core/indexing.py in convert_to_index_sliceable(obj, key)
2265 idx = obj.index
2266 if isinstance(key, slice):
-> 2267 return idx._convert_slice_indexer(key, kind="getitem")
2268
2269 elif isinstance(key, str):
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/base.py in _convert_slice_indexer(self, key, kind)
2960 indexer = key
2961 else:
-> 2962 indexer = self.slice_indexer(start, stop, step, kind=kind)
2963
2964 return indexer
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in slice_indexer(self, start, end, step, kind)
823 mask = True
824 if start is not None:
--> 825 start_casted = self._maybe_cast_slice_bound(start, "left", kind)
826 mask = start_casted <= self
827
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in _maybe_cast_slice_bound(self, label, side, kind)
761 freq = getattr(self, "freqstr", getattr(self, "inferred_freq", None))
762 _, parsed, reso = parsing.parse_time_string(label, freq)
--> 763 lower, upper = self._parsed_string_to_bounds(reso, parsed)
764 # lower, upper form the half-open interval:
765 # [parsed, parsed + 1 freq)
/opt/conda/lib/python3.7/site-packages/pandas/core/indexes/datetimes.py in _parsed_string_to_bounds(self, reso, parsed)
517 }
518 if reso not in valid_resos:
--> 519 raise KeyError
520 if reso == "year":
521 start = Timestamp(parsed.year, 1, 1)
KeyError:
|
KeyError
|
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes, allow_fill=True, fill_value=lev._na_value)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(lchanges):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
|
def value_counts(
self, normalize=False, sort=True, ascending=False, bins=None, dropna=True
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
def apply_series_value_counts():
return self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
if bins is not None:
if not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return apply_series_value_counts()
elif is_categorical_dtype(val):
# GH38672
return apply_series_value_counts()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes, allow_fill=True, fill_value=lev._na_value)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self._selection_name)
|
https://github.com/pandas-dev/pandas/issues/39172
|
Traceback (most recent call last):
File "<input>", line 1, in <module>
File "/Users/username/.virtualenvs/my_project/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 736, in value_counts
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
File "/Users/username/.virtualenvs/my_project/lib/python3.8/site-packages/pandas/core/groupby/generic.py", line 705, in <lambda>
llab = lambda lab, inc: lab[inc]
IndexError: boolean index did not match indexed array along dimension 0; dimension is 0 but corresponding boolean dimension is 1
|
IndexError
|
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(value, DataFrame):
self._set_item_frame_value(key, value)
elif is_list_like(value) and 1 < len(self.columns.get_indexer_for([key])) == len(
value
):
# Column to set is duplicated
self._setitem_array([key], value)
else:
# set column
self._set_item(key, value)
|
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
elif isinstance(value, DataFrame):
self._set_item_frame_value(key, value)
else:
# set column
self._set_item(key, value)
|
https://github.com/pandas-dev/pandas/issues/15695
|
In [2]: df = pd.DataFrame(index=range(3), columns=['A', 'B', 'C', 'D', 'E', 'F'])
In [3]: df.loc[0, ['A', 'D']] = (1,2)
In [4]: df.loc[:, ['B', 'E']] = (1,2)
In [5]: df[['C', 'F']] = (1,2)
In [6]: df
Out[6]:
A B C D E F
0 1 1 1 2 2 2
1 NaN 1 1 NaN 2 2
2 NaN 1 1 NaN 2 2
In [7]: dfdup = pd.DataFrame(index=range(3), columns=['A', 'B', 'C']*2)
In [8]: dfdup.loc[0, 'A'] = (1,2) # Works
In [9]: dfdup.loc[:, 'B'] = (1,2) # Works
In [10]: dfdup['C'] = (1,2) # Fails
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-10-17d5611af828> in <module>()
----> 1 dfdup['C'] = (1,2)
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in __setitem__(self, key, value)
2421 else:
2422 # set column
-> 2423 self._set_item(key, value)
2424
2425 def _setitem_slice(self, key, value):
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in _set_item(self, key, value)
2487
2488 self._ensure_valid_index(value)
-> 2489 value = self._sanitize_column(key, value)
2490 NDFrame._set_item(self, key, value)
2491
/home/pietro/nobackup/repo/pandas/pandas/core/frame.py in _sanitize_column(self, key, value, broadcast)
2658
2659 # turn me into an ndarray
-> 2660 value = _sanitize_index(value, self.index, copy=False)
2661 if not isinstance(value, (np.ndarray, Index)):
2662 if isinstance(value, list) and len(value) > 0:
/home/pietro/nobackup/repo/pandas/pandas/core/series.py in _sanitize_index(data, index, copy)
2847
2848 if len(data) != len(index):
-> 2849 raise ValueError('Length of values does not match length of ' 'index')
2850
2851 if isinstance(data, PeriodIndex):
ValueError: Length of values does not match length of index
In [11]: dfdup
Out[11]:
A B C A B C
0 1 1 NaN 2 2 NaN
1 NaN 1 NaN NaN 2 NaN
2 NaN 1 NaN NaN 2 NaN
|
ValueError
|
def _setitem_single_block(self, indexer, value, name: str):
"""
_setitem_with_indexer for the case when we have a single Block.
"""
from pandas import Series
info_axis = self.obj._info_axis_number
item_labels = self.obj._get_axis(info_axis)
if isinstance(indexer, tuple):
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
len(indexer) > info_axis
and is_integer(indexer[info_axis])
and all(
com.is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis
)
):
selected_item_labels = item_labels[indexer[info_axis]]
if len(item_labels.get_indexer_for([selected_item_labels])) == 1:
self.obj[selected_item_labels] = value
return
indexer = maybe_convert_ix(*indexer)
if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
# if it's worth supporting that.
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame) and name != "iloc":
value = self._align_frame(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
|
def _setitem_single_block(self, indexer, value, name: str):
"""
_setitem_with_indexer for the case when we have a single Block.
"""
from pandas import Series
info_axis = self.obj._info_axis_number
item_labels = self.obj._get_axis(info_axis)
if isinstance(indexer, tuple):
# if we are setting on the info axis ONLY
# set using those methods to avoid block-splitting
# logic here
if (
len(indexer) > info_axis
and is_integer(indexer[info_axis])
and all(
com.is_null_slice(idx)
for i, idx in enumerate(indexer)
if i != info_axis
)
and item_labels.is_unique
):
self.obj[item_labels[indexer[info_axis]]] = value
return
indexer = maybe_convert_ix(*indexer)
if (isinstance(value, ABCSeries) and name != "iloc") or isinstance(value, dict):
# TODO(EA): ExtensionBlock.setitem this causes issues with
# setting for extensionarrays that store dicts. Need to decide
# if it's worth supporting that.
value = self._align_series(indexer, Series(value))
elif isinstance(value, ABCDataFrame) and name != "iloc":
value = self._align_frame(indexer, value)
# check for chained assignment
self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
|
https://github.com/pandas-dev/pandas/issues/38521
|
Traceback (most recent call last):
File "c:\Users\leona\pandas\main.py", line 3, in <module>
df.loc[:, 'a'] = list(range(5))
File "c:\Users\leona\pandas\pandas\core\indexing.py", line 691, in __setitem__
iloc._setitem_with_indexer(indexer, value, self.name)
File "c:\Users\leona\pandas\pandas\core\indexing.py", line 1636, in _setitem_with_indexer
self._setitem_single_block(indexer, value, name)
File "c:\Users\leona\pandas\pandas\core\indexing.py", line 1862, in _setitem_single_block
self.obj._mgr = self.obj._mgr.setitem(indexer=indexer, value=value)
File "c:\Users\leona\pandas\pandas\core\internals\managers.py", line 565, in setitem
return self.apply("setitem", indexer=indexer, value=value)
File "c:\Users\leona\pandas\pandas\core\internals\managers.py", line 428, in apply
applied = getattr(b, f)(**kwargs)
File "c:\Users\leon\pandas\pandas\core\internals\blocks.py", line 1022, in setitem
values[indexer] = value
ValueError: cannot copy sequence with size 5 to array axis with dimension 0
|
ValueError
|
def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
"""
Workhorse function for processing nested list into DataFrame
"""
ParserBase.__init__(self, kwds)
self.data: Optional[Iterator[str]] = None
self.buf: List = []
self.pos = 0
self.line_pos = 0
self.skiprows = kwds["skiprows"]
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
self.delimiter = kwds["delimiter"]
self.quotechar = kwds["quotechar"]
if isinstance(self.quotechar, str):
self.quotechar = str(self.quotechar)
self.escapechar = kwds["escapechar"]
self.doublequote = kwds["doublequote"]
self.skipinitialspace = kwds["skipinitialspace"]
self.lineterminator = kwds["lineterminator"]
self.quoting = kwds["quoting"]
self.usecols, _ = _validate_usecols_arg(kwds["usecols"])
self.skip_blank_lines = kwds["skip_blank_lines"]
self.warn_bad_lines = kwds["warn_bad_lines"]
self.error_bad_lines = kwds["error_bad_lines"]
self.names_passed = kwds["names"] or None
self.has_index_names = False
if "has_index_names" in kwds:
self.has_index_names = kwds["has_index_names"]
self.verbose = kwds["verbose"]
self.converters = kwds["converters"]
self.dtype = kwds["dtype"]
self.thousands = kwds["thousands"]
self.decimal = kwds["decimal"]
self.comment = kwds["comment"]
# Set self.data to something that can read lines.
if isinstance(f, list):
# read_excel: f is a list
self.data = cast(Iterator[str], f)
else:
self._open_handles(f, kwds)
assert self.handles is not None
assert hasattr(self.handles.handle, "readline")
try:
self._make_reader(self.handles.handle)
except (csv.Error, UnicodeDecodeError):
self.close()
raise
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
self._col_indices: Optional[List[int]] = None
try:
(
self.columns,
self.num_original_columns,
self.unnamed_cols,
) = self._infer_columns()
except (TypeError, ValueError):
self.close()
raise
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
(
self.columns,
self.index_names,
self.col_names,
_,
) = self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names, self.orig_names, self.columns) = self._get_index_name(
self.columns
)
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
if self._col_indices is None:
self._col_indices = list(range(len(self.columns)))
self._validate_parse_dates_presence(self.columns)
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
if len(self.decimal) != 1:
raise ValueError("Only length-1 decimal markers supported")
decimal = re.escape(self.decimal)
if self.thousands is None:
regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
else:
thousands = re.escape(self.thousands)
regex = (
rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
rf"([0-9]?(E|e)\-?[0-9]+)?$"
)
self.num = re.compile(regex)
|
def __init__(self, f: Union[FilePathOrBuffer, List], **kwds):
"""
Workhorse function for processing nested list into DataFrame
"""
ParserBase.__init__(self, kwds)
self.data: Optional[Iterator[str]] = None
self.buf: List = []
self.pos = 0
self.line_pos = 0
self.skiprows = kwds["skiprows"]
if callable(self.skiprows):
self.skipfunc = self.skiprows
else:
self.skipfunc = lambda x: x in self.skiprows
self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"])
self.delimiter = kwds["delimiter"]
self.quotechar = kwds["quotechar"]
if isinstance(self.quotechar, str):
self.quotechar = str(self.quotechar)
self.escapechar = kwds["escapechar"]
self.doublequote = kwds["doublequote"]
self.skipinitialspace = kwds["skipinitialspace"]
self.lineterminator = kwds["lineterminator"]
self.quoting = kwds["quoting"]
self.usecols, _ = _validate_usecols_arg(kwds["usecols"])
self.skip_blank_lines = kwds["skip_blank_lines"]
self.warn_bad_lines = kwds["warn_bad_lines"]
self.error_bad_lines = kwds["error_bad_lines"]
self.names_passed = kwds["names"] or None
self.has_index_names = False
if "has_index_names" in kwds:
self.has_index_names = kwds["has_index_names"]
self.verbose = kwds["verbose"]
self.converters = kwds["converters"]
self.dtype = kwds["dtype"]
self.thousands = kwds["thousands"]
self.decimal = kwds["decimal"]
self.comment = kwds["comment"]
# Set self.data to something that can read lines.
if isinstance(f, list):
# read_excel: f is a list
self.data = cast(Iterator[str], f)
else:
self._open_handles(f, kwds)
assert self.handles is not None
assert hasattr(self.handles.handle, "readline")
self._make_reader(self.handles.handle)
# Get columns in two steps: infer from data, then
# infer column indices from self.usecols if it is specified.
self._col_indices: Optional[List[int]] = None
try:
(
self.columns,
self.num_original_columns,
self.unnamed_cols,
) = self._infer_columns()
except (TypeError, ValueError):
self.close()
raise
# Now self.columns has the set of columns that we will process.
# The original set is stored in self.original_columns.
if len(self.columns) > 1:
# we are processing a multi index column
(
self.columns,
self.index_names,
self.col_names,
_,
) = self._extract_multi_indexer_columns(
self.columns, self.index_names, self.col_names
)
# Update list of original names to include all indices.
self.num_original_columns = len(self.columns)
else:
self.columns = self.columns[0]
# get popped off for index
self.orig_names = list(self.columns)
# needs to be cleaned/refactored
# multiple date column thing turning into a real spaghetti factory
if not self._has_complex_date_col:
(index_names, self.orig_names, self.columns) = self._get_index_name(
self.columns
)
self._name_processed = True
if self.index_names is None:
self.index_names = index_names
if self._col_indices is None:
self._col_indices = list(range(len(self.columns)))
self._validate_parse_dates_presence(self.columns)
if self.parse_dates:
self._no_thousands_columns = self._set_no_thousands_columns()
else:
self._no_thousands_columns = None
if len(self.decimal) != 1:
raise ValueError("Only length-1 decimal markers supported")
decimal = re.escape(self.decimal)
if self.thousands is None:
regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$"
else:
thousands = re.escape(self.thousands)
regex = (
rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?"
rf"([0-9]?(E|e)\-?[0-9]+)?$"
)
self.num = re.compile(regex)
|
https://github.com/pandas-dev/pandas/issues/39024
|
Traceback (most recent call last):
File "..\scratch\pandas_file_handle.py", line 19, in <module>
dataframe = pandas.read_csv(csv_file, sep=None)
File "C:\Users\dmf\projects\invest\env\lib\site-packages\pandas\io\parsers.py", line 605, in read_csv
return _read(filepath_or_buffer, kwds)
File "C:\Users\dmf\projects\invest\env\lib\site-packages\pandas\io\parsers.py", line 457, in _read
parser = TextFileReader(filepath_or_buffer, **kwds)
File "C:\Users\dmf\projects\invest\env\lib\site-packages\pandas\io\parsers.py", line 814, in __init__
self._engine = self._make_engine(self.engine)
File "C:\Users\dmf\projects\invest\env\lib\site-packages\pandas\io\parsers.py", line 1045, in _make_engine
return mapping[engine](self.f, **self.options) # type: ignore[call-arg]
File "C:\Users\dmf\projects\invest\env\lib\site-packages\pandas\io\parsers.py", line 2291, in __init__
self._make_reader(self.handles.handle)
File "C:\Users\dmf\projects\invest\env\lib\site-packages\pandas\io\parsers.py", line 2412, in _make_reader
line = f.readline()
File "C:\Users\dmf\projects\invest\env\lib\codecs.py", line 322, in decode
(result, consumed) = self._buffer_decode(data, self.errors, final)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xce in position 10: invalid continuation byte
|
UnicodeDecodeError
|
def _reindex_non_unique(self, target):
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
"""
target = ensure_index(target)
if len(target) == 0:
# GH#13691
return self[:0], np.array([], dtype=np.intp), None
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = ensure_int64(length[check])
new_labels = np.empty((len(indexer),), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# GH#38906
if not len(self):
new_indexer = np.arange(0)
# a unique indexer
elif target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
if isinstance(self, ABCMultiIndex):
new_index = type(self).from_tuples(new_labels, names=self.names)
else:
new_index = Index(new_labels, name=self.name)
return new_index, indexer, new_indexer
|
def _reindex_non_unique(self, target):
"""
Create a new index with target's values (move/add/delete values as
necessary) use with non-unique Index and a possibly non-unique target.
Parameters
----------
target : an iterable
Returns
-------
new_index : pd.Index
Resulting index.
indexer : np.ndarray or None
Indices of output values in original index.
"""
target = ensure_index(target)
if len(target) == 0:
# GH#13691
return self[:0], np.array([], dtype=np.intp), None
indexer, missing = self.get_indexer_non_unique(target)
check = indexer != -1
new_labels = self.take(indexer[check])
new_indexer = None
if len(missing):
length = np.arange(len(indexer))
missing = ensure_platform_int(missing)
missing_labels = target.take(missing)
missing_indexer = ensure_int64(length[~check])
cur_labels = self.take(indexer[check]).values
cur_indexer = ensure_int64(length[check])
new_labels = np.empty((len(indexer),), dtype=object)
new_labels[cur_indexer] = cur_labels
new_labels[missing_indexer] = missing_labels
# a unique indexer
if target.is_unique:
# see GH5553, make sure we use the right indexer
new_indexer = np.arange(len(indexer))
new_indexer[cur_indexer] = np.arange(len(cur_labels))
new_indexer[missing_indexer] = -1
# we have a non_unique selector, need to use the original
# indexer here
else:
# need to retake to have the same size as the indexer
indexer[~check] = -1
# reset the new indexer to account for the new size
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
if isinstance(self, ABCMultiIndex):
new_index = type(self).from_tuples(new_labels, names=self.names)
else:
new_index = Index(new_labels, name=self.name)
return new_index, indexer, new_indexer
|
https://github.com/pandas-dev/pandas/issues/38906
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\util\_decorators.py", line 312, in wrapper
return func(*args, **kwargs)
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\frame.py", line 4173, in reindex
return super().reindex(**kwargs)
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\generic.py", line 4806, in reindex
return self._reindex_axes(
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\frame.py", line 4013, in _reindex_axes
frame = frame._reindex_columns(
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\frame.py", line 4055, in _reindex_columns
new_columns, indexer = self.columns.reindex(
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\indexes\category.py", line 448, in reindex
new_target, indexer, _ = result._reindex_non_unique(np.array(target))
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\indexes\base.py", line 3589, in _reindex_non_unique
new_indexer = np.arange(len(self.take(indexer)))
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\indexes\base.py", line 751, in take
taken = algos.take(
File "C:\Users\mboling\Anaconda3\envs\pandastest\lib\site-packages\pandas\core\algorithms.py", line 1657, in take
result = arr.take(indices, axis=axis)
IndexError: cannot do a non-empty take from an empty axes.
|
IndexError
|
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
- If 'ignore', then invalid parsing will return the input.
downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
Returns
-------
ret
Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
Downcasting of nullable integer and floating dtypes is supported:
>>> s = pd.Series([1, 2, 3], dtype="Int64")
>>> pd.to_numeric(s, downcast="integer")
0 1
1 2
2 3
dtype: Int8
>>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
>>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.1
2 3.0
dtype: Float32
"""
if downcast not in (None, "integer", "signed", "unsigned", "float"):
raise ValueError("invalid downcasting method provided")
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("invalid error value specified")
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndex):
is_index = True
if needs_i8_conversion(arg.dtype):
values = arg.asi8
else:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype="O")
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype="O")
elif getattr(arg, "ndim", 1) > 1:
raise TypeError("arg must be a list, tuple, 1-d array, or Series")
else:
values = arg
# GH33013: for IntegerArray & FloatingArray extract non-null values for casting
# save mask to reconstruct the full array after casting
if isinstance(values, NumericArray):
mask = values._mask
values = values._data[~mask]
else:
mask = None
values_dtype = getattr(values, "dtype", None)
if is_numeric_dtype(values_dtype):
pass
elif is_datetime_or_timedelta_dtype(values_dtype):
values = values.view(np.int64)
else:
values = ensure_object(values)
coerce_numeric = errors not in ("ignore", "raise")
try:
values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
except (ValueError, TypeError):
if errors == "raise":
raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values.dtype):
typecodes = None
if downcast in ("integer", "signed"):
typecodes = np.typecodes["Integer"]
elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
typecodes = np.typecodes["UnsignedInteger"]
elif downcast == "float":
typecodes = np.typecodes["Float"]
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for dtype in typecodes:
dtype = np.dtype(dtype)
if dtype.itemsize <= values.dtype.itemsize:
values = maybe_downcast_numeric(values, dtype)
# successful conversion
if values.dtype == dtype:
break
# GH33013: for IntegerArray & FloatingArray need to reconstruct masked array
if mask is not None:
data = np.zeros(mask.shape, dtype=values.dtype)
data[~mask] = values
from pandas.core.arrays import FloatingArray, IntegerArray
klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray
values = klass(data, mask.copy())
if is_series:
return arg._constructor(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy
return pd.Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
|
def to_numeric(arg, errors="raise", downcast=None):
"""
Convert argument to a numeric type.
The default return dtype is `float64` or `int64`
depending on the data supplied. Use the `downcast` parameter
to obtain other dtypes.
Please note that precision loss may occur if really large numbers
are passed in. Due to the internal limitations of `ndarray`, if
numbers smaller than `-9223372036854775808` (np.iinfo(np.int64).min)
or larger than `18446744073709551615` (np.iinfo(np.uint64).max) are
passed in, it is very likely they will be converted to float so that
they can stored in an `ndarray`. These warnings apply similarly to
`Series` since it internally leverages `ndarray`.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Argument to be converted.
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception.
- If 'coerce', then invalid parsing will be set as NaN.
- If 'ignore', then invalid parsing will return the input.
downcast : {'integer', 'signed', 'unsigned', 'float'}, default None
If not None, and if the data has been successfully cast to a
numerical dtype (or if the data was numeric to begin with),
downcast that resulting data to the smallest numerical dtype
possible according to the following rules:
- 'integer' or 'signed': smallest signed int dtype (min.: np.int8)
- 'unsigned': smallest unsigned int dtype (min.: np.uint8)
- 'float': smallest float dtype (min.: np.float32)
As this behaviour is separate from the core conversion to
numeric values, any errors raised during the downcasting
will be surfaced regardless of the value of the 'errors' input.
In addition, downcasting will only occur if the size
of the resulting data's dtype is strictly larger than
the dtype it is to be cast to, so if none of the dtypes
checked satisfy that specification, no downcasting will be
performed on the data.
Returns
-------
ret
Numeric if parsing succeeded.
Return type depends on input. Series if Series, otherwise ndarray.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
DataFrame.convert_dtypes : Convert dtypes.
Examples
--------
Take separate series and convert to numeric, coercing when told to
>>> s = pd.Series(['1.0', '2', -3])
>>> pd.to_numeric(s)
0 1.0
1 2.0
2 -3.0
dtype: float64
>>> pd.to_numeric(s, downcast='float')
0 1.0
1 2.0
2 -3.0
dtype: float32
>>> pd.to_numeric(s, downcast='signed')
0 1
1 2
2 -3
dtype: int8
>>> s = pd.Series(['apple', '1.0', '2', -3])
>>> pd.to_numeric(s, errors='ignore')
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> pd.to_numeric(s, errors='coerce')
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float64
Downcasting of nullable integer and floating dtypes is supported:
>>> s = pd.Series([1, 2, 3], dtype="Int64")
>>> pd.to_numeric(s, downcast="integer")
0 1
1 2
2 3
dtype: Int8
>>> s = pd.Series([1.0, 2.1, 3.0], dtype="Float64")
>>> pd.to_numeric(s, downcast="float")
0 1.0
1 2.1
2 3.0
dtype: Float32
"""
if downcast not in (None, "integer", "signed", "unsigned", "float"):
raise ValueError("invalid downcasting method provided")
if errors not in ("ignore", "raise", "coerce"):
raise ValueError("invalid error value specified")
is_series = False
is_index = False
is_scalars = False
if isinstance(arg, ABCSeries):
is_series = True
values = arg.values
elif isinstance(arg, ABCIndex):
is_index = True
if needs_i8_conversion(arg.dtype):
values = arg.asi8
else:
values = arg.values
elif isinstance(arg, (list, tuple)):
values = np.array(arg, dtype="O")
elif is_scalar(arg):
if is_decimal(arg):
return float(arg)
if is_number(arg):
return arg
is_scalars = True
values = np.array([arg], dtype="O")
elif getattr(arg, "ndim", 1) > 1:
raise TypeError("arg must be a list, tuple, 1-d array, or Series")
else:
values = arg
# GH33013: for IntegerArray & FloatingArray extract non-null values for casting
# save mask to reconstruct the full array after casting
if isinstance(values, NumericArray):
mask = values._mask
values = values._data[~mask]
else:
mask = None
values_dtype = getattr(values, "dtype", None)
if is_numeric_dtype(values_dtype):
pass
elif is_datetime_or_timedelta_dtype(values_dtype):
values = values.view(np.int64)
else:
values = ensure_object(values)
coerce_numeric = errors not in ("ignore", "raise")
try:
values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=coerce_numeric
)
except (ValueError, TypeError):
if errors == "raise":
raise
# attempt downcast only if the data has been successfully converted
# to a numerical dtype and if a downcast method has been specified
if downcast is not None and is_numeric_dtype(values.dtype):
typecodes = None
if downcast in ("integer", "signed"):
typecodes = np.typecodes["Integer"]
elif downcast == "unsigned" and (not len(values) or np.min(values) >= 0):
typecodes = np.typecodes["UnsignedInteger"]
elif downcast == "float":
typecodes = np.typecodes["Float"]
# pandas support goes only to np.float32,
# as float dtypes smaller than that are
# extremely rare and not well supported
float_32_char = np.dtype(np.float32).char
float_32_ind = typecodes.index(float_32_char)
typecodes = typecodes[float_32_ind:]
if typecodes is not None:
# from smallest to largest
for dtype in typecodes:
dtype = np.dtype(dtype)
if dtype.itemsize <= values.dtype.itemsize:
values = maybe_downcast_numeric(values, dtype)
# successful conversion
if values.dtype == dtype:
break
# GH33013: for IntegerArray & FloatingArray need to reconstruct masked array
if mask is not None:
data = np.zeros(mask.shape, dtype=values.dtype)
data[~mask] = values
from pandas.core.arrays import FloatingArray, IntegerArray
klass = IntegerArray if is_integer_dtype(data.dtype) else FloatingArray
values = klass(data, mask)
if is_series:
return arg._constructor(values, index=arg.index, name=arg.name)
elif is_index:
# because we want to coerce to numeric if possible,
# do not use _shallow_copy
return pd.Index(values, name=arg.name)
elif is_scalars:
return values[0]
else:
return values
|
https://github.com/pandas-dev/pandas/issues/38974
|
In [9]: import pandas as pd
...: import pandas._testing as tm
...:
...: arr = pd.array([1, 2, pd.NA], dtype="Int64")
...:
...: result = pd.to_numeric(arr, downcast="integer")
...: expected = pd.array([1, 2, pd.NA], dtype="Int8")
...: tm.assert_extension_array_equal(result, expected)
...:
...: arr[1] = pd.NA # should not modify result
...: tm.assert_extension_array_equal(result, expected)
...:
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-9-f72c43e18273> in <module>
9
10 arr[1] = pd.NA
---> 11 tm.assert_extension_array_equal(result, expected)
~/repos/pandas/pandas/_testing/asserters.py in assert_extension_array_equal(left, right, check_dtype, index_values, check_less_precise, check_exact, rtol, atol)
794 left_na = np.asarray(left.isna())
795 right_na = np.asarray(right.isna())
--> 796 assert_numpy_array_equal(
797 left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
798 )
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing/asserters.py in _raise(left, right, err_msg)
699 diff = diff * 100.0 / left.size
700 msg = f"{obj} values are different ({np.round(diff, 5)} %)"
--> 701 raise_assert_detail(obj, msg, left, right, index_values=index_values)
702
703 raise AssertionError(err_msg)
~/repos/pandas/pandas/_testing/asserters.py in raise_assert_detail(obj, message, left, right, diff, index_values)
629 msg += f"\n[diff]: {diff}"
630
--> 631 raise AssertionError(msg)
632
633
AssertionError: ExtensionArray NA mask are different
ExtensionArray NA mask values are different (33.33333 %)
[left]: [False, True, True]
[right]: [False, False, True]
|
AssertionError
|
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.left[name].dtype
):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.right[name].dtype
):
take_right = self.right[name]._values
elif left_indexer is not None and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer, fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer, fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values or vice-versa
mask_left = left_indexer == -1
mask_right = right_indexer == -1
if mask_left.all():
key_col = Index(rvals)
elif right_indexer is not None and mask_right.all():
key_col = Index(lvals)
else:
key_col = Index(lvals).where(~mask_left, rvals)
if result._is_label_reference(name):
result[name] = key_col
elif result._is_level_reference(name):
if isinstance(result.index, MultiIndex):
key_col.name = name
idx_list = [
result.index.get_level_values(level_name)
if level_name != name
else key_col
for level_name in result.index.names
]
result.set_index(idx_list, inplace=True)
else:
result.index = Index(key_col, name=name)
else:
result.insert(i, name or f"key_{i}", key_col)
|
def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
left_has_missing = None
right_has_missing = None
keys = zip(self.join_names, self.left_on, self.right_on)
for i, (name, lname, rname) in enumerate(keys):
if not _should_fill(lname, rname):
continue
take_left, take_right = None, None
if name in result:
if left_indexer is not None and right_indexer is not None:
if name in self.left:
if left_has_missing is None:
left_has_missing = (left_indexer == -1).any()
if left_has_missing:
take_right = self.right_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.left[name].dtype
):
take_left = self.left[name]._values
elif name in self.right:
if right_has_missing is None:
right_has_missing = (right_indexer == -1).any()
if right_has_missing:
take_left = self.left_join_keys[i]
if not is_dtype_equal(
result[name].dtype, self.right[name].dtype
):
take_right = self.right[name]._values
elif left_indexer is not None and is_array_like(self.left_join_keys[i]):
take_left = self.left_join_keys[i]
take_right = self.right_join_keys[i]
if take_left is not None or take_right is not None:
if take_left is None:
lvals = result[name]._values
else:
lfill = na_value_for_dtype(take_left.dtype)
lvals = algos.take_1d(take_left, left_indexer, fill_value=lfill)
if take_right is None:
rvals = result[name]._values
else:
rfill = na_value_for_dtype(take_right.dtype)
rvals = algos.take_1d(take_right, right_indexer, fill_value=rfill)
# if we have an all missing left_indexer
# make sure to just use the right values or vice-versa
mask_left = left_indexer == -1
mask_right = right_indexer == -1
if mask_left.all():
key_col = rvals
elif right_indexer is not None and mask_right.all():
key_col = lvals
else:
key_col = Index(lvals).where(~mask_left, rvals)
if result._is_label_reference(name):
result[name] = key_col
elif result._is_level_reference(name):
if isinstance(result.index, MultiIndex):
key_col.name = name
idx_list = [
result.index.get_level_values(level_name)
if level_name != name
else key_col
for level_name in result.index.names
]
result.set_index(idx_list, inplace=True)
else:
result.index = Index(key_col, name=name)
else:
result.insert(i, name or f"key_{i}", key_col)
|
https://github.com/pandas-dev/pandas/issues/33814
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.8/site-packages/pandas/core/reshape/merge.py", line 88, in merge
return op.get_result()
File "/usr/lib/python3.8/site-packages/pandas/core/reshape/merge.py", line 668, in get_result
self._maybe_add_join_keys(result, left_indexer, right_indexer)
File "/usr/lib/python3.8/site-packages/pandas/core/reshape/merge.py", line 824, in _maybe_add_join_keys
key_col.name = name
AttributeError: 'numpy.ndarray' object has no attribute 'name'
|
AttributeError
|
def setup(self, N):
data = np.arange(N, dtype=float)
data[40] = np.nan
self.array = pd.array(data, dtype="Int64")
|
def setup(self):
N = 10**5
na = np.arange(int(N / 2))
self.left = np.concatenate([na[: int(N / 4)], na[: int(N / 4)]])
self.right = np.concatenate([na, na])
|
https://github.com/pandas-dev/pandas/issues/6963
|
In [9]: df1 = pd.DataFrame(np.random.randn(3,3), columns=['A', 'A', 'B1'])
...: df2 = pd.DataFrame(np.random.randn(3,3), columns=['A', 'A', 'B2'])
In [10]: pd.concat([df1, df2])
Traceback (most recent call last):
File "<ipython-input-10-f61a1ab4009e>", line 1, in <module>
pd.concat([df1, df2])
...
File "c:\users\vdbosscj\scipy\pandas-joris\pandas\core\index.py", line 765, in take
taken = self.view(np.ndarray).take(indexer)
IndexError: index 3 is out of bounds for axis 0 with size 3
|
IndexError
|
def get_result(self):
cons: Type[FrameOrSeriesUnion]
sample: FrameOrSeriesUnion
# series only
if self._is_series:
sample = cast("Series", self.objs[0])
# stack blocks
if self.bm_axis == 0:
name = com.consensus_name_attr(self.objs)
cons = sample._constructor
arrs = [ser._values for ser in self.objs]
res = concat_compat(arrs, axis=0)
result = cons(res, index=self.new_axes[0], name=name, dtype=res.dtype)
return result.__finalize__(self, method="concat")
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
# GH28330 Preserves subclassed objects through concat
cons = sample._constructor_expanddim
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method="concat")
# combine block managers
else:
sample = cast("DataFrame", self.objs[0])
mgrs_indexers = []
for obj in self.objs:
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
# ::-1 to convert BlockManager ax to DataFrame ax
if ax == self.bm_axis:
# Suppress reindexing on concat axis
continue
# 1-ax to convert BlockManager axis to DataFrame axis
obj_labels = obj.axes[1 - ax]
if not new_labels.equals(obj_labels):
indexers[ax] = obj_labels.get_indexer(new_labels)
mgrs_indexers.append((obj._mgr, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
cons = sample._constructor
return cons(new_data).__finalize__(self, method="concat")
|
def get_result(self):
cons: Type[FrameOrSeriesUnion]
sample: FrameOrSeriesUnion
# series only
if self._is_series:
sample = cast("Series", self.objs[0])
# stack blocks
if self.bm_axis == 0:
name = com.consensus_name_attr(self.objs)
cons = sample._constructor
arrs = [ser._values for ser in self.objs]
res = concat_compat(arrs, axis=0)
result = cons(res, index=self.new_axes[0], name=name, dtype=res.dtype)
return result.__finalize__(self, method="concat")
# combine as columns in a frame
else:
data = dict(zip(range(len(self.objs)), self.objs))
# GH28330 Preserves subclassed objects through concat
cons = sample._constructor_expanddim
index, columns = self.new_axes
df = cons(data, index=index)
df.columns = columns
return df.__finalize__(self, method="concat")
# combine block managers
else:
sample = cast("DataFrame", self.objs[0])
mgrs_indexers = []
for obj in self.objs:
indexers = {}
for ax, new_labels in enumerate(self.new_axes):
# ::-1 to convert BlockManager ax to DataFrame ax
if ax == self.bm_axis:
# Suppress reindexing on concat axis
continue
# 1-ax to convert BlockManager axis to DataFrame axis
obj_labels = obj.axes[1 - ax]
if not new_labels.equals(obj_labels):
# We have to remove the duplicates from obj_labels
# in new labels to make them unique, otherwise we would
# duplicate or duplicates again
if not obj_labels.is_unique:
new_labels = algos.make_duplicates_of_left_unique_in_right(
np.asarray(obj_labels), np.asarray(new_labels)
)
indexers[ax] = obj_labels.reindex(new_labels)[1]
mgrs_indexers.append((obj._mgr, indexers))
new_data = concatenate_block_managers(
mgrs_indexers, self.new_axes, concat_axis=self.bm_axis, copy=self.copy
)
if not self.copy:
new_data._consolidate_inplace()
cons = sample._constructor
return cons(new_data).__finalize__(self, method="concat")
|
https://github.com/pandas-dev/pandas/issues/6963
|
In [9]: df1 = pd.DataFrame(np.random.randn(3,3), columns=['A', 'A', 'B1'])
...: df2 = pd.DataFrame(np.random.randn(3,3), columns=['A', 'A', 'B2'])
In [10]: pd.concat([df1, df2])
Traceback (most recent call last):
File "<ipython-input-10-f61a1ab4009e>", line 1, in <module>
pd.concat([df1, df2])
...
File "c:\users\vdbosscj\scipy\pandas-joris\pandas\core\index.py", line 765, in take
taken = self.view(np.ndarray).take(indexer)
IndexError: index 3 is out of bounds for axis 0 with size 3
|
IndexError
|
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8.ravel()).reshape(self.shape)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
if is_extension_array_dtype(dtype):
arr_cls = dtype.construct_array_type()
return arr_cls._from_sequence(self, dtype=dtype, copy=copy)
else:
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
warnings.warn(
f"casting {self.dtype} values to int64 with .astype(...) is "
"deprecated and will raise in a future version. "
"Use .view(...) instead.",
FutureWarning,
stacklevel=3,
)
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype) and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
arr_cls = dtype.construct_array_type()
return arr_cls(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
|
def astype(self, dtype, copy=True):
# Some notes on cases we don't have to handle here in the base class:
# 1. PeriodArray.astype handles period -> period
# 2. DatetimeArray.astype handles conversion between tz.
# 3. DatetimeArray.astype handles datetime -> period
dtype = pandas_dtype(dtype)
if is_object_dtype(dtype):
return self._box_values(self.asi8.ravel()).reshape(self.shape)
elif is_string_dtype(dtype) and not is_categorical_dtype(dtype):
if is_extension_array_dtype(dtype):
arr_cls = dtype.construct_array_type()
return arr_cls._from_sequence(self, dtype=dtype, copy=copy)
else:
return self._format_native_types()
elif is_integer_dtype(dtype):
# we deliberately ignore int32 vs. int64 here.
# See https://github.com/pandas-dev/pandas/issues/24381 for more.
values = self.asi8
if is_unsigned_integer_dtype(dtype):
# Again, we ignore int32 vs. int64
values = values.view("uint64")
if copy:
values = values.copy()
return values
elif (
is_datetime_or_timedelta_dtype(dtype) and not is_dtype_equal(self.dtype, dtype)
) or is_float_dtype(dtype):
# disallow conversion between datetime/timedelta,
# and conversions for any datetimelike to float
msg = f"Cannot cast {type(self).__name__} to dtype {dtype}"
raise TypeError(msg)
elif is_categorical_dtype(dtype):
arr_cls = dtype.construct_array_type()
return arr_cls(self, dtype=dtype)
else:
return np.asarray(self, dtype=dtype)
|
https://github.com/pandas-dev/pandas/issues/24381
|
In [10]: idx._data.astype('int32').astype("int32", casting="safe")
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-10-2c2a4a677a5c> in <module>
----> 1 idx._data.astype('int32').astype("int32", casting="safe")
TypeError: Cannot cast array from dtype('int64') to dtype('int32') according to the rule 'safe'
|
TypeError
|
def astype_nansafe(
arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype or ExtensionDtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
if arr.ndim > 1:
# Make sure we are doing non-copy ravel and reshape.
flags = arr.flags
flat = arr.ravel("K")
result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna)
order = "F" if flags.f_contiguous else "C"
return result.reshape(arr.shape, order=order)
# We get here with 0-dim from sparse
arr = np.atleast_1d(arr)
# dispatch on extension dtype if needed
if isinstance(dtype, ExtensionDtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
elif not isinstance(dtype, np.dtype):
raise ValueError("dtype must be np.dtype or ExtensionDtype")
if arr.dtype.kind in ["m", "M"] and (
issubclass(dtype.type, str) or dtype == object
):
from pandas.core.construction import ensure_wrapped_if_datetimelike
arr = ensure_wrapped_if_datetimelike(arr)
return arr.astype(dtype, copy=copy)
if issubclass(dtype.type, str):
return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)
elif is_datetime64_dtype(arr):
if dtype == np.int64:
warnings.warn(
f"casting {arr.dtype} values to int64 with .astype(...) "
"is deprecated and will raise in a future version. "
"Use .view(...) instead.",
FutureWarning,
# stacklevel chosen to be correct when reached via Series.astype
stacklevel=7,
)
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
warnings.warn(
f"casting {arr.dtype} values to int64 with .astype(...) "
"is deprecated and will raise in a future version. "
"Use .view(...) instead.",
FutureWarning,
# stacklevel chosen to be correct when reached via Series.astype
stacklevel=7,
)
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
elif dtype.kind == "m":
return astype_td64_unit_conversion(arr, dtype, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr, dtype)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
|
def astype_nansafe(
arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False
) -> ArrayLike:
"""
Cast the elements of an array to a given dtype a nan-safe manner.
Parameters
----------
arr : ndarray
dtype : np.dtype or ExtensionDtype
copy : bool, default True
If False, a view will be attempted but may fail, if
e.g. the item sizes don't align.
skipna: bool, default False
Whether or not we should skip NaN when casting as a string-type.
Raises
------
ValueError
The dtype was a datetime64/timedelta64 dtype, but it had no unit.
"""
if arr.ndim > 1:
# Make sure we are doing non-copy ravel and reshape.
flags = arr.flags
flat = arr.ravel("K")
result = astype_nansafe(flat, dtype, copy=copy, skipna=skipna)
order = "F" if flags.f_contiguous else "C"
return result.reshape(arr.shape, order=order)
# We get here with 0-dim from sparse
arr = np.atleast_1d(arr)
# dispatch on extension dtype if needed
if isinstance(dtype, ExtensionDtype):
return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy)
elif not isinstance(dtype, np.dtype):
raise ValueError("dtype must be np.dtype or ExtensionDtype")
if arr.dtype.kind in ["m", "M"] and (
issubclass(dtype.type, str) or dtype == object
):
from pandas.core.construction import ensure_wrapped_if_datetimelike
arr = ensure_wrapped_if_datetimelike(arr)
return arr.astype(dtype, copy=copy)
if issubclass(dtype.type, str):
return lib.ensure_string_array(arr, skipna=skipna, convert_na_value=False)
elif is_datetime64_dtype(arr):
if dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
# allow frequency conversions
if dtype.kind == "M":
return arr.astype(dtype)
raise TypeError(f"cannot astype a datetimelike from [{arr.dtype}] to [{dtype}]")
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
if isna(arr).any():
raise ValueError("Cannot convert NaT values to integer")
return arr.view(dtype)
elif dtype.kind == "m":
return astype_td64_unit_conversion(arr, dtype, copy=copy)
raise TypeError(f"cannot astype a timedelta from [{arr.dtype}] to [{dtype}]")
elif np.issubdtype(arr.dtype, np.floating) and np.issubdtype(dtype, np.integer):
if not np.isfinite(arr).all():
raise ValueError("Cannot convert non-finite values (NA or inf) to integer")
elif is_object_dtype(arr):
# work around NumPy brokenness, #1987
if np.issubdtype(dtype.type, np.integer):
return lib.astype_intsafe(arr, dtype)
# if we have a datetime/timedelta array of objects
# then coerce to a proper dtype and recall astype_nansafe
elif is_datetime64_dtype(dtype):
from pandas import to_datetime
return astype_nansafe(to_datetime(arr).values, dtype, copy=copy)
elif is_timedelta64_dtype(dtype):
from pandas import to_timedelta
return astype_nansafe(to_timedelta(arr)._values, dtype, copy=copy)
if dtype.name in ("datetime64", "timedelta64"):
msg = (
f"The '{dtype.name}' dtype has no unit. Please pass in "
f"'{dtype.name}[ns]' instead."
)
raise ValueError(msg)
if copy or is_object_dtype(arr) or is_object_dtype(dtype):
# Explicit copy, or required since NumPy can't view from / to object.
return arr.astype(dtype, copy=True)
return arr.view(dtype)
|
https://github.com/pandas-dev/pandas/issues/24381
|
In [10]: idx._data.astype('int32').astype("int32", casting="safe")
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-10-2c2a4a677a5c> in <module>
----> 1 idx._data.astype('int32').astype("int32", casting="safe")
TypeError: Cannot cast array from dtype('int64') to dtype('int32') according to the rule 'safe'
|
TypeError
|
def _transform_general(self, func, *args, **kwargs):
"""
Transform with a non-str `func`.
"""
klass = type(self._selected_obj)
results = []
for name, group in self:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
if isinstance(res, (DataFrame, Series)):
res = res._values
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* user-defined funcs
# the cython take a different path (and casting)
if is_numeric_dtype(result.dtype):
common_dtype = find_common_type([self._selected_obj.dtype, result.dtype])
if common_dtype is result.dtype:
result = maybe_downcast_numeric(result, self._selected_obj.dtype)
result.name = self._selected_obj.name
return result
|
def _transform_general(self, func, *args, **kwargs):
"""
Transform with a non-str `func`.
"""
klass = type(self._selected_obj)
results = []
for name, group in self:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
if isinstance(res, (DataFrame, Series)):
res = res._values
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* user-defined funcs
# the cython take a different path (and casting)
if is_numeric_dtype(result.dtype):
common_dtype = find_common_type([self._selected_obj.dtype, result.dtype])
if common_dtype is result.dtype:
result = maybe_downcast_numeric(result, self._selected_obj.dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
|
https://github.com/pandas-dev/pandas/issues/35612
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-3bae7d67a46f> in <module>
----> 1 gb['B'].transform(len)
/workspaces/pandas-arw2019/pandas/core/groupby/generic.py in transform(self, func, engine, engine_kwargs, *args, **kwargs)
487
488 if not isinstance(func, str):
--> 489 return self._transform_general(
490 func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
491 )
/workspaces/pandas-arw2019/pandas/core/groupby/generic.py in _transform_general(self, func, engine, engine_kwargs, *args, **kwargs)
556
557 result.name = self._selected_obj.name
--> 558 result.index = self._selected_obj.index
559 return result
560
/workspaces/pandas-arw2019/pandas/core/generic.py in __setattr__(self, name, value)
5167 try:
5168 object.__getattribute__(self, name)
-> 5169 return object.__setattr__(self, name, value)
5170 except AttributeError:
5171 pass
/workspaces/pandas-arw2019/pandas/_libs/properties.pyx in pandas._libs.properties.AxisProperty.__set__()
64
65 def __set__(self, obj, value):
---> 66 obj._set_axis(self.axis, value)
/workspaces/pandas-arw2019/pandas/core/series.py in _set_axis(self, axis, labels, fastpath)
422 if not fastpath:
423 # The ensure_index call above ensures we have an Index object
--> 424 self._mgr.set_axis(axis, labels)
425
426 # ndarray compatibility
/workspaces/pandas-arw2019/pandas/core/internals/managers.py in set_axis(self, axis, new_labels)
214
215 if new_len != old_len:
--> 216 raise ValueError(
217 f"Length mismatch: Expected axis has {old_len} elements, new "
218 f"values have {new_len} elements"
ValueError: Length mismatch: Expected axis has 3 elements, new values have 4 elements
|
ValueError
|
def _set_result_index_ordered(
self, result: "OutputFrameOrSeries"
) -> "OutputFrameOrSeries":
# set the result index on the passed values object and
# return the new object, xref 8046
if self.grouper.is_monotonic:
# shortcut if we have an already ordered grouper
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
# row order is scrambled => sort the rows by position in original index
original_positions = Index(
np.concatenate(self._get_indices(self.grouper.result_index))
)
result.set_axis(original_positions, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
dropped_rows = len(result.index) < len(self.obj.index)
if dropped_rows:
# get index by slicing original index according to original positions
# slice drops attrs => use set_axis when no rows were dropped
sorted_indexer = result.index
result.index = self._selected_obj.index[sorted_indexer]
else:
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
|
def _set_result_index_ordered(
self, result: "OutputFrameOrSeries"
) -> "OutputFrameOrSeries":
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis, inplace=True)
return result
|
https://github.com/pandas-dev/pandas/issues/35612
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-4-3bae7d67a46f> in <module>
----> 1 gb['B'].transform(len)
/workspaces/pandas-arw2019/pandas/core/groupby/generic.py in transform(self, func, engine, engine_kwargs, *args, **kwargs)
487
488 if not isinstance(func, str):
--> 489 return self._transform_general(
490 func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
491 )
/workspaces/pandas-arw2019/pandas/core/groupby/generic.py in _transform_general(self, func, engine, engine_kwargs, *args, **kwargs)
556
557 result.name = self._selected_obj.name
--> 558 result.index = self._selected_obj.index
559 return result
560
/workspaces/pandas-arw2019/pandas/core/generic.py in __setattr__(self, name, value)
5167 try:
5168 object.__getattribute__(self, name)
-> 5169 return object.__setattr__(self, name, value)
5170 except AttributeError:
5171 pass
/workspaces/pandas-arw2019/pandas/_libs/properties.pyx in pandas._libs.properties.AxisProperty.__set__()
64
65 def __set__(self, obj, value):
---> 66 obj._set_axis(self.axis, value)
/workspaces/pandas-arw2019/pandas/core/series.py in _set_axis(self, axis, labels, fastpath)
422 if not fastpath:
423 # The ensure_index call above ensures we have an Index object
--> 424 self._mgr.set_axis(axis, labels)
425
426 # ndarray compatibility
/workspaces/pandas-arw2019/pandas/core/internals/managers.py in set_axis(self, axis, new_labels)
214
215 if new_len != old_len:
--> 216 raise ValueError(
217 f"Length mismatch: Expected axis has {old_len} elements, new "
218 f"values have {new_len} elements"
ValueError: Length mismatch: Expected axis has 3 elements, new values have 4 elements
|
ValueError
|
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = Timedelta(value)
return value
|
def maybe_box_datetimelike(value: Scalar, dtype: Optional[Dtype] = None) -> Scalar:
"""
Cast scalar to Timestamp or Timedelta if scalar is datetime-like
and dtype is not object.
Parameters
----------
value : scalar
dtype : Dtype, optional
Returns
-------
scalar
"""
if dtype == object:
pass
elif isinstance(value, (np.datetime64, datetime)):
value = tslibs.Timestamp(value)
elif isinstance(value, (np.timedelta64, timedelta)):
value = tslibs.Timedelta(value)
return value
|
https://github.com/pandas-dev/pandas/issues/38032
|
In [31]: import pandas as pd
...: import pandas._testing as tm
...:
...: td = pd.Timedelta(nanoseconds=500)
...: ser = pd.Series({"a": td})
...: expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
...:
...: tm.assert_series_equal(ser, expected)
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-31-a9c6a6312101> in <module>
6 expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
7
----> 8 tm.assert_series_equal(ser, expected)
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in assert_extension_array_equal(left, right, check_dtype, index_values, check_less_precise, check_exact, rtol, atol)
1243 # Avoid slow object-dtype comparisons
1244 # np.asarray for case where we have a np.MaskedArray
-> 1245 assert_numpy_array_equal(
1246 np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
1247 )
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in _raise(left, right, err_msg)
1155 diff = diff * 100.0 / left.size
1156 msg = f"{obj} values are different ({np.round(diff, 5)} %)"
-> 1157 raise_assert_detail(obj, msg, left, right, index_values=index_values)
1158
1159 raise AssertionError(err_msg)
~/repos/pandas/pandas/_testing.py in raise_assert_detail(obj, message, left, right, diff, index_values)
1085 msg += f"\n[diff]: {diff}"
1086
-> 1087 raise AssertionError(msg)
1088
1089
AssertionError: numpy array are different
numpy array values are different (100.0 %)
[index]: [a]
[left]: [500]
[right]: [0]
|
AssertionError
|
def maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]):
"""
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# Force the dtype if needed.
msg = (
f"The '{dtype.name}' dtype has no unit. "
f"Please pass in '{dtype.name}[ns]' instead."
)
if is_datetime64:
# unpack e.g. SparseDtype
dtype = getattr(dtype, "subtype", dtype)
if not is_dtype_equal(dtype, DT64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("M8[ns]"):
if dtype.name == "datetime64":
raise ValueError(msg)
dtype = DT64NS_DTYPE
else:
raise TypeError(
f"cannot convert datetimelike to dtype [{dtype}]"
)
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("m8[ns]"):
if dtype.name == "timedelta64":
raise ValueError(msg)
dtype = TD64NS_DTYPE
else:
raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]")
if is_scalar(value):
value = maybe_unbox_datetimelike(value, dtype)
elif not is_sparse(value):
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):
try:
if is_datetime64:
value = to_datetime(value, errors="raise")
# GH 25843: Remove tz information since the dtype
# didn't specify one
if value.tz is not None:
value = value.tz_localize(None)
value = value._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value.dtype)
value = to_datetime(value, errors="raise").array
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = value.tz_localize("UTC").tz_convert(dtype.tz)
elif is_timedelta64:
value = to_timedelta(value, errors="raise")._values
except OutOfBoundsDatetime:
raise
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(
getattr(value, "dtype", None)
) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != DT64NS_DTYPE:
value = value.astype(DT64NS_DTYPE)
ints = np.asarray(value).view("i8")
return ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError(f"Cannot cast datetime64 to {dtype}")
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ["M", "m"]:
dtype = value.dtype
if dtype.kind == "M" and dtype != DT64NS_DTYPE:
value = conversion.ensure_datetime64ns(value)
elif dtype.kind == "m" and dtype != TD64NS_DTYPE:
value = conversion.ensure_timedelta64ns(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif not (
is_array
and not (
issubclass(value.dtype.type, np.integer) or value.dtype == np.object_
)
):
value = maybe_infer_to_datetimelike(value)
return value
|
def maybe_cast_to_datetime(value, dtype: Optional[DtypeObj]):
"""
try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
from pandas.core.tools.datetimes import to_datetime
from pandas.core.tools.timedeltas import to_timedelta
if dtype is not None:
is_datetime64 = is_datetime64_dtype(dtype)
is_datetime64tz = is_datetime64tz_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_datetime64tz or is_timedelta64:
# Force the dtype if needed.
msg = (
f"The '{dtype.name}' dtype has no unit. "
f"Please pass in '{dtype.name}[ns]' instead."
)
if is_datetime64:
# unpack e.g. SparseDtype
dtype = getattr(dtype, "subtype", dtype)
if not is_dtype_equal(dtype, DT64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("M8[ns]"):
if dtype.name == "datetime64":
raise ValueError(msg)
dtype = DT64NS_DTYPE
else:
raise TypeError(
f"cannot convert datetimelike to dtype [{dtype}]"
)
elif is_datetime64tz:
# our NaT doesn't support tz's
# this will coerce to DatetimeIndex with
# a matching dtype below
if is_scalar(value) and isna(value):
value = [value]
elif is_timedelta64 and not is_dtype_equal(dtype, TD64NS_DTYPE):
# pandas supports dtype whose granularity is less than [ns]
# e.g., [ps], [fs], [as]
if dtype <= np.dtype("m8[ns]"):
if dtype.name == "timedelta64":
raise ValueError(msg)
dtype = TD64NS_DTYPE
else:
raise TypeError(f"cannot convert timedeltalike to dtype [{dtype}]")
if is_scalar(value):
if value == iNaT or isna(value):
value = iNaT
elif not is_sparse(value):
value = np.array(value, copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):
try:
if is_datetime64:
value = to_datetime(value, errors="raise")
# GH 25843: Remove tz information since the dtype
# didn't specify one
if value.tz is not None:
value = value.tz_localize(None)
value = value._values
elif is_datetime64tz:
# The string check can be removed once issue #13712
# is solved. String data that is passed with a
# datetime64tz is assumed to be naive which should
# be localized to the timezone.
is_dt_string = is_string_dtype(value.dtype)
value = to_datetime(value, errors="raise").array
if is_dt_string:
# Strings here are naive, so directly localize
value = value.tz_localize(dtype.tz)
else:
# Numeric values are UTC at this point,
# so localize and convert
value = value.tz_localize("UTC").tz_convert(dtype.tz)
elif is_timedelta64:
value = to_timedelta(value, errors="raise")._values
except OutOfBoundsDatetime:
raise
except (AttributeError, ValueError, TypeError):
pass
# coerce datetimelike to object
elif is_datetime64_dtype(
getattr(value, "dtype", None)
) and not is_datetime64_dtype(dtype):
if is_object_dtype(dtype):
if value.dtype != DT64NS_DTYPE:
value = value.astype(DT64NS_DTYPE)
ints = np.asarray(value).view("i8")
return ints_to_pydatetime(ints)
# we have a non-castable dtype that was passed
raise TypeError(f"Cannot cast datetime64 to {dtype}")
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if is_array and value.dtype.kind in ["M", "m"]:
dtype = value.dtype
if dtype.kind == "M" and dtype != DT64NS_DTYPE:
value = conversion.ensure_datetime64ns(value)
elif dtype.kind == "m" and dtype != TD64NS_DTYPE:
value = conversion.ensure_timedelta64ns(value)
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif not (
is_array
and not (
issubclass(value.dtype.type, np.integer) or value.dtype == np.object_
)
):
value = maybe_infer_to_datetimelike(value)
return value
|
https://github.com/pandas-dev/pandas/issues/38032
|
In [31]: import pandas as pd
...: import pandas._testing as tm
...:
...: td = pd.Timedelta(nanoseconds=500)
...: ser = pd.Series({"a": td})
...: expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
...:
...: tm.assert_series_equal(ser, expected)
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-31-a9c6a6312101> in <module>
6 expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
7
----> 8 tm.assert_series_equal(ser, expected)
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in assert_extension_array_equal(left, right, check_dtype, index_values, check_less_precise, check_exact, rtol, atol)
1243 # Avoid slow object-dtype comparisons
1244 # np.asarray for case where we have a np.MaskedArray
-> 1245 assert_numpy_array_equal(
1246 np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
1247 )
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in _raise(left, right, err_msg)
1155 diff = diff * 100.0 / left.size
1156 msg = f"{obj} values are different ({np.round(diff, 5)} %)"
-> 1157 raise_assert_detail(obj, msg, left, right, index_values=index_values)
1158
1159 raise AssertionError(err_msg)
~/repos/pandas/pandas/_testing.py in raise_assert_detail(obj, message, left, right, diff, index_values)
1085 msg += f"\n[diff]: {diff}"
1086
-> 1087 raise AssertionError(msg)
1088
1089
AssertionError: numpy array are different
numpy array values are different (100.0 %)
[index]: [a]
[left]: [500]
[right]: [0]
|
AssertionError
|
def construct_1d_arraylike_from_scalar(
value: Scalar, length: int, dtype: Optional[DtypeObj]
) -> ArrayLike:
"""
create a np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype or np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
if is_extension_array_dtype(dtype):
cls = dtype.construct_array_type()
subarr = cls._from_sequence([value] * length, dtype=dtype)
else:
if length and is_integer_dtype(dtype) and isna(value):
# coerce if we have nan for an integer dtype
dtype = np.dtype("float64")
elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
# we need to coerce to object dtype to avoid
# to allow numpy to take our string as a scalar value
dtype = np.dtype("object")
if not isna(value):
value = ensure_str(value)
elif dtype.kind in ["M", "m"]:
value = maybe_unbox_datetimelike(value, dtype)
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr
|
def construct_1d_arraylike_from_scalar(
value: Scalar, length: int, dtype: Optional[DtypeObj]
) -> ArrayLike:
"""
create a np.ndarray / pandas type of specified shape and dtype
filled with values
Parameters
----------
value : scalar value
length : int
dtype : pandas_dtype or np.dtype
Returns
-------
np.ndarray / pandas type of length, filled with value
"""
if dtype is None:
dtype, value = infer_dtype_from_scalar(value, pandas_dtype=True)
if is_extension_array_dtype(dtype):
cls = dtype.construct_array_type()
subarr = cls._from_sequence([value] * length, dtype=dtype)
else:
if length and is_integer_dtype(dtype) and isna(value):
# coerce if we have nan for an integer dtype
dtype = np.dtype("float64")
elif isinstance(dtype, np.dtype) and dtype.kind in ("U", "S"):
# we need to coerce to object dtype to avoid
# to allow numpy to take our string as a scalar value
dtype = np.dtype("object")
if not isna(value):
value = ensure_str(value)
elif dtype.kind in ["M", "m"] and is_valid_nat_for_dtype(value, dtype):
# GH36541: can't fill array directly with pd.NaT
# > np.empty(10, dtype="datetime64[64]").fill(pd.NaT)
# ValueError: cannot convert float NaN to integer
value = dtype.type("NaT", "ns")
subarr = np.empty(length, dtype=dtype)
subarr.fill(value)
return subarr
|
https://github.com/pandas-dev/pandas/issues/38032
|
In [31]: import pandas as pd
...: import pandas._testing as tm
...:
...: td = pd.Timedelta(nanoseconds=500)
...: ser = pd.Series({"a": td})
...: expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
...:
...: tm.assert_series_equal(ser, expected)
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-31-a9c6a6312101> in <module>
6 expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
7
----> 8 tm.assert_series_equal(ser, expected)
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in assert_extension_array_equal(left, right, check_dtype, index_values, check_less_precise, check_exact, rtol, atol)
1243 # Avoid slow object-dtype comparisons
1244 # np.asarray for case where we have a np.MaskedArray
-> 1245 assert_numpy_array_equal(
1246 np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
1247 )
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in _raise(left, right, err_msg)
1155 diff = diff * 100.0 / left.size
1156 msg = f"{obj} values are different ({np.round(diff, 5)} %)"
-> 1157 raise_assert_detail(obj, msg, left, right, index_values=index_values)
1158
1159 raise AssertionError(err_msg)
~/repos/pandas/pandas/_testing.py in raise_assert_detail(obj, message, left, right, diff, index_values)
1085 msg += f"\n[diff]: {diff}"
1086
-> 1087 raise AssertionError(msg)
1088
1089
AssertionError: numpy array are different
numpy array values are different (100.0 %)
[index]: [a]
[left]: [500]
[right]: [0]
|
AssertionError
|
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
data = sanitize_masked_array(data)
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
if dtype.kind in ["m", "M"]:
data = maybe_unbox_datetimelike(data, dtype)
# Attempt to coerce to a numpy array
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
shape = (len(index), len(columns))
values = np.full(shape, arr)
mgr = init_ndarray(values, index, columns, dtype=values.dtype, copy=False)
NDFrame.__init__(self, mgr)
|
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._mgr
if isinstance(data, BlockManager):
if index is None and columns is None and dtype is None and copy is False:
# GH#33357 fastpath
NDFrame.__init__(self, data)
return
mgr = self._init_mgr(
data, axes={"index": index, "columns": columns}, dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
data = sanitize_masked_array(data)
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_dataclass(data[0]):
data = dataclasses_to_dicts(data)
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
# For data is scalar
else:
if index is None or columns is None:
raise ValueError("DataFrame constructor not properly called!")
if not dtype:
dtype, _ = infer_dtype_from_scalar(data, pandas_dtype=True)
# For data is a scalar extension dtype
if is_extension_array_dtype(dtype):
values = [
construct_1d_arraylike_from_scalar(data, len(index), dtype)
for _ in range(len(columns))
]
mgr = arrays_to_mgr(values, columns, index, columns, dtype=None)
else:
# Attempt to coerce to a numpy array
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as err:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {err}"
)
raise exc from err
if arr.ndim != 0:
raise ValueError("DataFrame constructor not properly called!")
shape = (len(index), len(columns))
values = np.full(shape, arr)
mgr = init_ndarray(values, index, columns, dtype=values.dtype, copy=False)
NDFrame.__init__(self, mgr)
|
https://github.com/pandas-dev/pandas/issues/38032
|
In [31]: import pandas as pd
...: import pandas._testing as tm
...:
...: td = pd.Timedelta(nanoseconds=500)
...: ser = pd.Series({"a": td})
...: expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
...:
...: tm.assert_series_equal(ser, expected)
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-31-a9c6a6312101> in <module>
6 expected = pd.Series(td, index=["a"], dtype="timedelta64[ns]")
7
----> 8 tm.assert_series_equal(ser, expected)
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in assert_extension_array_equal(left, right, check_dtype, index_values, check_less_precise, check_exact, rtol, atol)
1243 # Avoid slow object-dtype comparisons
1244 # np.asarray for case where we have a np.MaskedArray
-> 1245 assert_numpy_array_equal(
1246 np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
1247 )
[... skipping hidden 1 frame]
~/repos/pandas/pandas/_testing.py in _raise(left, right, err_msg)
1155 diff = diff * 100.0 / left.size
1156 msg = f"{obj} values are different ({np.round(diff, 5)} %)"
-> 1157 raise_assert_detail(obj, msg, left, right, index_values=index_values)
1158
1159 raise AssertionError(err_msg)
~/repos/pandas/pandas/_testing.py in raise_assert_detail(obj, message, left, right, diff, index_values)
1085 msg += f"\n[diff]: {diff}"
1086
-> 1087 raise AssertionError(msg)
1088
1089
AssertionError: numpy array are different
numpy array values are different (100.0 %)
[index]: [a]
[left]: [500]
[right]: [0]
|
AssertionError
|
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = i if self.index_col is None else self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
|
def _should_parse_dates(self, i):
if isinstance(self.parse_dates, bool):
return self.parse_dates
else:
if self.index_names is not None:
name = self.index_names[i]
else:
name = None
j = self.index_col[i]
if is_scalar(self.parse_dates):
return (j == self.parse_dates) or (
name is not None and name == self.parse_dates
)
else:
return (j in self.parse_dates) or (
name is not None and name in self.parse_dates
)
|
https://github.com/pandas-dev/pandas/issues/33699
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-146-082b3d7afa0a> in <module>()
1 import io
2 s = """A,B,\n1,2"""
----> 3 pd.read_csv(io.StringIO(s), parse_dates=["B"], names=["B"])
/usr/lib/python3/dist-packages/pandas/io/parsers.py in parser_f(filepath_or_buffer, sep, delimiter, header, names, index_col, usecols, squeeze, prefix, mangle_dupe_cols, dtype, engine, converters, true_values, false_values, skipinitialspace, skiprows, nrows, na_values, keep_default_na, na_filter, verbose, skip_blank_lines, parse_dates, infer_datetime_format, keep_date_col, date_parser, dayfirst, iterator, chunksize, compression, thousands, decimal, lineterminator, quotechar, quoting, escapechar, comment, encoding, dialect, tupleize_cols, error_bad_lines, warn_bad_lines, skipfooter, doublequote, delim_whitespace, low_memory, memory_map, float_precision)
676 skip_blank_lines=skip_blank_lines)
677
--> 678 return _read(filepath_or_buffer, kwds)
679
680 parser_f.__name__ = name
/usr/lib/python3/dist-packages/pandas/io/parsers.py in _read(filepath_or_buffer, kwds)
444
445 try:
--> 446 data = parser.read(nrows)
447 finally:
448 parser.close()
/usr/lib/python3/dist-packages/pandas/io/parsers.py in read(self, nrows)
1034 raise ValueError('skipfooter not supported for iteration')
1035
-> 1036 ret = self._engine.read(nrows)
1037
1038 # May alter columns / col_dict
/usr/lib/python3/dist-packages/pandas/io/parsers.py in read(self, nrows)
1887
1888 values = self._maybe_parse_dates(values, i,
-> 1889 try_parse_dates=True)
1890 arrays.append(values)
1891
/usr/lib/python3/dist-packages/pandas/io/parsers.py in _maybe_parse_dates(self, values, index, try_parse_dates)
1946
1947 def _maybe_parse_dates(self, values, index, try_parse_dates=True):
-> 1948 if try_parse_dates and self._should_parse_dates(index):
1949 values = self._date_conv(values)
1950 return values
/usr/lib/python3/dist-packages/pandas/io/parsers.py in _should_parse_dates(self, i)
1319 else:
1320 name = None
-> 1321 j = self.index_col[i]
1322
1323 if is_scalar(self.parse_dates):
TypeError: 'NoneType' object is not subscriptable
|
TypeError
|
def drop(self, labels, errors: str_t = "raise"):
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If not all of the labels are found in the selected axis
"""
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer_for(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{labels[mask]} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
|
def drop(self, labels, errors: str_t = "raise"):
"""
Make new Index with passed list of labels deleted.
Parameters
----------
labels : array-like
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
Returns
-------
dropped : Index
Raises
------
KeyError
If not all of the labels are found in the selected axis
"""
arr_dtype = "object" if self.dtype == "object" else None
labels = com.index_labels_to_array(labels, dtype=arr_dtype)
indexer = self.get_indexer(labels)
mask = indexer == -1
if mask.any():
if errors != "ignore":
raise KeyError(f"{labels[mask]} not found in axis")
indexer = indexer[~mask]
return self.delete(indexer)
|
https://github.com/pandas-dev/pandas/issues/38051
|
index = pd.Index(range(3)).repeat(2)
index.drop(1)
Traceback (most recent call last):
[...]
pandas.errors.InvalidIndexError: Reindexing only valid with uniquely valued Index objects
|
pandas.errors.InvalidIndexError
|
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples when level is not specified
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
step = loc.step if loc.step is not None else 1
inds.extend(range(loc.start, loc.stop, step))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
|
def drop(self, codes, level=None, errors="raise"):
"""
Make new MultiIndex with passed list of codes deleted
Parameters
----------
codes : array-like
Must be a list of tuples when level is not specified
level : int or level name, default None
errors : str, default 'raise'
Returns
-------
dropped : MultiIndex
"""
if level is not None:
return self._drop_from_level(codes, level, errors)
if not isinstance(codes, (np.ndarray, Index)):
try:
codes = com.index_labels_to_array(codes, dtype=object)
except ValueError:
pass
inds = []
for level_codes in codes:
try:
loc = self.get_loc(level_codes)
# get_loc returns either an integer, a slice, or a boolean
# mask
if isinstance(loc, int):
inds.append(loc)
elif isinstance(loc, slice):
inds.extend(range(loc.start, loc.stop))
elif com.is_bool_indexer(loc):
if self.lexsort_depth == 0:
warnings.warn(
"dropping on a non-lexsorted multi-index "
"without a level parameter may impact performance.",
PerformanceWarning,
stacklevel=3,
)
loc = loc.nonzero()[0]
inds.extend(loc)
else:
msg = f"unsupported indexer of type {type(loc)}"
raise AssertionError(msg)
except KeyError:
if errors != "ignore":
raise
return self.delete(inds)
|
https://github.com/pandas-dev/pandas/issues/38051
|
index = pd.Index(range(3)).repeat(2)
index.drop(1)
Traceback (most recent call last):
[...]
pandas.errors.InvalidIndexError: Reindexing only valid with uniquely valued Index objects
|
pandas.errors.InvalidIndexError
|
def _groupby_and_merge(by, on, left: "DataFrame", right: "DataFrame", merge_pieces):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: DataFrame
right: DataFrame
merge_pieces: function for merging
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
rby: Optional[groupby.DataFrameGroupBy] = None
# if we can groupby the rhs
# then we can get vastly better perf
try:
rby = right.groupby(by, sort=False)
except KeyError:
pass
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should merge_pieces do this?
merged[by] = key
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
|
def _groupby_and_merge(by, on, left: "DataFrame", right: "DataFrame", merge_pieces):
"""
groupby & merge; we are always performing a left-by type operation
Parameters
----------
by: field to group
on: duplicates field
left: DataFrame
right: DataFrame
merge_pieces: function for merging
"""
pieces = []
if not isinstance(by, (list, tuple)):
by = [by]
lby = left.groupby(by, sort=False)
rby: Optional[groupby.DataFrameGroupBy] = None
# if we can groupby the rhs
# then we can get vastly better perf
try:
rby = right.groupby(by, sort=False)
except KeyError:
pass
for key, lhs in lby:
if rby is None:
rhs = right
else:
try:
rhs = right.take(rby.indices[key])
except KeyError:
# key doesn't exist in left
lcols = lhs.columns.tolist()
cols = lcols + [r for r in right.columns if r not in set(lcols)]
merged = lhs.reindex(columns=cols)
merged.index = range(len(merged))
pieces.append(merged)
continue
merged = merge_pieces(lhs, rhs)
# make sure join keys are in the merged
# TODO, should merge_pieces do this?
for k in by:
if k in merged:
merged[k] = key
pieces.append(merged)
# preserve the original order
# if we have a missing piece this can be reset
from pandas.core.reshape.concat import concat
result = concat(pieces, ignore_index=True)
result = result.reindex(columns=pieces[0].columns, copy=False)
return result, lby
|
https://github.com/pandas-dev/pandas/issues/35269
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/lib/python3.8/site-packages/pandas/core/reshape/merge.py", line 290, in merge_ordered
result, _ = _groupby_and_merge(
File "/usr/lib/python3.8/site-packages/pandas/core/reshape/merge.py", line 162, in _groupby_and_merge
merged[k] = key
File "/usr/lib/python3.8/site-packages/pandas/core/frame.py", line 2938, in __setitem__
self._set_item(key, value)
File "/usr/lib/python3.8/site-packages/pandas/core/frame.py", line 3000, in _set_item
value = self._sanitize_column(key, value)
File "/usr/lib/python3.8/site-packages/pandas/core/frame.py", line 3636, in _sanitize_column
value = sanitize_index(value, self.index, copy=False)
File "/usr/lib/python3.8/site-packages/pandas/core/internals/construction.py", line 611, in sanitize_index
raise ValueError("Length of values does not match length of index")
ValueError: Length of values does not match length of index
|
ValueError
|
def _convert_list_indexer(self, keyarr):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError(keyarr[locs == -1].tolist())
return locs
|
def _convert_list_indexer(self, keyarr):
"""
we are passed a list-like indexer. Return the
indexer for matching intervals.
"""
locs = self.get_indexer_for(keyarr)
# we have missing values
if (locs == -1).any():
raise KeyError
return locs
|
https://github.com/pandas-dev/pandas/issues/27365
|
import numpy as np
import pandas as pd
pd.__version__
'0.25.0rc0+59.g0437f6899'
s=pd.Series(np.arange(5), pd.IntervalIndex.from_breaks(np.arange(6)))
s
(0, 1] 0
(1, 2] 1
(2, 3] 2
(3, 4] 3
(4, 5] 4
dtype: int32
s.loc[[4,5]]
(3, 4] 3
(4, 5] 4
dtype: int32
s.loc[[4,5,6]]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1409, in __getitem__
return self._getitem_axis(maybe_callable, axis=axis)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1816, in _getitem_axis
return self._getitem_iterable(key, axis=axis)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1118, in _getitem_iterable
keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1060, in _get_listlike_indexer
indexer, keyarr = ax._convert_listlike_indexer(key, kind=self.name)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexes\base.py", line 3239, in _convert_listlike_indexer
indexer = self._convert_list_indexer(keyarr, kind=kind)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexes\interval.py", line 626, in _convert_list_indexer
raise KeyError
KeyError
s.to_frame().loc[[4,5,6]]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1409, in __getitem__
return self._getitem_axis(maybe_callable, axis=axis)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1816, in _getitem_axis
return self._getitem_iterable(key, axis=axis)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1118, in _getitem_iterable
keyarr, indexer = self._get_listlike_indexer(key, axis, raise_missing=False)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexing.py", line 1060, in _get_listlike_indexer
indexer, keyarr = ax._convert_listlike_indexer(key, kind=self.name)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexes\base.py", line 3239, in _convert_listlike_indexer
indexer = self._convert_list_indexer(keyarr, kind=kind)
File "C:\Users\simon\OneDrive\code\pandas-simonjayhawkins\pandas\core\indexes\interval.py", line 626, in _convert_list_indexer
raise KeyError
KeyError
|
KeyError
|
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if isinstance(idx, slice) and k < n - 1:
# Get start and end value from slice, necessary when a non-integer
# interval is given as input GH#37707
start = idx.start
end = idx.stop
elif k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
elif isinstance(idx, slice):
idx = idx.start
return start + section.searchsorted(idx, side=side)
else:
return start + section.searchsorted(idx, side=side)
|
def _partial_tup_index(self, tup, side="left"):
if len(tup) > self.lexsort_depth:
raise UnsortedIndexError(
f"Key length ({len(tup)}) was greater than MultiIndex lexsort depth "
f"({self.lexsort_depth})"
)
n = len(tup)
start, end = 0, len(self)
zipped = zip(tup, self.levels, self.codes)
for k, (lab, lev, labs) in enumerate(zipped):
section = labs[start:end]
if lab not in lev and not isna(lab):
if not lev.is_type_compatible(lib.infer_dtype([lab], skipna=False)):
raise TypeError(f"Level type mismatch: {lab}")
# short circuit
loc = lev.searchsorted(lab, side=side)
if side == "right" and loc >= 0:
loc -= 1
return start + section.searchsorted(loc, side=side)
idx = self._get_loc_single_level_index(lev, lab)
if k < n - 1:
end = start + section.searchsorted(idx, side="right")
start = start + section.searchsorted(idx, side="left")
else:
return start + section.searchsorted(idx, side=side)
|
https://github.com/pandas-dev/pandas/issues/24263
|
In [1]: import pandas as pd
In [2]: index = pd.date_range('2001-01-01', periods=100)
In [3]: mindex = pd.MultiIndex.from_arrays([index])
In [4]: mindex.get_loc('2001-01')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-1914bb512715> in <module>
----> 1 mindex.get_loc('2001-01')
~/dev/pandas/pandas/core/indexes/multi.py in get_loc(self, key, method)
2257
2258 if not isinstance(key, tuple):
-> 2259 loc = self._get_level_indexer(key, level=0)
2260 return _maybe_to_slice(loc)
2261
~/dev/pandas/pandas/core/indexes/multi.py in _get_level_indexer(self, key, level, indexer)
2525 return locs
2526
-> 2527 i = labels.searchsorted(code, side='left')
2528 j = labels.searchsorted(code, side='right')
2529 if i == j:
~/dev/pandas/pandas/util/_decorators.py in wrapper(*args, **kwargs)
175 else:
176 kwargs[new_arg_name] = new_arg_value
--> 177 return func(*args, **kwargs)
178 return wrapper
179 return _deprecate_kwarg
~/dev/pandas/pandas/core/indexes/frozen.py in searchsorted(self, value, side, sorter)
181 # xref: https://github.com/numpy/numpy/issues/5370
182 try:
--> 183 value = self.dtype.type(value)
184 except ValueError:
185 pass
TypeError: int() argument must be a string, a bytes-like object or a number, not 'slice'
|
TypeError
|
def _get_level_indexer(self, key, level: int = 0, indexer=None):
# `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
# we have an indexer which maps the locations in the labels
# that we have already selected (and is not an indexer for the
# entire set) otherwise this is wasteful so we only need to
# examine locations that are in this set the only magic here is
# that the result are the mappings to the set that we have
# selected
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)
m = np.asarray(m)
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
elif isinstance(start, slice):
stop = len(level_index)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
# need to have like semantics here to right
# searching as when we are using a slice
# so include the stop+1 (so we include stop)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
idx = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == idx, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
if isinstance(idx, slice):
start = idx.start
end = idx.stop
else:
start = level_codes.searchsorted(idx, side="left")
end = level_codes.searchsorted(idx, side="right")
if start == end:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(start, end)
|
def _get_level_indexer(self, key, level: int = 0, indexer=None):
# `level` kwarg is _always_ positional, never name
# return an indexer, boolean array or a slice showing where the key is
# in the totality of values
# if the indexer is provided, then use this
level_index = self.levels[level]
level_codes = self.codes[level]
def convert_indexer(start, stop, step, indexer=indexer, codes=level_codes):
# given the inputs and the codes/indexer, compute an indexer set
# if we have a provided indexer, then this need not consider
# the entire labels set
r = np.arange(start, stop, step)
if indexer is not None and len(indexer) != len(codes):
# we have an indexer which maps the locations in the labels
# that we have already selected (and is not an indexer for the
# entire set) otherwise this is wasteful so we only need to
# examine locations that are in this set the only magic here is
# that the result are the mappings to the set that we have
# selected
from pandas import Series
mapper = Series(indexer)
indexer = codes.take(ensure_platform_int(indexer))
result = Series(Index(indexer).isin(r).nonzero()[0])
m = result.map(mapper)
m = np.asarray(m)
else:
m = np.zeros(len(codes), dtype=bool)
m[np.in1d(codes, r, assume_unique=Index(codes).is_unique)] = True
return m
if isinstance(key, slice):
# handle a slice, returning a slice if we can
# otherwise a boolean indexer
try:
if key.start is not None:
start = level_index.get_loc(key.start)
else:
start = 0
if key.stop is not None:
stop = level_index.get_loc(key.stop)
else:
stop = len(level_index) - 1
step = key.step
except KeyError:
# we have a partial slice (like looking up a partial date
# string)
start = stop = level_index.slice_indexer(
key.start, key.stop, key.step, kind="loc"
)
step = start.step
if isinstance(start, slice) or isinstance(stop, slice):
# we have a slice for start and/or stop
# a partial date slicer on a DatetimeIndex generates a slice
# note that the stop ALREADY includes the stopped point (if
# it was a string sliced)
start = getattr(start, "start", start)
stop = getattr(stop, "stop", stop)
return convert_indexer(start, stop, step)
elif level > 0 or self.lexsort_depth == 0 or step is not None:
# need to have like semantics here to right
# searching as when we are using a slice
# so include the stop+1 (so we include stop)
return convert_indexer(start, stop + 1, step)
else:
# sorted, so can return slice object -> view
i = level_codes.searchsorted(start, side="left")
j = level_codes.searchsorted(stop, side="right")
return slice(i, j, step)
else:
code = self._get_loc_single_level_index(level_index, key)
if level > 0 or self.lexsort_depth == 0:
# Desired level is not sorted
locs = np.array(level_codes == code, dtype=bool, copy=False)
if not locs.any():
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return locs
i = level_codes.searchsorted(code, side="left")
j = level_codes.searchsorted(code, side="right")
if i == j:
# The label is present in self.levels[level] but unused:
raise KeyError(key)
return slice(i, j)
|
https://github.com/pandas-dev/pandas/issues/24263
|
In [1]: import pandas as pd
In [2]: index = pd.date_range('2001-01-01', periods=100)
In [3]: mindex = pd.MultiIndex.from_arrays([index])
In [4]: mindex.get_loc('2001-01')
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-4-1914bb512715> in <module>
----> 1 mindex.get_loc('2001-01')
~/dev/pandas/pandas/core/indexes/multi.py in get_loc(self, key, method)
2257
2258 if not isinstance(key, tuple):
-> 2259 loc = self._get_level_indexer(key, level=0)
2260 return _maybe_to_slice(loc)
2261
~/dev/pandas/pandas/core/indexes/multi.py in _get_level_indexer(self, key, level, indexer)
2525 return locs
2526
-> 2527 i = labels.searchsorted(code, side='left')
2528 j = labels.searchsorted(code, side='right')
2529 if i == j:
~/dev/pandas/pandas/util/_decorators.py in wrapper(*args, **kwargs)
175 else:
176 kwargs[new_arg_name] = new_arg_value
--> 177 return func(*args, **kwargs)
178 return wrapper
179 return _deprecate_kwarg
~/dev/pandas/pandas/core/indexes/frozen.py in searchsorted(self, value, side, sorter)
181 # xref: https://github.com/numpy/numpy/issues/5370
182 try:
--> 183 value = self.dtype.type(value)
184 except ValueError:
185 pass
TypeError: int() argument must be a string, a bytes-like object or a number, not 'slice'
|
TypeError
|
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError("Must have step size of 1 with time slices")
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError("Cannot mix time and non-time slice keys")
# Pandas supports slicing with dates, treated as datetimes at midnight.
# https://github.com/pandas-dev/pandas/issues/31501
if isinstance(start, date) and not isinstance(start, datetime):
start = datetime.combine(start, time(0, 0))
if isinstance(end, date) and not isinstance(end, datetime):
end = datetime.combine(end, time(0, 0))
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if (start is None or isinstance(start, str)) and (
end is None or isinstance(end, str)
):
mask = np.array(True)
deprecation_mask = np.array(True)
if start is not None:
start_casted = self._maybe_cast_slice_bound(start, "left", kind)
mask = start_casted <= self
deprecation_mask = start_casted == self
if end is not None:
end_casted = self._maybe_cast_slice_bound(end, "right", kind)
mask = (self <= end_casted) & mask
deprecation_mask = (end_casted == self) | deprecation_mask
if not deprecation_mask.any():
warnings.warn(
"Value based partial slicing on non-monotonic DatetimeIndexes "
"with non-existing keys is deprecated and will raise a "
"KeyError in a future Version.",
FutureWarning,
stacklevel=5,
)
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
|
def slice_indexer(self, start=None, end=None, step=None, kind=None):
"""
Return indexer for specified label slice.
Index.slice_indexer, customized to handle time slicing.
In addition to functionality provided by Index.slice_indexer, does the
following:
- if both `start` and `end` are instances of `datetime.time`, it
invokes `indexer_between_time`
- if `start` and `end` are both either string or None perform
value-based selection in non-monotonic cases.
"""
# For historical reasons DatetimeIndex supports slices between two
# instances of datetime.time as if it were applying a slice mask to
# an array of (self.hour, self.minute, self.seconds, self.microsecond).
if isinstance(start, time) and isinstance(end, time):
if step is not None and step != 1:
raise ValueError("Must have step size of 1 with time slices")
return self.indexer_between_time(start, end)
if isinstance(start, time) or isinstance(end, time):
raise KeyError("Cannot mix time and non-time slice keys")
# Pandas supports slicing with dates, treated as datetimes at midnight.
# https://github.com/pandas-dev/pandas/issues/31501
if isinstance(start, date) and not isinstance(start, datetime):
start = datetime.combine(start, time(0, 0))
if isinstance(end, date) and not isinstance(end, datetime):
end = datetime.combine(end, time(0, 0))
try:
return Index.slice_indexer(self, start, end, step, kind=kind)
except KeyError:
# For historical reasons DatetimeIndex by default supports
# value-based partial (aka string) slices on non-monotonic arrays,
# let's try that.
if (start is None or isinstance(start, str)) and (
end is None or isinstance(end, str)
):
mask = np.array(True)
if start is not None:
start_casted = self._maybe_cast_slice_bound(start, "left", kind)
mask = start_casted <= self
if end is not None:
end_casted = self._maybe_cast_slice_bound(end, "right", kind)
mask = (self <= end_casted) & mask
indexer = mask.nonzero()[0][::step]
if len(indexer) == len(self):
return slice(None)
else:
return indexer
else:
raise
|
https://github.com/pandas-dev/pandas/issues/18531
|
In [9]: import pandas as pd
...:
...: df1 = pd.DataFrame({"A": [1, 2, 3]},
...: index=[pd.Timestamp('2017'),
...: pd.Timestamp('2019'),
...: pd.Timestamp('2018')])
...: df2 = pd.DataFrame({"A": [1, 2, 3]},
...: index=['a', 'c', 'b'])
...:
In [10]: df1.loc['2020':'2022']
Out[10]:
Empty DataFrame
Columns: [A]
Index: []
In [11]: df2.loc['d':'e']
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind)
3664 try:
-> 3665 return self._searchsorted_monotonic(label, side)
3666 except ValueError:
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in _searchsorted_monotonic(self, label, side)
3623
-> 3624 raise ValueError('index must be monotonic increasing or decreasing')
3625
ValueError: index must be monotonic increasing or decreasing
During handling of the above exception, another exception occurred:
KeyError Traceback (most recent call last)
<ipython-input-11-e86df68316ba> in <module>()
----> 1 df2.loc['d':'e']
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexing.py in __getitem__(self, key)
1367
1368 maybe_callable = com._apply_if_callable(key, self.obj)
-> 1369 return self._getitem_axis(maybe_callable, axis=axis)
1370
1371 def _is_scalar_access(self, key):
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexing.py in _getitem_axis(self, key, axis)
1575 if isinstance(key, slice):
1576 self._has_valid_type(key, axis)
-> 1577 return self._get_slice_axis(key, axis=axis)
1578 elif is_bool_indexer(key):
1579 return self._getbool_axis(key, axis=axis)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexing.py in _get_slice_axis(self, slice_obj, axis)
1400 labels = obj._get_axis(axis)
1401 indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
-> 1402 slice_obj.step, kind=self.name)
1403
1404 if isinstance(indexer, slice):
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in slice_indexer(self, start, end, step, kind)
3529 """
3530 start_slice, end_slice = self.slice_locs(start, end, step=step,
-> 3531 kind=kind)
3532
3533 # return a slice
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in slice_locs(self, start, end, step, kind)
3730 start_slice = None
3731 if start is not None:
-> 3732 start_slice = self.get_slice_bound(start, 'left', kind)
3733 if start_slice is None:
3734 start_slice = 0
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind)
3666 except ValueError:
3667 # raise the original KeyError
-> 3668 raise err
3669
3670 if isinstance(slc, np.ndarray):
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in get_slice_bound(self, label, side, kind)
3660 # we need to look up the label
3661 try:
-> 3662 slc = self._get_loc_only_exact_matches(label)
3663 except KeyError as err:
3664 try:
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in _get_loc_only_exact_matches(self, key)
3629 get_slice_bound.
3630 """
-> 3631 return self.get_loc(key)
3632
3633 def get_slice_bound(self, label, side, kind):
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/core/indexes/base.py in get_loc(self, key, method, tolerance)
2529 return self._engine.get_loc(key)
2530 except KeyError:
-> 2531 return self._engine.get_loc(self._maybe_cast_indexer(key))
2532
2533 indexer = self.get_indexer([key], method=method, tolerance=tolerance)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
137 util.set_value_at(arr, loc, value)
138
--> 139 cpdef get_loc(self, object val):
140 if is_definitely_invalid_key(val):
141 raise TypeError("'{val}' is an invalid key".format(val=val))
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/_libs/index.pyx in pandas._libs.index.IndexEngine.get_loc()
159
160 try:
--> 161 return self.mapping.get_item(val)
162 except (TypeError, ValueError):
163 raise KeyError(val)
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
1263 sizeof(uint32_t)) # flags
1264
-> 1265 cpdef get_item(self, object val):
1266 cdef khiter_t k
1267 if val != val or val is None:
~/Envs/pandas-dev/lib/python3.6/site-packages/pandas/pandas/_libs/hashtable_class_helper.pxi in pandas._libs.hashtable.PyObjectHashTable.get_item()
1271 return self.table.vals[k]
1272 else:
-> 1273 raise KeyError(val)
1274
1275 cpdef set_item(self, object key, Py_ssize_t val):
KeyError: 'd'
|
ValueError
|
def head(self, n=5):
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
if self.axis == 0:
return self._selected_obj[mask]
else:
return self._selected_obj.iloc[:, mask]
|
def head(self, n=5):
"""
Return first n rows of each group.
Similar to ``.apply(lambda x: x.head(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([[1, 2], [1, 4], [5, 6]],
... columns=['A', 'B'])
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
|
https://github.com/pandas-dev/pandas/issues/9772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jonas/Code/pandas/pandas/core/groupby.py", line 986, in head
in_head = self._cumcount_array() < n
File "/home/jonas/Code/pandas/pandas/core/groupby.py", line 1044, in _cumcount_array
cumcounts[indices] = values
IndexError: index 10 is out of bounds for axis 1 with size 10
|
IndexError
|
def tail(self, n=5):
"""
Return last n rows of each group.
Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
... columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').tail(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
if self.axis == 0:
return self._selected_obj[mask]
else:
return self._selected_obj.iloc[:, mask]
|
def tail(self, n=5):
"""
Return last n rows of each group.
Similar to ``.apply(lambda x: x.tail(n))``, but it returns a subset of rows
from the original DataFrame with original index and order preserved
(``as_index`` flag is ignored).
Does not work for negative values of `n`.
Returns
-------
Series or DataFrame
%(see_also)s
Examples
--------
>>> df = pd.DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
... columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').tail(-1)
Empty DataFrame
Columns: [A, B]
Index: []
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
|
https://github.com/pandas-dev/pandas/issues/9772
|
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/jonas/Code/pandas/pandas/core/groupby.py", line 986, in head
in_head = self._cumcount_array() < n
File "/home/jonas/Code/pandas/pandas/core/groupby.py", line 1044, in _cumcount_array
cumcounts[indices] = values
IndexError: index 10 is out of bounds for axis 1 with size 10
|
IndexError
|
def safe_sort(
values,
codes=None,
na_sentinel: int = -1,
assume_unique: bool = False,
verify: bool = True,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Sort ``values`` and reorder corresponding ``codes``.
``values`` should be unique if ``codes`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
Parameters
----------
values : list-like
Sequence; must be unique if ``codes`` is not None.
codes : list_like, optional
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``codes`` to mark "not found".
Ignored when ``codes`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``codes`` is None.
verify : bool, default True
Check if codes are out of bound for the values and put out of bound
codes equal to na_sentinel. If ``verify=False``, it is assumed there
are no out of bound codes. Ignored when ``codes`` is None.
.. versionadded:: 0.25.0
Returns
-------
ordered : ndarray
Sorted ``values``
new_codes : ndarray
Reordered ``codes``; returned when ``codes`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``codes`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``codes`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError(
"Only list-like objects are allowed to be passed to safe_sort as values"
)
if not isinstance(values, (np.ndarray, ABCExtensionArray)):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
sorter = None
if (
not is_extension_array_dtype(values)
and lib.infer_dtype(values, skipna=False) == "mixed-integer"
):
ordered = _sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# Previous sorters failed or were not applicable, try `_sort_mixed`
# which would work, but which fails for special case of 1d arrays
# with tuples.
if values.size and isinstance(values[0], tuple):
ordered = _sort_tuples(values)
else:
ordered = _sort_mixed(values)
# codes:
if codes is None:
return ordered
if not is_list_like(codes):
raise TypeError(
"Only list-like objects or None are allowed to "
"be passed to safe_sort as codes"
)
codes = ensure_platform_int(np.asarray(codes))
if not assume_unique and not len(unique(values)) == len(values):
raise ValueError("values should be unique if codes is not None")
if sorter is None:
# mixed types
hash_klass, values = get_data_algo(values)
t = hash_klass(len(values))
t.map_locations(values)
sorter = ensure_platform_int(t.lookup(ordered))
if na_sentinel == -1:
# take_1d is faster, but only works for na_sentinels of -1
order2 = sorter.argsort()
new_codes = take_1d(order2, codes, fill_value=-1)
if verify:
mask = (codes < -len(values)) | (codes >= len(values))
else:
mask = None
else:
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
# Out of bound indices will be masked with `na_sentinel` next, so we
# may deal with them here without performance loss using `mode='wrap'`
new_codes = reverse_indexer.take(codes, mode="wrap")
mask = codes == na_sentinel
if verify:
mask = mask | (codes < -len(values)) | (codes >= len(values))
if mask is not None:
np.putmask(new_codes, mask, na_sentinel)
return ordered, ensure_platform_int(new_codes)
|
def safe_sort(
values,
codes=None,
na_sentinel: int = -1,
assume_unique: bool = False,
verify: bool = True,
) -> Union[np.ndarray, Tuple[np.ndarray, np.ndarray]]:
"""
Sort ``values`` and reorder corresponding ``codes``.
``values`` should be unique if ``codes`` is not None.
Safe for use with mixed types (int, str), orders ints before strs.
Parameters
----------
values : list-like
Sequence; must be unique if ``codes`` is not None.
codes : list_like, optional
Indices to ``values``. All out of bound indices are treated as
"not found" and will be masked with ``na_sentinel``.
na_sentinel : int, default -1
Value in ``codes`` to mark "not found".
Ignored when ``codes`` is None.
assume_unique : bool, default False
When True, ``values`` are assumed to be unique, which can speed up
the calculation. Ignored when ``codes`` is None.
verify : bool, default True
Check if codes are out of bound for the values and put out of bound
codes equal to na_sentinel. If ``verify=False``, it is assumed there
are no out of bound codes. Ignored when ``codes`` is None.
.. versionadded:: 0.25.0
Returns
-------
ordered : ndarray
Sorted ``values``
new_codes : ndarray
Reordered ``codes``; returned when ``codes`` is not None.
Raises
------
TypeError
* If ``values`` is not list-like or if ``codes`` is neither None
nor list-like
* If ``values`` cannot be sorted
ValueError
* If ``codes`` is not None and ``values`` contain duplicates.
"""
if not is_list_like(values):
raise TypeError(
"Only list-like objects are allowed to be passed to safe_sort as values"
)
if not isinstance(values, (np.ndarray, ABCExtensionArray)):
# don't convert to string types
dtype, _ = infer_dtype_from_array(values)
values = np.asarray(values, dtype=dtype)
def sort_mixed(values):
# order ints before strings, safe in py3
str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
nums = np.sort(values[~str_pos])
strs = np.sort(values[str_pos])
return np.concatenate([nums, np.asarray(strs, dtype=object)])
sorter = None
if (
not is_extension_array_dtype(values)
and lib.infer_dtype(values, skipna=False) == "mixed-integer"
):
# unorderable in py3 if mixed str/int
ordered = sort_mixed(values)
else:
try:
sorter = values.argsort()
ordered = values.take(sorter)
except TypeError:
# try this anyway
ordered = sort_mixed(values)
# codes:
if codes is None:
return ordered
if not is_list_like(codes):
raise TypeError(
"Only list-like objects or None are allowed to "
"be passed to safe_sort as codes"
)
codes = ensure_platform_int(np.asarray(codes))
if not assume_unique and not len(unique(values)) == len(values):
raise ValueError("values should be unique if codes is not None")
if sorter is None:
# mixed types
hash_klass, values = get_data_algo(values)
t = hash_klass(len(values))
t.map_locations(values)
sorter = ensure_platform_int(t.lookup(ordered))
if na_sentinel == -1:
# take_1d is faster, but only works for na_sentinels of -1
order2 = sorter.argsort()
new_codes = take_1d(order2, codes, fill_value=-1)
if verify:
mask = (codes < -len(values)) | (codes >= len(values))
else:
mask = None
else:
reverse_indexer = np.empty(len(sorter), dtype=np.int_)
reverse_indexer.put(sorter, np.arange(len(sorter)))
# Out of bound indices will be masked with `na_sentinel` next, so we
# may deal with them here without performance loss using `mode='wrap'`
new_codes = reverse_indexer.take(codes, mode="wrap")
mask = codes == na_sentinel
if verify:
mask = mask | (codes < -len(values)) | (codes >= len(values))
if mask is not None:
np.putmask(new_codes, mask, na_sentinel)
return ordered, ensure_platform_int(new_codes)
|
https://github.com/pandas-dev/pandas/issues/36562
|
In [79]: x = ['b', 'b', 'c', 'a', 'b', np.nan]
...: y = ['a', 'b', 'c', 'a', 'b', 'd']
...: mi1 = pd.MultiIndex.from_arrays(
...: [x, [1, 2, 3, 4, 5, 6]],
...: names=['a', 'b']
...: )
...: df = pd.DataFrame({'c': [1, 1, 1, 1, 1, 1]}, index=mi1)
...: mi2 = pd.MultiIndex.from_arrays(
...: [y, [1, 1, 1, 1, 1, 1]],
...: names=['a', 'b']
...: )
...: s = pd.Series([1, 2, 3, 4, 5, 6], index=mi2)
...: df.combine_first(pd.DataFrame({'some_col': s}))
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/algorithms.py in safe_sort(values, codes, na_sentinel, assume_unique, verify)
2060 try:
-> 2061 sorter = values.argsort()
2062 ordered = values.take(sorter)
TypeError: '<' not supported between instances of 'float' and 'str'
During handling of the above exception, another exception occurred:
TypeError Traceback (most recent call last)
<ipython-input-79-de018ddfae29> in <module>
11 )
12 s = pd.Series([1, 2, 3, 4, 5, 6], index=mi2)
---> 13 df.combine_first(pd.DataFrame({'some_col': s}))
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/frame.py in combine_first(self, other)
6239 return expressions.where(mask, y_values, x_values)
6240
-> 6241 return self.combine(other, combiner, overwrite=False)
6242
6243 def update(
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/frame.py in combine(self, other, func, fill_value, overwrite)
6104 other_idxlen = len(other.index) # save for compare
6105
-> 6106 this, other = self.align(other, copy=False)
6107 new_index = this.index
6108
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/frame.py in align(self, other, join, axis, level, copy, fill_value, method, limit, fill_axis, broadcast_axis)
3955 broadcast_axis=None,
3956 ) -> "DataFrame":
-> 3957 return super().align(
3958 other,
3959 join=join,
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/generic.py in align(self, other, join, axis, level, copy, fill_value, method, limit, fill_axis, broadcast_axis)
8542 axis = self._get_axis_number(axis)
8543 if isinstance(other, ABCDataFrame):
-> 8544 return self._align_frame(
8545 other,
8546 join=join,
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/generic.py in _align_frame(self, other, join, axis, level, copy, fill_value, method, limit, fill_axis)
8589 if axis is None or axis == 0:
8590 if not self.index.equals(other.index):
-> 8591 join_index, ilidx, iridx = self.index.join(
8592 other.index, how=join, level=level, return_indexers=True
8593 )
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/indexes/base.py in join(self, other, how, level, return_indexers, sort)
3491 )
3492 else:
-> 3493 return self._join_non_unique(
3494 other, how=how, return_indexers=return_indexers
3495 )
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/indexes/base.py in _join_non_unique(self, other, how, return_indexers)
3618 rvalues = other._get_engine_target()
3619
-> 3620 left_idx, right_idx = _get_join_indexers(
3621 [lvalues], [rvalues], how=how, sort=True
3622 )
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/reshape/merge.py in _get_join_indexers(left_keys, right_keys, sort, how, **kwargs)
1326 for n in range(len(left_keys))
1327 )
-> 1328 zipped = zip(*mapped)
1329 llab, rlab, shape = [list(x) for x in zipped]
1330
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/reshape/merge.py in <genexpr>(.0)
1323 # get left & right join labels and num. of levels at each location
1324 mapped = (
-> 1325 _factorize_keys(left_keys[n], right_keys[n], sort=sort, how=how)
1326 for n in range(len(left_keys))
1327 )
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/reshape/merge.py in _factorize_keys(lk, rk, sort, how)
1978 if sort:
1979 uniques = rizer.uniques.to_array()
-> 1980 llab, rlab = _sort_labels(uniques, llab, rlab)
1981
1982 # NA group
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/reshape/merge.py in _sort_labels(uniques, left, right)
2003 labels = np.concatenate([left, right])
2004
-> 2005 _, new_labels = algos.safe_sort(uniques, labels, na_sentinel=-1)
2006 new_labels = ensure_int64(new_labels)
2007 new_left, new_right = new_labels[:llength], new_labels[llength:]
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/algorithms.py in safe_sort(values, codes, na_sentinel, assume_unique, verify)
2063 except TypeError:
2064 # try this anyway
-> 2065 ordered = sort_mixed(values)
2066
2067 # codes:
~/envs/pandas-test/lib/python3.8/site-packages/pandas/core/algorithms.py in sort_mixed(values)
2046 # order ints before strings, safe in py3
2047 str_pos = np.array([isinstance(x, str) for x in values], dtype=bool)
-> 2048 nums = np.sort(values[~str_pos])
2049 strs = np.sort(values[str_pos])
2050 return np.concatenate([nums, np.asarray(strs, dtype=object)])
<__array_function__ internals> in sort(*args, **kwargs)
~/envs/pandas-test/lib/python3.8/site-packages/numpy/core/fromnumeric.py in sort(a, axis, kind, order)
989 else:
990 a = asanyarray(a).copy(order="K")
--> 991 a.sort(axis=axis, kind=kind, order=order)
992 return a
993
TypeError: '<' not supported between instances of 'float' and 'str'
|
TypeError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.