| id | prompt | docstring |
|---|---|---|
18,735 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import copy
import sys
from io import open
import itertools
import numpy as np
import tensorflow as tf
from .configuration_distilbert import DistilBertConfig
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list, get_initializer
from .file_utils import add_start_docstrings
The provided code snippet includes necessary dependencies for implementing the `gelu_new` function. Write a Python function `def gelu_new(x)` to solve the following problem:
Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied.
Here is the function:
def gelu_new(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf | Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. |
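As a quick sanity check, a minimal sketch (assuming TF 2.x eager execution; values are approximate) showing the tanh approximation in action:
import tensorflow as tf
x = tf.constant([-1.0, 0.0, 1.0])
print(gelu_new(x).numpy())  # approximately [-0.1588, 0., 0.8412], close to the exact erf-based GELU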
18,736 | from __future__ import absolute_import, division, print_function
import argparse
import os
import sys
from io import open
import pickle
import torch
import transformers.tokenization_transfo_xl as data_utils
from transformers import CONFIG_NAME, WEIGHTS_NAME
from transformers import (TransfoXLConfig, TransfoXLLMHeadModel,
load_tf_weights_in_transfo_xl)
from transformers.tokenization_transfo_xl import (CORPUS_NAME, VOCAB_FILES_NAMES)
import logging
VOCAB_FILES_NAMES = {'pretrained_vocab_file': 'vocab.bin', 'vocab_file': 'vocab.txt'}
CORPUS_NAME = 'corpus.bin'
def convert_transfo_xl_checkpoint_to_pytorch(tf_checkpoint_path,
transfo_xl_config_file,
pytorch_dump_folder_path,
transfo_xl_dataset_file):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(transfo_xl_dataset_file, "rb") as fp:
corpus = pickle.load(fp, encoding="latin1")
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print("Save vocabulary to {}".format(pytorch_vocab_dump_path))
corpus_vocab_dict = corpus.vocab.__dict__
torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
corpus_dict_no_vocab = corpus.__dict__
corpus_dict_no_vocab.pop('vocab', None)
pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
print("Save dataset to {}".format(pytorch_dataset_dump_path))
torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
config_path = os.path.abspath(transfo_xl_config_file)
tf_path = os.path.abspath(tf_checkpoint_path)
print("Converting Transformer XL checkpoint from {} with config at {}".format(tf_path, config_path))
# Initialise PyTorch model
if transfo_xl_config_file == "":
config = TransfoXLConfig()
else:
config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = TransfoXLLMHeadModel(config)
model = load_tf_weights_in_transfo_xl(model, config, tf_path)
# Save pytorch-model
pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
print("Save PyTorch model to {}".format(os.path.abspath(pytorch_weights_dump_path)))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(os.path.abspath(pytorch_config_dump_path)))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string()) | null |
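A minimal command-line driver sketch for the converter above (the argument names are illustrative, chosen to mirror the function's parameters):
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--tf_checkpoint_path", default="", type=str)
    parser.add_argument("--transfo_xl_config_file", default="", type=str)
    parser.add_argument("--pytorch_dump_folder_path", required=True, type=str)
    parser.add_argument("--transfo_xl_dataset_file", default="", type=str)
    args = parser.parse_args()
    convert_transfo_xl_checkpoint_to_pytorch(args.tf_checkpoint_path,
                                             args.transfo_xl_config_file,
                                             args.pytorch_dump_folder_path,
                                             args.transfo_xl_dataset_file)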
18,737 | from __future__ import absolute_import, division, print_function
import argparse
import logging
import numpy as np
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from transformers.modeling_bert import (BertConfig, BertEncoder,
BertIntermediate, BertLayer,
BertModel, BertOutput,
BertSelfAttention,
BertSelfOutput)
from transformers.modeling_roberta import (RobertaEmbeddings,
RobertaForMaskedLM,
RobertaForSequenceClassification,
RobertaModel)
SAMPLE_TEXT = 'Hello world! cécé herlolip'
The provided code snippet includes necessary dependencies for implementing the `convert_roberta_checkpoint_to_pytorch` function. Write a Python function `def convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head)` to solve the following problem:
Copy/paste/tweak roberta's weights to our BERT structure.
Here is the function:
def convert_roberta_checkpoint_to_pytorch(roberta_checkpoint_path, pytorch_dump_folder_path, classification_head):
"""
Copy/paste/tweak roberta's weights to our BERT structure.
"""
roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
roberta.eval() # disable dropout
config = BertConfig(
vocab_size_or_config_json_file=50265,
hidden_size=roberta.args.encoder_embed_dim,
num_hidden_layers=roberta.args.encoder_layers,
num_attention_heads=roberta.args.encoder_attention_heads,
intermediate_size=roberta.args.encoder_ffn_embed_dim,
max_position_embeddings=514,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
if classification_head:
config.num_labels = roberta.args.num_classes
print("Our BERT config:", config)
model = RobertaForSequenceClassification(config) if classification_head else RobertaForMaskedLM(config)
model.eval()
# Now let's copy all the weights.
# Embeddings
roberta_sent_encoder = roberta.model.decoder.sentence_encoder
model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight) # just zero them out b/c RoBERTa doesn't use them.
model.roberta.embeddings.LayerNorm.weight = roberta_sent_encoder.emb_layer_norm.weight
model.roberta.embeddings.LayerNorm.bias = roberta_sent_encoder.emb_layer_norm.bias
for i in range(config.num_hidden_layers):
# Encoder: start of layer
layer: BertLayer = model.roberta.encoder.layer[i]
roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
### self attention
self_attn: BertSelfAttention = layer.attention.self
assert(
roberta_layer.self_attn.in_proj_weight.shape == torch.Size((3 * config.hidden_size, config.hidden_size))
)
# we use three distinct linear layers so we split the source layer here.
self_attn.query.weight.data = roberta_layer.self_attn.in_proj_weight[:config.hidden_size, :]
self_attn.query.bias.data = roberta_layer.self_attn.in_proj_bias[:config.hidden_size]
self_attn.key.weight.data = roberta_layer.self_attn.in_proj_weight[config.hidden_size:2*config.hidden_size, :]
self_attn.key.bias.data = roberta_layer.self_attn.in_proj_bias[config.hidden_size:2*config.hidden_size]
self_attn.value.weight.data = roberta_layer.self_attn.in_proj_weight[2*config.hidden_size:, :]
self_attn.value.bias.data = roberta_layer.self_attn.in_proj_bias[2*config.hidden_size:]
### self-attention output
self_output: BertSelfOutput = layer.attention.output
assert(
self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
)
self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
self_output.dense.bias = roberta_layer.self_attn.out_proj.bias
self_output.LayerNorm.weight = roberta_layer.self_attn_layer_norm.weight
self_output.LayerNorm.bias = roberta_layer.self_attn_layer_norm.bias
### intermediate
intermediate: BertIntermediate = layer.intermediate
assert(
intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
)
intermediate.dense.weight = roberta_layer.fc1.weight
intermediate.dense.bias = roberta_layer.fc1.bias
### output
bert_output: BertOutput = layer.output
assert(
bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
)
bert_output.dense.weight = roberta_layer.fc2.weight
bert_output.dense.bias = roberta_layer.fc2.bias
bert_output.LayerNorm.weight = roberta_layer.final_layer_norm.weight
bert_output.LayerNorm.bias = roberta_layer.final_layer_norm.bias
#### end of layer
if classification_head:
model.classifier.dense.weight = roberta.model.classification_heads['mnli'].dense.weight
model.classifier.dense.bias = roberta.model.classification_heads['mnli'].dense.bias
model.classifier.out_proj.weight = roberta.model.classification_heads['mnli'].out_proj.weight
model.classifier.out_proj.bias = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
model.lm_head.dense.weight = roberta.model.decoder.lm_head.dense.weight
model.lm_head.dense.bias = roberta.model.decoder.lm_head.dense.bias
model.lm_head.layer_norm.weight = roberta.model.decoder.lm_head.layer_norm.weight
model.lm_head.layer_norm.bias = roberta.model.decoder.lm_head.layer_norm.bias
model.lm_head.decoder.weight = roberta.model.decoder.lm_head.weight
model.lm_head.bias = roberta.model.decoder.lm_head.bias
# Let's check that we get the same results.
input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0) # batch of size 1
our_output = model(input_ids)[0]
if classification_head:
their_output = roberta.model.classification_heads['mnli'](roberta.extract_features(input_ids))
else:
their_output = roberta.model(input_ids)[0]
print(our_output.shape, their_output.shape)
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
success = torch.allclose(our_output, their_output, atol=1e-3)
print(
"Do both models output the same tensors?",
"🔥" if success else "💩"
)
if not success:
raise Exception("Something went wRoNg")
print(f"Saving model to {pytorch_dump_folder_path}")
model.save_pretrained(pytorch_dump_folder_path) | Copy/paste/tweak roberta's weights to our BERT structure. |
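The query/key/value split above relies on fairseq fusing the three projections into a single ``(3 * hidden, hidden)`` matrix, stacked in q/k/v order. A toy sketch of the same slicing, with illustrative sizes:
import torch
hidden = 4
fused_w = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
q_w = fused_w[:hidden, :]             # first block of rows -> query
k_w = fused_w[hidden:2 * hidden, :]   # middle block -> key
v_w = fused_w[2 * hidden:, :]         # last block -> value
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)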
18,738 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_openai_gpt` function. Write a Python function `def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)` to solve the following problem:
Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
Here is the function:
def load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path):
""" Load tf pre-trained weights in a pytorch model (from NumPy arrays here)
"""
import re
import numpy as np
if '.ckpt' in openai_checkpoint_folder_path:
openai_checkpoint_folder_path = os.path.dirname(openai_checkpoint_folder_path)
logger.info("Loading weights from {}".format(openai_checkpoint_folder_path))
names = json.load(open(openai_checkpoint_folder_path + '/parameters_names.json', "r", encoding='utf-8'))
shapes = json.load(open(openai_checkpoint_folder_path + '/params_shapes.json', "r", encoding='utf-8'))
offsets = np.cumsum([np.prod(shape) for shape in shapes])
init_params = [np.load(openai_checkpoint_folder_path + '/params_{}.npy'.format(n)) for n in range(10)]
init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]
init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]
# This was used when we had a single embedding matrix for positions and tokens
# init_params[0] = np.concatenate([init_params[1], init_params[0]], 0)
# del init_params[1]
init_params = [arr.squeeze() for arr in init_params]
try:
assert model.tokens_embed.weight.shape == init_params[1].shape
assert model.positions_embed.weight.shape == init_params[0].shape
except AssertionError as e:
e.args += (model.tokens_embed.weight.shape, init_params[1].shape)
e.args += (model.positions_embed.weight.shape, init_params[0].shape)
raise
model.tokens_embed.weight.data = torch.from_numpy(init_params[1])
model.positions_embed.weight.data = torch.from_numpy(init_params[0])
names.pop(0)
# Pop position and token embedding arrays
init_params.pop(0)
init_params.pop(0)
for name, array in zip(names, init_params): # names[1:n_transfer], init_params[1:n_transfer]):
name = name[6:] # skip "model/"
assert name[-2:] == ":0"
name = name[:-2]
name = name.split('/')
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+\d+', m_name):
l = re.split(r'(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'g':
pointer = getattr(pointer, 'weight')
elif l[0] == 'b':
pointer = getattr(pointer, 'bias')
elif l[0] == 'w':
pointer = getattr(pointer, 'weight')
else:
pointer = getattr(pointer, l[0])
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model | Load tf pre-trained weights in a pytorch model (from NumPy arrays here) |
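The ``np.cumsum``/``np.split`` step above carves one flat parameter blob back into per-variable arrays. A minimal sketch with toy shapes:
import numpy as np
shapes = [(2, 3), (4,)]
flat = np.arange(10, dtype=np.float32)                     # pretend this was loaded from the params_*.npy files
offsets = np.cumsum([np.prod(shape) for shape in shapes])  # [6, 10]
parts = np.split(flat, offsets)[:-1]                       # the final split is empty, so drop it
params = [part.reshape(shape) for part, shape in zip(parts, shapes)]
assert params[0].shape == (2, 3) and params[1].shape == (4,)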
18,739 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) | null |
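A quick numeric check (a sketch, assuming a recent PyTorch): the tanh approximation stays very close to the exact erf-based GELU.
import math
import torch
x = torch.linspace(-3.0, 3.0, steps=13)
exact = x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
print((gelu(x) - exact).abs().max().item())  # small, on the order of 1e-4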
18,740 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import json
import logging
import math
import os
import sys
from io import open
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from torch.nn.parameter import Parameter
from .modeling_utils import PreTrainedModel, Conv1D, prune_conv1d_layer, SequenceSummary
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings
def swish(x):
return x * torch.sigmoid(x) | null |
18,741 | from __future__ import absolute_import, division, print_function
import argparse
import json
from io import open
import torch
import numpy
from transformers import CONFIG_NAME, WEIGHTS_NAME
from transformers.tokenization_xlm import VOCAB_FILES_NAMES
import logging
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
}
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
# Load checkpoint
chkpt = torch.load(xlm_checkpoint_path, map_location='cpu')
state_dict = chkpt['model']
# We have the base model one level deeper than the original XLM repository
two_levels_state_dict = {}
for k, v in state_dict.items():
if 'pred_layer' in k:
two_levels_state_dict[k] = v
else:
two_levels_state_dict['transformer.' + k] = v
config = chkpt['params']
config = dict((n, v) for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray)))
vocab = chkpt['dico_word2id']
vocab = dict((s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''), i) for s, i in vocab.items())
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(two_levels_state_dict, pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(json.dumps(config, indent=2) + "\n")
print("Save vocab file to {}".format(pytorch_config_dump_path))
with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
f.write(json.dumps(vocab, indent=2) + "\n") | null |
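The vocabulary rewrite above strips BPE continuation markers (``@@``) and appends an end-of-word suffix to every non-special token (index > 13). A toy illustration of the same expression:
toy_vocab = {'<s>': 0, 'hel@@': 20, 'lo': 21}
converted = dict((s + '</w>' if s.find('@@') == -1 and i > 13 else s.replace('@@', ''), i)
                 for s, i in toy_vocab.items())
assert converted == {'<s>': 0, 'hel': 20, 'lo</w>': 21}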
18,742 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch
import tensorflow as tf
from tensorflow.python.keras import backend as K
except ImportError as e:
logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")
raise e
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tfo = tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ''
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + '.'
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(sw_name, start_prefix_to_remove=start_prefix_to_remove)
# Find associated numpy array in pytorch model state dict
assert name in pt_state_dict, "{} not found in PyTorch model".format(name)
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
logger.info("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tfo = tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
The provided code snippet includes necessary dependencies for implementing the `load_pytorch_checkpoint_in_tf2_model` function. Write a Python function `def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False)` to solve the following problem:
Load pytorch checkpoints in a TF 2.0 model
Here is the function:
def load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
try:
import tensorflow as tf
import torch
except ImportError as e:
logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")
raise e
pt_path = os.path.abspath(pytorch_checkpoint_path)
logger.info("Loading PyTorch weights from {}".format(pt_path))
pt_state_dict = torch.load(pt_path, map_location='cpu')
return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys) | Load pytorch checkpoints in a TF 2.0 model |
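A hypothetical usage sketch (the file names are illustrative; it assumes ``BertConfig`` and ``TFBertForPreTraining`` are importable from transformers):
from transformers import BertConfig, TFBertForPreTraining
config = BertConfig.from_json_file('bert_config.json')     # assumed local config file
tf_model = TFBertForPreTraining(config)
tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, 'pytorch_model.bin')
tf_model.save_weights('tf_model.h5', save_format='h5')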
18,743 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
def load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch state_dict in a TF 2.0 model.
"""
try:
import torch
import tensorflow as tf
from tensorflow.python.keras import backend as K
except ImportError as e:
logger.error("Loading a PyTorch model in TensorFlow, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")
raise e
if tf_inputs is None:
tf_inputs = tf_model.dummy_inputs
if tf_inputs is not None:
tfo = tf_model(tf_inputs, training=False) # Make sure model is built
# Adapt state dict - TODO remove this and update the AWS weights files instead
# Convert old format to new format if needed from a PyTorch state_dict
old_keys = []
new_keys = []
for key in pt_state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
pt_state_dict[new_key] = pt_state_dict.pop(old_key)
# Make sure we are able to load PyTorch base models as well as derived models (with heads)
# TF models always have a prefix, some of PyTorch models (base ones) don't
start_prefix_to_remove = ''
if not any(s.startswith(tf_model.base_model_prefix) for s in pt_state_dict.keys()):
start_prefix_to_remove = tf_model.base_model_prefix + '.'
symbolic_weights = tf_model.trainable_weights + tf_model.non_trainable_weights
weight_value_tuples = []
all_pytorch_weights = set(list(pt_state_dict.keys()))
for symbolic_weight in symbolic_weights:
sw_name = symbolic_weight.name
name, transpose = convert_tf_weight_name_to_pt_weight_name(sw_name, start_prefix_to_remove=start_prefix_to_remove)
# Find associated numpy array in pytorch model state dict
assert name in pt_state_dict, "{} not found in PyTorch model".format(name)
array = pt_state_dict[name].numpy()
if transpose:
array = numpy.transpose(array)
if len(symbolic_weight.shape) < len(array.shape):
array = numpy.squeeze(array)
elif len(symbolic_weight.shape) > len(array.shape):
array = numpy.expand_dims(array, axis=0)
try:
assert list(symbolic_weight.shape) == list(array.shape)
except AssertionError as e:
e.args += (symbolic_weight.shape, array.shape)
raise e
logger.info("Initialize TF weight {}".format(symbolic_weight.name))
weight_value_tuples.append((symbolic_weight, array))
all_pytorch_weights.discard(name)
K.batch_set_value(weight_value_tuples)
if tf_inputs is not None:
tfo = tf_model(tf_inputs, training=False) # Make sure restore ops are run
logger.info("Weights or buffers not loaded from PyTorch model: {}".format(all_pytorch_weights))
return tf_model
The provided code snippet includes necessary dependencies for implementing the `load_pytorch_model_in_tf2_model` function. Write a Python function `def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False)` to solve the following problem:
Load pytorch checkpoints in a TF 2.0 model
Here is the function:
def load_pytorch_model_in_tf2_model(tf_model, pt_model, tf_inputs=None, allow_missing_keys=False):
""" Load pytorch checkpoints in a TF 2.0 model
"""
pt_state_dict = pt_model.state_dict()
return load_pytorch_weights_in_tf2_model(tf_model, pt_state_dict, tf_inputs=tf_inputs, allow_missing_keys=allow_missing_keys) | Load pytorch checkpoints in a TF 2.0 model |
18,744 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os
import re
import numpy
logger = logging.getLogger(__name__)
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]  # default dummy inputs, value as in transformers.file_utils
def load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=False):
""" Load TF 2.0 model in a pytorch model
"""
weights = tf_model.weights
return load_tf2_weights_in_pytorch_model(pt_model, weights, allow_missing_keys=allow_missing_keys)
The provided code snippet includes necessary dependencies for implementing the `load_tf2_checkpoint_in_pytorch_model` function. Write a Python function `def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False)` to solve the following problem:
Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
Here is the function:
def load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path, tf_inputs=None, allow_missing_keys=False):
""" Load TF 2.0 HDF5 checkpoint in a PyTorch model
We use HDF5 to easily do transfer learning
(see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357).
"""
try:
import tensorflow as tf
import torch
except ImportError as e:
logger.error("Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.")
raise e
import transformers
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Loading TensorFlow weights from {}".format(tf_checkpoint_path))
# Instantiate and load the associated TF 2.0 model
tf_model_class_name = "TF" + pt_model.__class__.__name__ # Add "TF" at the beginning
tf_model_class = getattr(transformers, tf_model_class_name)
tf_model = tf_model_class(pt_model.config)
if tf_inputs is None:
tf_inputs = tf.constant(DUMMY_INPUTS)
if tf_inputs is not None:
tfo = tf_model(tf_inputs, training=False) # Make sure model is built
tf_model.load_weights(tf_checkpoint_path, by_name=True)
return load_tf2_model_in_pytorch_model(pt_model, tf_model, allow_missing_keys=allow_missing_keys) | Load TF 2.0 HDF5 checkpoint in a PyTorch model We use HDF5 to easily do transfer learning (see https://github.com/tensorflow/tensorflow/blob/ee16fcac960ae660e0e4496658a366e2f745e1f0/tensorflow/python/keras/engine/network.py#L1352-L1357). |
18,745 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .tokenization_gpt2 import GPT2Tokenizer
def lru_cache():
# No-op stand-in for functools.lru_cache (a Python 2 fallback in the original tokenizer module)
return lambda func: func
18,746 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import argparse
import tensorflow as tf
from transformers import is_torch_available, cached_path
from transformers import (load_pytorch_checkpoint_in_tf2_model,
BertConfig, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2Config, TFGPT2LMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNetConfig, TFXLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMConfig, TFXLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
TransfoXLConfig, TFTransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaConfig, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRLConfig, TFCTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP)
import logging
import torch
import numpy as np
from transformers import (BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BERT_PRETRAINED_MODEL_ARCHIVE_MAP,
GPT2LMHeadModel, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP,
XLNetLMHeadModel, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP,
XLMWithLMHeadModel, XLM_PRETRAINED_MODEL_ARCHIVE_MAP,
TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP,
OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP,
RobertaForMaskedLM, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP,
DistilBertForMaskedLM, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP,
CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP)
MODEL_CLASSES = {
'bert': (BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'bert-large-uncased-whole-word-masking-finetuned-squad': (BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'bert-large-cased-whole-word-masking-finetuned-squad': (BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'bert-base-cased-finetuned-mrpc': (BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_MODEL_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'gpt2': (GPT2Config, TFGPT2LMHeadModel, GPT2LMHeadModel, GPT2_PRETRAINED_MODEL_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP),
'xlnet': (XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_MODEL_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP),
'xlm': (XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_MODEL_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP),
'transfo-xl': (TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP),
'openai-gpt': (OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'roberta': (RobertaConfig, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP),
'roberta-large-mnli': (RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP),
'distilbert': (DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'distilbert-base-uncased-distilled-squad': (DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP),
'ctrl': (CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_MODEL_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP)
}
def convert_pt_checkpoint_to_tf(model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True):
if model_type not in MODEL_CLASSES:
raise ValueError("Unrecognized model type, should be one of {}.".format(list(MODEL_CLASSES.keys())))
config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
config_file = cached_path(aws_config_map[config_file], force_download=not use_cached_models)
config = config_class.from_json_file(config_file)
config.output_hidden_states = True
config.output_attentions = True
print("Building TensorFlow model from configuration: {}".format(str(config)))
tf_model = model_class(config)
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_model_maps:
pytorch_checkpoint_path = cached_path(aws_model_maps[pytorch_checkpoint_path], force_download=not use_cached_models)
# Load PyTorch checkpoint in tf2 model:
tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)
if compare_with_pt_model:
inputs_list = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
tf_inputs = tf.constant(inputs_list)
tfo = tf_model(tf_inputs, training=False) # build the network
pt_model = pt_model_class.from_pretrained(None,
config=config,
state_dict=torch.load(pytorch_checkpoint_path,
map_location='cpu'))
pt_inputs = torch.tensor(inputs_list)
with torch.no_grad():
pto = pt_model(pt_inputs)
np_pt = pto[0].detach().numpy()
np_tf = tfo[0].numpy()
diff = np.amax(np.abs(np_pt - np_tf))
print("Max absolute difference between models outputs {}".format(diff))
assert diff <= 2e-2, "Error, model absolute difference is >2e-2"
# Save pytorch-model
print("Save TensorFlow model to {}".format(tf_dump_path))
tf_model.save_weights(tf_dump_path, save_format='h5')
def convert_all_pt_checkpoints_to_tf(args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None,
compare_with_pt_model=False, use_cached_models=False, only_convert_finetuned_models=False):
assert os.path.isdir(tf_dump_path), "--tf_dump_path should be a directory"
if args_model_type is None:
model_types = list(MODEL_CLASSES.keys())
else:
model_types = [args_model_type]
for j, model_type in enumerate(model_types, start=1):
print("=" * 100)
print(" Converting model type {}/{}: {}".format(j, len(model_types), model_type))
print("=" * 100)
if model_type not in MODEL_CLASSES:
raise ValueError("Unrecognized model type {}, should be one of {}.".format(model_type, list(MODEL_CLASSES.keys())))
config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
model_shortcut_names_or_path = list(aws_model_maps.keys())
if config_shortcut_names_or_path is None:
config_shortcut_names_or_path = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1):
print("-" * 100)
if '-squad' in model_shortcut_name or '-mrpc' in model_shortcut_name or '-mnli' in model_shortcut_name:
if not only_convert_finetuned_models:
print(" Skipping finetuned checkpoint {}".format(model_shortcut_name))
continue
model_type = model_shortcut_name
elif only_convert_finetuned_models:
print(" Skipping not finetuned checkpoint {}".format(model_shortcut_name))
continue
print(" Converting checkpoint {}/{}: {} - model_type {}".format(i, len(aws_config_map), model_shortcut_name, model_type))
print("-" * 100)
if config_shortcut_name in aws_config_map:
config_file = cached_path(aws_config_map[config_shortcut_name], force_download=not use_cached_models)
else:
config_file = cached_path(config_shortcut_name, force_download=not use_cached_models)
if model_shortcut_name in aws_model_maps:
model_file = cached_path(aws_model_maps[model_shortcut_name], force_download=not use_cached_models)
else:
model_file = cached_path(model_shortcut_name, force_download=not use_cached_models)
if os.path.isfile(model_shortcut_name):
model_shortcut_name = 'converted_model'
convert_pt_checkpoint_to_tf(model_type=model_type,
pytorch_checkpoint_path=model_file,
config_file=config_file,
tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'),
compare_with_pt_model=compare_with_pt_model)
os.remove(config_file)
os.remove(model_file) | null |
18,747 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
Docstring fragment: prediction scores of the language modeling head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``) and of the next sequence prediction (classification) head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``).
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
start_positions=None, end_positions=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split adds a dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_bert` function. Write a Python function `def load_tf_weights_in_bert(model, config, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model.
Here is the function:
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
# which are not required for using the pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
l = re.split(r'_(\d+)', m_name)
else:
l = [m_name]
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
elif m_name == 'kernel':
array = np.transpose(array)
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model | Load tf checkpoints in a pytorch model. |
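A hypothetical usage sketch (file names are illustrative; it assumes ``BertForPreTraining`` is importable from transformers):
from transformers import BertConfig, BertForPreTraining
config = BertConfig.from_json_file('bert_config.json')     # assumed local config file
model = BertForPreTraining(config)
model = load_tf_weights_in_bert(model, config, 'bert_model.ckpt')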
18,748 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings
Docstring fragment: prediction scores of the language modeling head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``) and of the next sequence prediction (classification) head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``).
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
start_positions=None, end_positions=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split adds a dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415
Here is the function:
def gelu(x):
""" Original Implementation of the gelu activation function in Google Bert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | Original Implementation of the gelu activation function in Google Bert repo when initially created. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415 |
18,749 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings
Docstring fragment: prediction scores of the language modeling head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``) and of the next sequence prediction (classification) head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``).
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
start_positions=None, end_positions=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split adds a dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
The provided code snippet includes necessary dependencies for implementing the `gelu_new` function. Write a Python function `def gelu_new(x)` to solve the following problem:
Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415
Here is the function:
def gelu_new(x):
""" Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
Also see https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) | Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT). Also see https://arxiv.org/abs/1606.08415 |
18,750 | from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_bert import BertConfig
from .file_utils import add_start_docstrings
Docstring fragment: prediction scores of the language modeling head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``) and of the next sequence prediction (classification) head (``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``).
def __init__(self, config):
super(BertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = BertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
start_positions=None, end_positions=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split adds a dimension; squeeze it away
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
def swish(x):
return x * torch.sigmoid(x) | null |
18,751 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import copy
import json
import logging
import os
from io import open
import six
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F
from .configuration_utils import PretrainedConfig
from .file_utils import cached_path, WEIGHTS_NAME, TF_WEIGHTS_NAME, TF2_WEIGHTS_NAME
class Conv1D(nn.Module):
def __init__(self, nf, nx):
""" Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
Basically works like a Linear layer but the weights are transposed
"""
super(Conv1D, self).__init__()
self.nf = nf
w = torch.empty(nx, nf)
nn.init.normal_(w, std=0.02)
self.weight = nn.Parameter(w)
self.bias = nn.Parameter(torch.zeros(nf))
def forward(self, x):
size_out = x.size()[:-1] + (self.nf,)
x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
x = x.view(*size_out)
return x
def prune_linear_layer(layer, index, dim=0):
""" Prune a linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if layer.bias is not None:
if dim == 1:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
if layer.bias is not None:
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
def prune_conv1d_layer(layer, index, dim=1):
""" Prune a Conv1D layer (a model parameters) to keep only entries in index.
A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
index = index.to(layer.weight.device)
W = layer.weight.index_select(dim, index).clone().detach()
if dim == 0:
b = layer.bias.clone().detach()
else:
b = layer.bias[index].clone().detach()
new_size = list(layer.weight.size())
new_size[dim] = len(index)
new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
new_layer.weight.requires_grad = False
new_layer.weight.copy_(W.contiguous())
new_layer.weight.requires_grad = True
new_layer.bias.requires_grad = False
new_layer.bias.copy_(b.contiguous())
new_layer.bias.requires_grad = True
return new_layer
The provided code snippet includes necessary dependencies for implementing the `prune_layer` function. Write a Python function `def prune_layer(layer, index, dim=None)` to solve the following problem:
Prune a Conv1D or nn.Linear layer (a model parameter) to keep only entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads.
Here is the function:
def prune_layer(layer, index, dim=None):
""" Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index.
Return the pruned layer as a new layer with requires_grad=True.
Used to remove heads.
"""
if isinstance(layer, nn.Linear):
return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
elif isinstance(layer, Conv1D):
return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
else:
raise ValueError("Can't prune layer of class {}".format(layer.__class__)) | Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index. Return the pruned layer as a new layer with requires_grad=True. Used to remove heads. |
18,752 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings)
Here is the function:
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) |
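For example:
word = ('h', 'e', 'l', 'l', 'o')
assert get_pairs(word) == {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o')}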
18,753 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
The provided code snippet includes necessary dependencies for implementing the `lowercase_and_remove_accent` function. Write a Python function `def lowercase_and_remove_accent(text)` to solve the following problem:
Lowercase and strips accents from a piece of text based on https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py
Here is the function:
def lowercase_and_remove_accent(text):
"""
Lowercase and strips accents from a piece of text based on
https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py
"""
text = ' '.join(text)
text = text.lower()
text = unicodedata.normalize("NFD", text)
output = []
for char in text:
cat = unicodedata.category(char)
if cat == "Mn":
continue
output.append(char)
return "".join(output).lower().split(' ') | Lowercase and strips accents from a piece of text based on https://github.com/facebookresearch/XLM/blob/master/tools/lowercase_and_remove_accent.py |
18,754 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
The provided code snippet includes necessary dependencies for implementing the `replace_unicode_punct` function. Write a Python function `def replace_unicode_punct(text)` to solve the following problem:
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
Here is the function:
def replace_unicode_punct(text):
'''
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
'''
text = text.replace(',', ',')
text = re.sub(r'。\s*', '. ', text)
text = text.replace('、', ',')
text = text.replace('”', '"')
text = text.replace('“', '"')
text = text.replace('∶', ':')
text = text.replace(':', ':')
text = text.replace('?', '?')
text = text.replace('《', '"')
text = text.replace('》', '"')
text = text.replace(')', ')')
text = text.replace('!', '!')
text = text.replace('(', '(')
text = text.replace(';', ';')
text = text.replace('1', '"')
text = text.replace('」', '"')
text = text.replace('「', '"')
text = text.replace('0', '0')
text = text.replace('3', '3')
text = text.replace('2', '2')
text = text.replace('5', '5')
text = text.replace('6', '6')
text = text.replace('9', '9')
text = text.replace('7', '7')
text = text.replace('8', '8')
text = text.replace('4', '4')
text = re.sub(r'.\s*', '. ', text)
text = text.replace('~', '~')
text = text.replace('’', '\'')
text = text.replace('…', '...')
text = text.replace('━', '-')
text = text.replace('〈', '<')
text = text.replace('〉', '>')
text = text.replace('【', '[')
text = text.replace('】', ']')
text = text.replace('%', '%')
return text | Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl |
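A hedged example of the full-width-to-ASCII normalization performed above (the input strings are illustrative):
print(replace_unicode_punct('中文,标点?'))
# '中文,标点?'
print(replace_unicode_punct('完了。'))
# '完了. ' (the ideographic full stop also normalizes the following whitespace)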
18,755 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
The provided code snippet includes necessary dependencies for implementing the `remove_non_printing_char` function. Write a Python function `def remove_non_printing_char(text)` to solve the following problem:
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
Here is the function:
def remove_non_printing_char(text):
'''
Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
'''
output = []
for char in text:
cat = unicodedata.category(char)
if cat.startswith('C'):
continue
output.append(char)
return "".join(output) | Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl |
18,756 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import json
import logging
import os
import re
import sys
import unicodedata
from io import open
import sacremoses as sm
from .tokenization_utils import PreTrainedTokenizer
from .tokenization_bert import BasicTokenizer
The provided code snippet includes necessary dependencies for implementing the `romanian_preprocessing` function. Write a Python function `def romanian_preprocessing(text)` to solve the following problem:
Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024`
Here is the function:
def romanian_preprocessing(text):
'''Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024`'''
# https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/normalise-romanian.py
text = text.replace("\u015e", "\u0218").replace("\u015f", "\u0219")
text = text.replace("\u0162", "\u021a").replace("\u0163", "\u021b")
# https://github.com/rsennrich/wmt16-scripts/blob/master/preprocess/remove-diacritics.py
text = text.replace("\u0218", "S").replace("\u0219", "s") #s-comma
text = text.replace("\u021a", "T").replace("\u021b", "t") #t-comma
text = text.replace("\u0102", "A").replace("\u0103", "a")
text = text.replace("\u00C2", "A").replace("\u00E2", "a")
text = text.replace("\u00CE", "I").replace("\u00EE", "i")
return text | Sennrich's WMT16 scripts for Romanian preprocessing, used by model `xlm-mlm-enro-1024` |
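An illustrative run (the Romanian string is assumed, not from the source): cedilla forms are first normalized to their comma-below variants, then all diacritics are stripped:
print(romanian_preprocessing('Ştiinţă şi Împărat'))
# 'Stiinta si Imparat'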
18,757 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .tokenization_utils import PreTrainedTokenizer
def lru_cache():
# Fallback no-op decorator for Python 2, where functools.lru_cache is
# unavailable: it returns the wrapped function unchanged (no caching).
return lambda func: func
18,758 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
Here is the function:
def bytes_to_unicode():
"""
Returns a list of utf-8 bytes and a mapping to unicode strings.
We specifically avoid mapping to whitespace/control characters the bpe code barfs on.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
"""
_chr = unichr if sys.version_info[0] == 2 else chr
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [_chr(n) for n in cs]
return dict(zip(bs, cs)) | Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
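A hedged demonstration of the resulting table: printable bytes map to themselves, while bytes the bpe code barfs on (e.g. the space byte) are shifted above U+0100:
byte_encoder = bytes_to_unicode()
print(byte_encoder[ord('A')])  # 'A' (printable ASCII is unchanged)
print(byte_encoder[ord(' ')])  # 'Ġ' (chr(256 + 32), the familiar GPT-2 space marker)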
18,759 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). |
18,760 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
vocab = collections.OrderedDict()
with open(vocab_file, "r", encoding="utf-8") as reader:
tokens = reader.readlines()
for index, token in enumerate(tokens):
token = token.rstrip('\n')
vocab[token] = index
return vocab | Loads a vocabulary file into a dictionary. |
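A minimal sketch, assuming a hypothetical vocab.txt containing one token per line ([PAD], [UNK], the):
vocab = load_vocab('vocab.txt')
print(vocab)
# OrderedDict([('[PAD]', 0), ('[UNK]', 1), ('the', 2)])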
18,761 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `char` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `char` is a whitespace character.
18,762 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `char` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `char` is a control character.
18,763 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `char` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `char` is a punctuation character.
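A few illustrative calls showing the ASCII special-casing described in the comments:
print(_is_punctuation('^'))   # True: ASCII symbol treated as punctuation despite Unicode category Sk
print(_is_punctuation('。'))  # True: Unicode category Po
print(_is_punctuation('a'))   # False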
18,764 | def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
The provided code snippet includes necessary dependencies for implementing the `_get_word_ngrams` function. Write a Python function `def _get_word_ngrams(n, sentences)` to solve the following problem:
Calculates word n-grams for multiple sentences.
Here is the function:
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
# words = _split_into_words(sentences)
words = sum(sentences, [])
# words = [w for w in words if w not in stopwords]
return _get_ngrams(n, words) | Calculates word n-grams for multiple sentences. |
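A short worked example (the sentences are illustrative); note that the sentences are flattened first, so n-grams can span sentence boundaries:
sentences = [['the', 'cat', 'sat'], ['on', 'the', 'mat']]
print(_get_word_ngrams(2, sentences))
# {('the', 'cat'), ('cat', 'sat'), ('sat', 'on'), ('on', 'the'), ('the', 'mat')}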
18,765 | import glob
import json
import os
import random
import re
import subprocess
from collections import Counter
from os.path import join as pjoin
import torch
from others.logging import logger
from others.transformers import BertTokenizer
from others.utils import clean
from prepro.utils import _get_word_ngrams
import argparse
import time
import logging
import gc
logger = logging.getLogger()
def format_to_qg(args):
#format_to_robert_wiki_query_generation
if (args.dataset != ''):
datasets = [args.dataset]
else:
datasets = ['train']
raw_path = '/home/lcl193798/PreRobertaSummMaro/raw_data/dureader_zhidao_data_10k'
save_path = '/home/lcl193798/PreRobertaSummMaro/bert_data/dureader_zhidao_data_100k'
for corpus_type in datasets:
# NOTE: the per-instance preprocessing that builds src_subtoken_idxs,
# tgt_subtoken_idxs, sent_labels, segments_ids, cls_ids, src_txt, tgt_txt,
# new_query_id and save_file was elided from this extract; only the
# collection and saving logic below survives.
b_data_dict = {"src": src_subtoken_idxs, "tgt": tgt_subtoken_idxs,
"src_sent_labels": sent_labels, "segs": segments_ids, 'clss': cls_ids,
'src_txt': src_txt, "tgt_txt": tgt_txt, "query_id": new_query_id}
datasets.append(b_data_dict)
break
if len(datasets) == 10000:
break
logger.info('Processed instances %d' % len(datasets))
logger.info('Saving to %s' % save_file)
torch.save(datasets, save_file)
datasets = []
gc.collect() | null |
import logging
import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
import distributed
from models.reporter_ext import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.models.model.NMTModel`): translation model
to train
train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
optim(:obj:`onmt.utils.optimizers.Optimizer`):
the optimizer responsible for update
trunc_size(int): length of truncated back propagation through time
shard_size(int): compute loss in shards of this size for efficiency
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
grad_accum_count(int): accumulate gradients this many times.
report_manager(:obj:`onmt.utils.ReportMgrBase`):
the object that creates reports, or None
model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
used to save a checkpoint.
Thus nothing will be saved if this parameter is None
"""
def __init__(self, args, model, optim,
grad_accum_count=1, n_gpu=1, gpu_rank=1,
report_manager=None):
# Basic attributes.
self.args = args
self.save_checkpoint_steps = args.save_checkpoint_steps
self.model = model
self.optim = optim
self.grad_accum_count = grad_accum_count
self.n_gpu = n_gpu
self.gpu_rank = gpu_rank
self.report_manager = report_manager
self.loss = torch.nn.BCELoss(reduction='none')
assert grad_accum_count > 0
# Set model in training mode.
if (model):
self.model.train()
def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):
"""
The main training loop: trains by iterating over the training data
(i.e. `train_iter_fct`) and runs validation
(i.e. by iterating over `valid_iter_fct`).
Args:
train_iter_fct(function): a function that returns the train
iterator. e.g. something like
train_iter_fct = lambda: generator(*args, **kwargs)
valid_iter_fct(function): same as train_iter_fct, for valid data
train_steps(int):
valid_steps(int):
save_checkpoint_steps(int):
Return:
None
"""
logger.info('Start training...')
# step = self.optim._step + 1
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = Statistics()
report_stats = Statistics()
self._start_report_manager(start_time=total_stats.start_time)
while step <= train_steps:
reduce_counter = 0
for i, batch in enumerate(train_iter):
if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
true_batchs.append(batch)
normalization += batch.batch_size
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
if self.n_gpu > 1:
normalization = sum(distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):
self._save(step)
step += 1
if step > train_steps:
break
train_iter = train_iter_fct()
return total_stats
def validate(self, valid_iter, step=0):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = Statistics()
with torch.no_grad():
for batch in valid_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
self._report_step(0, step, valid_stats=stats)
return stats
def test(self, test_iter, step, cal_lead=False, cal_oracle=False):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
def _get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _block_tri(c, p):
tri_c = _get_ngrams(3, c.split())
for s in p:
tri_s = _get_ngrams(3, s.split())
if len(tri_c.intersection(tri_s)) > 0:
return True
return False
if (not cal_lead and not cal_oracle):
self.model.eval()
stats = Statistics()
can_path = '%s_step%d.candidate' % (self.args.result_path, step)
gold_path = '%s_step%d.gold' % (self.args.result_path, step)
with open(can_path, 'w') as save_pred:
with open(gold_path, 'w') as save_gold:
with torch.no_grad():
for batch in test_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
gold = []
pred = []
if (cal_lead):
selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size
elif (cal_oracle):
selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in
range(batch.batch_size)]
else:
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
sent_scores = sent_scores + mask.float()
sent_scores = sent_scores.cpu().data.numpy()
selected_ids = np.argsort(-sent_scores, 1)
print (selected_ids)
# selected_ids = np.sort(selected_ids,1)
for i, idx in enumerate(selected_ids):
_pred = []
if (len(batch.src_str[i]) == 0):
continue
for j in selected_ids[i][:len(batch.src_str[i])]:
if (j >= len(batch.src_str[i])):
continue
candidate = batch.src_str[i][j].strip()
if (self.args.block_trigram):
if (not _block_tri(candidate, _pred)):
_pred.append(candidate)
else:
_pred.append(candidate)
if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):
break
_pred = '<q>'.join(_pred)
if (self.args.recall_eval):
_pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])
pred.append(_pred)
gold.append(batch.tgt_str[i])
for i in range(len(gold)):
save_gold.write(gold[i].strip() + '\n')
for i in range(len(pred)):
save_pred.write(pred[i].strip() + '\n')
if (step != -1 and self.args.report_rouge):
rouges = test_rouge(self.args.temp_dir, can_path, gold_path)
logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
self._report_step(0, step, valid_stats=stats)
return stats
def _gradient_accumulation(self, true_batchs, normalization, total_stats,
report_stats):
if self.grad_accum_count > 1:
self.model.zero_grad()
for batch in true_batchs:
if self.grad_accum_count == 1:
self.model.zero_grad()
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
(loss / loss.numel()).backward()
# loss.div(float(normalization)).backward()
batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
# 4. Update the parameters and statistics.
if self.grad_accum_count == 1:
# Multi GPU gradient gather
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
# in case of multi step gradient accumulation,
# update only after accum batches
if self.grad_accum_count > 1:
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
def _save(self, step):
real_model = self.model
# real_generator = (self.generator.module
# if isinstance(self.generator, torch.nn.DataParallel)
# else self.generator)
model_state_dict = real_model.state_dict()
# generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
# 'generator': generator_state_dict,
'opt': self.args,
'optim': self.optim,
}
checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)
logger.info("Saving checkpoint %s" % checkpoint_path)
# checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)
if (not os.path.exists(checkpoint_path)):
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step)
class ReportMgr(ReportMgrBase):
def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
"""
A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None
"""
super(ReportMgr, self).__init__(report_every, start_time)
self.tensorboard_writer = tensorboard_writer
def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
if self.tensorboard_writer is not None:
stats.log_tensorboard(
prefix, self.tensorboard_writer, learning_rate, step)
def _report_training(self, step, num_steps, learning_rate,
report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps,
learning_rate, self.start_time)
# Log the progress using the number of batches on the x-axis.
self.maybe_log_tensorboard(report_stats,
"progress",
learning_rate,
self.progress_step)
report_stats = Statistics()
return report_stats
def _report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
See base class method `ReportMgrBase.report_step`.
"""
if train_stats is not None:
self.log('Train xent: %g' % train_stats.xent())
self.maybe_log_tensorboard(train_stats,
"train",
lr,
step)
if valid_stats is not None:
self.log('Validation xent: %g at step %d' % (valid_stats.xent(), step))
self.maybe_log_tensorboard(valid_stats,
"valid",
lr,
step)
logger = logging.getLogger()
The provided code snippet includes necessary dependencies for implementing the `build_trainer` function. Write a Python function `def build_trainer(args, device_id, model, optim)` to solve the following problem:
Simplify `Trainer` creation based on user `opt`s* Args: opt (:obj:`Namespace`): user options (usually from argument parsing) model (:obj:`onmt.models.NMTModel`): the model to train fields (dict): dict of fields optim (:obj:`onmt.utils.Optimizer`): optimizer used during training data_type (str): string describing the type of data e.g. "text", "img", "audio" model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object used to save the model
Here is the function:
def build_trainer(args, device_id, model, optim):
"""
Simplify `Trainer` creation based on user `opt`s*
Args:
opt (:obj:`Namespace`): user options (usually from argument parsing)
model (:obj:`onmt.models.NMTModel`): the model to train
fields (dict): dict of fields
optim (:obj:`onmt.utils.Optimizer`): optimizer used during training
data_type (str): string describing the type of data
e.g. "text", "img", "audio"
model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object
used to save the model
"""
grad_accum_count = args.accum_count
n_gpu = args.world_size
if device_id >= 0:
gpu_rank = int(args.gpu_ranks[device_id])
else:
gpu_rank = 0
n_gpu = 0
print('gpu_rank %d' % gpu_rank)
tensorboard_log_dir = args.model_path
writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)
trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)
# print(tr)
if (model):
n_params = _tally_parameters(model)
logger.info('* number of parameters: %d' % n_params)
return trainer | Simplify `Trainer` creation based on user `opt`s* Args: opt (:obj:`Namespace`): user options (usually from argument parsing) model (:obj:`onmt.models.NMTModel`): the model to train fields (dict): dict of fields optim (:obj:`onmt.utils.Optimizer`): optimizer used during training data_type (str): string describing the type of data e.g. "text", "img", "audio" model_saver(:obj:`onmt.models.ModelSaverBase`): the utility object used to save the model |
18,767 | import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
import h5py
import distributed
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset, Dataset)
from models.reporter import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
def convert_instance_to_feature_hdf5(example):
input_x1 = example[0]
input_x2 = example[1]
feature = dict()
feature['input_x1'] = input_x1
feature['input_x2'] = input_x2
# Roberta
#feature['eos_index'] = [2, 1]
# Bert
feature['eos_index'] = [102, 0]
return feature | null |
18,768 | import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
import h5py
import distributed
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset, Dataset)
from models.reporter import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
class Batch(object):
def __init__(self, src, tgt, mask_src, mask_tgt):
self.src = src.to("cuda")
self.tgt = tgt.to("cuda")
self.mask_src = mask_src.to("cuda")
self.mask_tgt = mask_tgt.to("cuda")
def batchify(batch):
eos_index, pad_index = batch[0]["eos_index"]
def batch_sentences(sentences, is_tgt=False, eos_index=eos_index):
lengths = torch.LongTensor([len(s) for s in sentences])
max_len = lengths.max().item()
sent = torch.LongTensor(lengths.size(0), max_len).fill_(pad_index)
for i, s in enumerate(sentences):
sent[i, :len(s)].copy_(torch.from_numpy(s.astype(np.int64)))
return sent, max_len
src, max_src = batch_sentences([feature["input_x1"] for feature in batch])
tgt, max_tgt = batch_sentences([feature["input_x2"] for feature in batch], is_tgt=True)
mask_src = ~(src == pad_index)
mask_tgt = ~(tgt == pad_index)
return Batch(src, tgt, mask_src, mask_tgt) | null |
18,769 | import bisect
import os
import gc
import glob
import random
import torch
from others.logging import logger
def abs_batch_size_fn(new, count):
src, tgt = new[0], new[1]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents=0
max_n_tokens=0
max_n_sents = max(max_n_sents, len(tgt))
#max_n_sents = max(max_n_sents, 10)
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
if (count > 6):
return src_elements + 1e3
return src_elements | null |
18,770 | import bisect
import os
import gc
import glob
import random
import torch
from others.logging import logger
def ext_batch_size_fn(new, count):
if (len(new) == 4):
pass
src, labels = new[0], new[4]
global max_n_sents, max_n_tokens, max_size
if count == 1:
max_size = 0
max_n_sents = 0
max_n_tokens = 0
max_n_sents = max(max_n_sents, len(src))
max_size = max(max_size, max_n_sents)
src_elements = count * max_size
return src_elements | null |
18,771 | from __future__ import print_function
from datetime import datetime
import time
import math
import sys
from distributed import all_gather_list
from others.logging import logger
class ReportMgr(ReportMgrBase):
def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
"""
A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None
"""
super(ReportMgr, self).__init__(report_every, start_time)
self.tensorboard_writer = tensorboard_writer
def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
if self.tensorboard_writer is not None:
stats.log_tensorboard(
prefix, self.tensorboard_writer, learning_rate, step)
def _report_training(self, step, num_steps, learning_rate,
report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps,
learning_rate, self.start_time)
# Log the progress using the number of batches on the x-axis.
self.maybe_log_tensorboard(report_stats,
"progress",
learning_rate,
step)
report_stats = Statistics()
return report_stats
def _report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
See base class method `ReportMgrBase.report_step`.
"""
if train_stats is not None:
self.log('Train perplexity: %g' % train_stats.ppl())
self.log('Train accuracy: %g' % train_stats.accuracy())
self.maybe_log_tensorboard(train_stats,
"train",
lr,
step)
if valid_stats is not None:
self.log('Validation perplexity: %g' % valid_stats.ppl())
self.log('Validation accuracy: %g' % valid_stats.accuracy())
self.maybe_log_tensorboard(valid_stats,
"valid",
lr,
step)
def build_report_manager(opt):
if opt.tensorboard:
from tensorboardX import SummaryWriter
writer = SummaryWriter(opt.tensorboard_log_dir
+ datetime.now().strftime("/%b-%d_%H-%M-%S"),
comment="Unmt")
else:
writer = None
report_mgr = ReportMgr(opt.report_every, start_time=-1,
tensorboard_writer=writer)
return report_mgr | null |
18,772 | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.reporter import Statistics
def filter_shard_state(state, shard_size=None):
""" ? """
for k, v in state.items():
if shard_size is None:
yield k, v
if v is not None:
v_split = []
if isinstance(v, torch.Tensor):
for v_chunk in torch.split(v, shard_size):
v_chunk = v_chunk.data.clone()
v_chunk.requires_grad = v.requires_grad
v_split.append(v_chunk)
yield k, (v, v_split)
The provided code snippet includes necessary dependencies for implementing the `shards` function. Write a Python function `def shards(state, shard_size, eval_only=False)` to solve the following problem:
Args: state: A dictionary which corresponds to the output of *LossCompute._make_shard_state(). The values for those keys are Tensor-like or None. shard_size: The maximum size of the shards yielded by the model. eval_only: If True, only yield the state, nothing else. Otherwise, yield shards. Yields: Each yielded shard is a dict. Side effect: After the last shard, this function does back-propagation.
Here is the function:
def shards(state, shard_size, eval_only=False):
"""
Args:
state: A dictionary which corresponds to the output of
*LossCompute._make_shard_state(). The values for
those keys are Tensor-like or None.
shard_size: The maximum size of the shards yielded by the model.
eval_only: If True, only yield the state, nothing else.
Otherwise, yield shards.
Yields:
Each yielded shard is a dict.
Side effect:
After the last shard, this function does back-propagation.
"""
if eval_only:
yield filter_shard_state(state)
else:
# non_none: the subdict of the state dictionary where the values
# are not None.
non_none = dict(filter_shard_state(state, shard_size))
#print ("non_none: ", non_none)
# Now, the iteration:
# state is a dictionary of sequences of tensor-like but we
# want a sequence of dictionaries of tensors.
# First, unzip the dictionary into a sequence of keys and a
# sequence of tensor-like sequences.
keys, values = zip(*((k, [v_chunk for v_chunk in v_split])
for k, (_, v_split) in non_none.items()))
# Now, yield a dictionary for each shard. The keys are always
# the same. values is a sequence of length #keys where each
# element is a sequence of length #shards. We want to iterate
# over the shards, not over the keys: therefore, the values need
# to be re-zipped by shard and then each shard can be paired
# with the keys.
for shard_tensors in zip(*values):
yield dict(zip(keys, shard_tensors))
# Assumed backprop'd
variables = []
for k, (v, v_split) in non_none.items():
if isinstance(v, torch.Tensor) and state[k].requires_grad:
variables.extend(zip(torch.split(state[k], shard_size),
[v_chunk.grad for v_chunk in v_split]))
inputs, grads = zip(*variables)
#print("inputs: ", inputs)
#print("grads: ", grads)
torch.autograd.backward(inputs, grads) | Args: state: A dictionary which corresponds to the output of *LossCompute._make_shard_state(). The values for those keys are Tensor-like or None. shard_size: The maximum size of the shards yielded by the model. eval_only: If True, only yield the state, nothing else. Otherwise, yield shards. Yields: Each yielded shard is a dict. Side effect: After the last shard, this function does back-propagation. |
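A minimal sketch of the two-stage backward that `shards` performs, assuming torch and the two functions above are in scope (the shapes and loss are illustrative):
x = torch.randn(6, 4, requires_grad=True)
state = {'output': x * 2}  # a non-leaf tensor that carries autograd history
for shard in shards(state, shard_size=2):
    shard['output'].sum().backward()  # accumulates grads on each detached chunk
print(x.grad)  # filled in by the final torch.autograd.backward inside shards
# every entry is 2.0, since d(sum(2 * x)) / dx = 2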
18,773 | from __future__ import print_function
import sys
import time
from datetime import datetime
from others.logging import logger
class ReportMgr(ReportMgrBase):
def __init__(self, report_every, start_time=-1., tensorboard_writer=None):
"""
A report manager that writes statistics on standard output as well as
(optionally) TensorBoard
Args:
report_every(int): Report status every this many sentences
tensorboard_writer(:obj:`tensorboard.SummaryWriter`):
The TensorBoard Summary writer to use or None
"""
super(ReportMgr, self).__init__(report_every, start_time)
self.tensorboard_writer = tensorboard_writer
def maybe_log_tensorboard(self, stats, prefix, learning_rate, step):
if self.tensorboard_writer is not None:
stats.log_tensorboard(
prefix, self.tensorboard_writer, learning_rate, step)
def _report_training(self, step, num_steps, learning_rate,
report_stats):
"""
See base class method `ReportMgrBase.report_training`.
"""
report_stats.output(step, num_steps,
learning_rate, self.start_time)
# Log the progress using the number of batches on the x-axis.
self.maybe_log_tensorboard(report_stats,
"progress",
learning_rate,
self.progress_step)
report_stats = Statistics()
return report_stats
def _report_step(self, lr, step, train_stats=None, valid_stats=None):
"""
See base class method `ReportMgrBase.report_step`.
"""
if train_stats is not None:
self.log('Train xent: %g' % train_stats.xent())
self.maybe_log_tensorboard(train_stats,
"train",
lr,
step)
if valid_stats is not None:
self.log('Validation xent: %g at step %d' % (valid_stats.xent(), step))
self.maybe_log_tensorboard(valid_stats,
"valid",
lr,
step)
def build_report_manager(opt):
if opt.tensorboard:
from tensorboardX import SummaryWriter
tensorboard_log_dir = opt.tensorboard_log_dir
if not opt.train_from:
tensorboard_log_dir += datetime.now().strftime("/%b-%d_%H-%M-%S")
writer = SummaryWriter(tensorboard_log_dir,
comment="Unmt")
else:
writer = None
report_mgr = ReportMgr(opt.report_every, start_time=-1,
tensorboard_writer=writer)
return report_mgr | null |
18,774 | import torch
import torch.optim as optim
from torch.nn.utils import clip_grad_norm_
def use_gpu(opt):
"""
Returns a boolean indicating whether a GPU is used.
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1)
class Optimizer(object):
"""
Controller class for optimization. Mostly a thin
wrapper for `optim`, but also useful for implementing
rate scheduling beyond what is currently available.
Also implements necessary methods for training RNNs such
as grad manipulations.
Args:
method (:obj:`str`): one of [sgd, adagrad, adadelta, adam]
lr (float): learning rate
lr_decay (float, optional): learning rate decay multiplier
start_decay_steps (int, optional): step to start learning rate decay
beta1, beta2 (float, optional): parameters for adam
adagrad_accum (float, optional): initialization parameter for adagrad
decay_method (str, option): custom decay options
warmup_steps (int, option): parameter for `noam` decay
model_size (int, option): parameter for `noam` decay
We use the default parameters for Adam that are suggested by
the original paper https://arxiv.org/pdf/1412.6980.pdf
These values are also used by other established implementations,
e.g. https://www.tensorflow.org/api_docs/python/tf/train/AdamOptimizer
https://keras.io/optimizers/
Recently there are slightly different values used in the paper
"Attention is all you need"
https://arxiv.org/pdf/1706.03762.pdf, particularly the value beta2=0.98
was used there however, beta2=0.999 is still arguably the more
established value, so we use that here as well
"""
def __init__(self, method, learning_rate, max_grad_norm,
lr_decay=1, start_decay_steps=None, decay_steps=None,
beta1=0.9, beta2=0.999,
adagrad_accum=0.0,
decay_method=None,
warmup_steps=4000, weight_decay=0):
self.last_ppl = None
self.learning_rate = learning_rate
self.original_lr = learning_rate
self.max_grad_norm = max_grad_norm
self.method = method
self.lr_decay = lr_decay
self.start_decay_steps = start_decay_steps
self.decay_steps = decay_steps
self.start_decay = False
self._step = 0
self.betas = [beta1, beta2]
self.adagrad_accum = adagrad_accum
self.decay_method = decay_method
self.warmup_steps = warmup_steps
self.weight_decay = weight_decay
def set_parameters(self, params):
""" ? """
self.params = []
self.sparse_params = []
for k, p in params:
if p.requires_grad:
if self.method != 'sparseadam' or "embed" not in k:
self.params.append(p)
else:
self.sparse_params.append(p)
if self.method == 'sgd':
self.optimizer = optim.SGD(self.params, lr=self.learning_rate)
elif self.method == 'adagrad':
self.optimizer = optim.Adagrad(self.params, lr=self.learning_rate)
for group in self.optimizer.param_groups:
for p in group['params']:
self.optimizer.state[p]['sum'] = self.optimizer\
.state[p]['sum'].fill_(self.adagrad_accum)
elif self.method == 'adadelta':
self.optimizer = optim.Adadelta(self.params, lr=self.learning_rate)
elif self.method == 'adam':
self.optimizer = optim.Adam(self.params, lr=self.learning_rate,
betas=self.betas, eps=1e-9)
else:
raise RuntimeError("Invalid optim method: " + self.method)
def _set_rate(self, learning_rate):
self.learning_rate = learning_rate
if self.method != 'sparseadam':
self.optimizer.param_groups[0]['lr'] = self.learning_rate
else:
for op in self.optimizer.optimizers:
op.param_groups[0]['lr'] = self.learning_rate
def step(self):
"""Update the model parameters based on current gradients.
Optionally, will employ gradient modification or update learning
rate.
"""
self._step += 1
# Decay method used in tensor2tensor.
if self.decay_method == "noam":
self._set_rate(
self.original_lr *
min(self._step ** (-0.5),
self._step * self.warmup_steps**(-1.5)))
else:
if ((self.start_decay_steps is not None) and (
self._step >= self.start_decay_steps)):
self.start_decay = True
if self.start_decay:
if ((self._step - self.start_decay_steps)
% self.decay_steps == 0):
self.learning_rate = self.learning_rate * self.lr_decay
if self.method != 'sparseadam':
self.optimizer.param_groups[0]['lr'] = self.learning_rate
if self.max_grad_norm:
clip_grad_norm_(self.params, self.max_grad_norm)
self.optimizer.step()
The provided code snippet includes necessary dependencies for implementing the `build_optim` function. Write a Python function `def build_optim(model, opt, checkpoint)` to solve the following problem:
Build optimizer
Here is the function:
def build_optim(model, opt, checkpoint):
""" Build optimizer """
saved_optimizer_state_dict = None
if opt.train_from:
optim = checkpoint['optim']
# We need to save a copy of optim.optimizer.state_dict() for restoring
# the optimizer state later on in Stage 2 of this method, since
# the method optim.set_parameters(model.named_parameters()) will overwrite
# optim.optimizer, and with it the values stored in
# optim.optimizer.state_dict()
saved_optimizer_state_dict = optim.optimizer.state_dict()
else:
optim = Optimizer(
opt.optim, opt.learning_rate, opt.max_grad_norm,
lr_decay=opt.learning_rate_decay,
start_decay_steps=opt.start_decay_steps,
decay_steps=opt.decay_steps,
beta1=opt.adam_beta1,
beta2=opt.adam_beta2,
adagrad_accum=opt.adagrad_accumulator_init,
decay_method=opt.decay_method,
warmup_steps=opt.warmup_steps)
optim.set_parameters(model.named_parameters())
if opt.train_from:
optim.optimizer.load_state_dict(saved_optimizer_state_dict)
if use_gpu(opt):
for state in optim.optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = v.cuda()
if (optim.method == 'adam') and (len(optim.optimizer.state) < 1):
raise RuntimeError(
"Error: loaded Adam optimizer from existing model" +
" but optimizer state is empty")
return optim | Build optimizer |
18,775 | from __future__ import print_function
import codecs
import torch.nn as nn
import torch.nn.functional as F
import subprocess
import os
import math
import json
import torch
from tensorboardX import SummaryWriter
from others.utils import rouge_results_to_str, test_rouge, tile
from translate.beam import GNMTGlobalScorer
def top_k_top_p_filtering(logits, top_k=10, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1)) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p < 1.0:
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
# Remove tokens with cumulative probability above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# scatter sorted tensors to original indexing
indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
logits[indices_to_remove] = filter_value
return logits | null |
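A hedged usage sketch of the filter (the logits are illustrative; torch and F are imported at the top of this snippet):
logits = torch.tensor([[2.0, 1.0, 0.5, -1.0]])
filtered = top_k_top_p_filtering(logits.clone(), top_k=2)
print(filtered)  # tensor([[2., 1., -inf, -inf]])
probs = F.softmax(filtered, dim=-1)  # sampling now only ever selects the top-2 tokens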
18,776 | import copy
import torch
import torch.nn as nn
from others.transformers import BertModel, BertConfig
from others.transformers import RobertaModel, RobertaConfig
from torch.nn.init import xavier_uniform_
from models.decoder import TransformerDecoder
from models.encoder import Classifier, ExtTransformerEncoder
from models.optimizers import Optimizer
def get_generator(vocab_size, dec_hidden_size, device):
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(dec_hidden_size, vocab_size),
gen_func
)
generator.to(device)
return generator | null |
18,777 | import math
import torch
import torch.nn as nn
import torch
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `aeq` function. Write a Python function `def aeq(*args)` to solve the following problem:
Assert all arguments have the same value
Here is the function:
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args) | Assert all arguments have the same value |
18,778 | import math
import torch
import torch.nn as nn
import torch
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `sequence_mask` function. Write a Python function `def sequence_mask(lengths, max_len=None)` to solve the following problem:
Creates a boolean mask from sequence lengths.
Here is the function:
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1))) | Creates a boolean mask from sequence lengths. |
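A small worked example (the lengths are illustrative):
lengths = torch.tensor([2, 4])
print(sequence_mask(lengths, max_len=5))
# tensor([[ True,  True, False, False, False],
#         [ True,  True,  True,  True, False]]) (dtype may be uint8 on older torch)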
18,779 | import math
import torch
import torch.nn as nn
import torch
import torch.nn as nn
import torch.nn.functional as F
def gelu(x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) | null |
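A quick numeric check of the tanh approximation (values are rounded to four decimals):
x = torch.tensor([-1.0, 0.0, 1.0])
print(gelu(x))
# tensor([-0.1588, 0.0000, 0.8412]), matching the exact x * Phi(x) to about 1e-3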
18,780 | import torch
import torch.nn as nn
import numpy as np
from models.encoder import PositionalEncoding
from models.neural import MultiHeadedAttention, PositionwiseFeedForward, DecoderState
class LearnedPositionalEmbedding(nn.Embedding):
"""
This module learns positional embeddings up to a fixed maximum size.
Padding ids are ignored by either offsetting based on padding_idx
or by setting padding_idx to None and ensuring that the appropriate
position ids are passed to the forward function.
"""
def __init__(
self,
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
):
super().__init__(num_embeddings, embedding_dim, padding_idx)
self.onnx_trace = False
def forward(self, input, incremental_state=None, positions=None):
"""Input is expected to be of size [bsz x seqlen]."""
assert (
(positions is None) or (self.padding_idx is None)
), "If positions is pre-computed then padding_idx should not be set."
if positions is None:
if incremental_state is not None:
# positions is the same for every token when decoding a single step
# Without the int() cast, it doesn't work in some cases when exporting to ONNX
positions = input.data.new(1, 1).fill_(int(self.padding_idx + input.size(1)))
else:
positions = utils.make_positions(
input, self.padding_idx, onnx_trace=self.onnx_trace,
)
return super().forward(positions)
def max_positions(self):
"""Maximum number of supported positions."""
if self.padding_idx is not None:
return self.num_embeddings - self.padding_idx - 1
else:
return self.num_embeddings
def PositionalEmbedding(
num_embeddings: int,
embedding_dim: int,
padding_idx: int,
learned: bool = False,
):
if learned:
# if padding_idx is specified then offset the embedding ids by
# this index and adjust num_embeddings appropriately
# TODO: The right place for this offset would be inside
# LearnedPositionalEmbedding. Move this there for a cleaner implementation.
if padding_idx is not None:
num_embeddings = num_embeddings + padding_idx + 1
m = LearnedPositionalEmbedding(num_embeddings, embedding_dim, padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim ** -0.5)
if padding_idx is not None:
nn.init.constant_(m.weight[padding_idx], 0)
else:
m = SinusoidalPositionalEmbedding(
embedding_dim, padding_idx, init_size=num_embeddings + padding_idx + 1,
)
return m | null |
18,781 | from __future__ import absolute_import, division, print_function
import csv
import os
import textwrap
import numpy as np
import six
import datasets
def _mnli_split_generator(name, data_dir, split, matched):
return datasets.SplitGenerator(
name=name,
gen_kwargs={
"data_file": os.path.join(data_dir, "%s_%s.tsv" % (split, "matched" if matched else "mismatched")),
"split": split,
"mrpc_files": None,
},
) | null |
18,782 | import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import load_dataset, load_metric
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
PretrainedConfig,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from ChildTuningD import ChildTuningDtrainer
from ChildTuningF import ChildTuningFtrainer
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("glue.py", data_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files.keys():
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if sorted(label_name_to_id.keys()) == sorted(label_list):
            label_to_id = {i: label_name_to_id[label_list[i]] for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
                "\nIgnoring the model labels as a result."
            )
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=data_args.max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [label_to_id[l] for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
train_dataset = datasets["train"]
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.task_name is not None or data_args.test_file is not None:
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
# Log a few random samples from the training set:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("metric.py", data_args.task_name)
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
assert model_args.mode in ['ChildTuning-F', 'ChildTuning-D', None]
if model_args.mode is None:
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
)
else:
if model_args.mode == 'ChildTuning-F':
trainer_cls = ChildTuningFtrainer
elif model_args.mode == 'ChildTuning-D':
trainer_cls = ChildTuningDtrainer
trainer = trainer_cls(
model=model,
args=training_args,
train_dataset=train_dataset,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
tokenizer=tokenizer,
data_collator=data_collator,
reserve_p=model_args.reserve_p,
mode=model_args.mode
)
# Training
if training_args.do_train:
if last_checkpoint is not None:
checkpoint = last_checkpoint
elif os.path.isdir(model_args.model_name_or_path):
checkpoint = model_args.model_name_or_path
else:
checkpoint = None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
output_train_file = os.path.join(training_args.output_dir, "train_results.txt")
if trainer.is_world_process_zero():
with open(output_train_file, "w") as writer:
logger.info("***** Train results *****")
for key, value in sorted(metrics.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))
# Evaluation
eval_results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
for eval_dataset, task in zip(eval_datasets, tasks):
eval_result = trainer.evaluate(eval_dataset=eval_dataset)
output_eval_file = os.path.join(training_args.output_dir, f"eval_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_eval_file, "w") as writer:
logger.info(f"***** Eval results {task} *****")
for key, value in sorted(eval_result.items()):
logger.info(f" {key} = {value}")
writer.write(f"{key} = {value}\n")
eval_results.update(eval_result)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
test_datasets.append(datasets["test_mismatched"])
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
test_dataset.remove_columns_("label")
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
return eval_results
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
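A minimal sketch of what the `compute_metrics` hook above receives; `EvalPrediction` is the real `transformers` container, while the logits and labels here are invented toy values:
import numpy as np
from transformers import EvalPrediction

logits = np.array([[2.0, 0.1, -1.0], [0.2, 1.5, 0.3]])   # toy model outputs, 3 classes
labels = np.array([0, 1])
p = EvalPrediction(predictions=logits, label_ids=labels)
preds = np.argmax(p.predictions, axis=1)                  # -> array([0, 1])
print({"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()})  # {'accuracy': 1.0}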
18,783 | from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"accuracy": acc,
"f1": f1,
} | null |
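A quick sanity check for the two helpers above, with invented predictions:
import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(simple_accuracy(preds, labels))  # 0.75
print(acc_and_f1(preds, labels))       # accuracy 0.75, f1 0.8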
18,784 | from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
} | null |
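Similarly, for the correlation metrics (toy regression outputs):
import numpy as np

preds = np.array([0.1, 0.4, 0.35, 0.8])
labels = np.array([0.0, 0.5, 0.3, 0.9])
print(pearson_and_spearman(preds, labels))  # pearson close to 1.0; spearmanr exactly 1.0 (same rank order)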
18,785 | import argparse
import os
import ruamel_yaml as yaml
import language_evaluation
from torch.autograd import Variable
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, coco_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import language_evaluation.coco_caption_py3.pycocoevalcap as evaluation_tools
import multiprocessing
import itertools
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_amp=False,
do_two_optim=False, do_accum=False, accum_steps=1):
def evaluation(model, data_loader, tokenizer, device, config, test_submit=False):
def cal_metric(result_file):
def save_result(result, result_dir, filename, is_json=True, is_list=True, remove_duplicate=""):
def train_scst(model, data_loader, test_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_amp=False,
do_two_optim=False, do_accum=False, accum_steps=1):
# train
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
if do_two_optim:
metric_logger.add_meter('lr1', utils.SmoothedValue(window_size=50, fmt='{value:.7f}'))
metric_logger.add_meter('lr2', utils.SmoothedValue(window_size=50, fmt='{value:.7f}'))
else:
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.7f}'))
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
step_size = 100
warmup_iterations = warmup_steps * step_size
beam_size=args.beam_size
tokenizer_pool=multiprocessing.Pool()
best_cider = 0.0
for i, (image, caption, object_labels, image_ids, gold_caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device,non_blocking=True)
caption = [each+config['eos'] for each in caption]
question_input = [config['bos']]*len(caption)
caption = tokenizer(caption, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
question_input = tokenizer(question_input, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
topk_ids, topk_probs = model(image, question_input, caption, train=True,out_size=beam_size,scst=True)
probs_list=[]
for item in topk_probs:
probs_list.append(torch.stack(item,dim=0))
topk_probs_tensor=torch.stack(probs_list,dim=0)
caps_gen=[]
topk_words=[]
for img_item in topk_ids:
words=[]
for item in img_item:
caps_gen.append(tokenizer.decode(item).replace("[SEP]", "").replace("[CLS]", "").replace("[PAD]", "").strip())
words.append(item.numel())
topk_words.append(words)
topk_words_tensor = torch.Tensor(topk_words).cuda()
caps_gt = gold_caption
caps_gt = list(itertools.chain(*([c, ] * beam_size for c in caps_gt)))
caps_gen,caps_gt = tokenizer_pool.map(evaluation_tools.PTBTokenizer.tokenize,[caps_gen,caps_gt])
reward=evaluation_tools.compute_ciders(caps_gt,caps_gen)[1].astype(np.float32)
reward = torch.from_numpy(reward).cuda().view(image.shape[0], beam_size)
reward_baseline = torch.mean(reward, -1, keepdim=True)
loss = - (topk_probs_tensor/topk_words_tensor) * (reward-reward_baseline)
loss = loss.mean()
#loss.requires_grad_(True)
#loss = Variable(loss, requires_grad = True)
loss.backward()
optimizer.step()
optimizer.zero_grad()
metric_logger.update(loss=loss.item())
if do_two_optim:
metric_logger.update(lr1=optimizer.param_groups[0]["lr"])
metric_logger.update(lr2=optimizer.param_groups[2]["lr"])
else:
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
if epoch == 0 and i % step_size == 0 and i <= warmup_iterations:
scheduler.step(i // step_size)
del image, question_input,caption,loss
if i > 0 and i % args.eval_steps == 0:
vqa_result = evaluation(model, test_loader, tokenizer, device, config)
result_file = save_result(vqa_result, args.result_dir, 'vqa_result_%d' % i)
model.eval()
if utils.is_main_process():
result = cal_metric(result_file)
print('*'*100)
print(type(result))
print(result)
with open(os.path.join(args.output_dir, "log.txt"), "a") as f:
f.write(json.dumps({'Starting_Training':result}) + "\n")
if result["CIDEr"]*100 > best_cider:
best_cider = result["CIDEr"]*100
torch.save({
'model': model.module.state_dict(),
'optimizer': optimizer.state_dict(),
'config': config,
'epoch': i,
}, os.path.join(args.output_dir, 'checkpoint_best.pth'))
dist.barrier()
model.train()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} | null |
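A toy version of the self-critical (SCST) loss computed in the loop above, i.e. length-normalized log-probs weighted by reward minus the per-image baseline; all numbers are invented:
import torch

logprobs = torch.tensor([[-2.0, -3.0]])   # summed log-prob per beam candidate
lengths = torch.tensor([[4.0, 5.0]])      # token count per candidate
reward = torch.tensor([[1.2, 0.8]])       # e.g. CIDEr per candidate
baseline = reward.mean(-1, keepdim=True)  # 1.0
loss = (-(logprobs / lengths) * (reward - baseline)).mean()
print(loss)  # tensor(-0.0100): gradient pushes up the above-baseline candidate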
18,786 | import argparse
import os
import ruamel_yaml as yaml
import language_evaluation
from torch.autograd import Variable
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, coco_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import language_evaluation.coco_caption_py3.pycocoevalcap as evaluation_tools
import multiprocessing
import itertools
def cal_metric(result_file):
result_list = json.load(open(result_file, "r"))
predicts = []
answers = []
for each in result_list:
predicts.append(each["pred_caption"])
answers.append(each["gold_caption"])
evaluator = language_evaluation.CocoEvaluator(verbose=False)
results = evaluator.run_evaluation(predicts, answers)
print (len(result_list), results)
return results
def evaluate(model, data_loader, dataset, tokenizer, device, config):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Evaluation:'
print_freq = 50
predicts = []
answers = []
answer_input = None
for n, (image, caption, image_ids, gold_caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device,non_blocking=True)
caption = [each+config['eos'] for each in caption]
question_input = [config['bos']]*len(caption)
caption = tokenizer(caption, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
question_input = tokenizer(question_input, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
for i in range(len(gold_caption)):
predicts.append(gold_caption[i][0])
answers.append(gold_caption[i])
print (predicts, answers)
#{'Bleu_1': 0.9999999999863945, 'Bleu_2': 0.9999999999859791, 'Bleu_3': 0.9999999999854866, 'Bleu_4': 0.999999999984889, 'METEOR': 1.0, 'ROUGE_L': 1.0, 'CIDEr': 2.7246232035629268, 'SPICE': 0.40389416048620613}
    evaluator = language_evaluation.CocoEvaluator(verbose=False)  # cal_metric above expects a result-file path, so score the lists directly
    result = evaluator.run_evaluation(predicts, answers)
    metric_logger.meters['Bleu_1'].update(result["Bleu_1"], n=image.size(0))
    metric_logger.meters['Bleu_2'].update(result["Bleu_2"], n=image.size(0))
    metric_logger.meters['Bleu_3'].update(result["Bleu_3"], n=image.size(0))
    metric_logger.meters['Bleu_4'].update(result["Bleu_4"], n=image.size(0))
# gather the stats from all processes
torch.cuda.empty_cache()
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} | null |
18,787 | import numpy as np
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
def compute_acc(logits, label, reduction='mean'):
ret = (torch.argmax(logits, dim=1) == label).float()
if reduction == 'none':
return ret.detach()
elif reduction == 'mean':
return ret.mean().item() | null |
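Quick usage of compute_acc with toy logits:
import torch

logits = torch.tensor([[0.2, 0.8], [0.9, 0.1]])
label = torch.tensor([1, 1])
print(compute_acc(logits, label))                    # 0.5
print(compute_acc(logits, label, reduction='none'))  # tensor([1., 0.])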
18,788 | import numpy as np
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
def compute_n_params(model, return_str=True):
tot = 0
for p in model.parameters():
w = 1
for x in p.shape:
w *= x
tot += w
if return_str:
if tot >= 1e6:
return '{:.1f}M'.format(tot / 1e6)
else:
return '{:.1f}K'.format(tot / 1e3)
else:
return tot | null |
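For example, a single linear layer:
import torch

model = torch.nn.Linear(128, 64)                   # 128*64 weights + 64 biases = 8256
print(compute_n_params(model))                     # '8.3K'
print(compute_n_params(model, return_str=False))   # 8256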
18,789 | import numpy as np
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
def is_main_process():
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs) | null |
18,790 | import numpy as np
import io
import os
import time
from collections import defaultdict, deque
import datetime
import torch
import torch.distributed as dist
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0) | null |
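A single-process sanity check: without RANK/WORLD_SIZE (or SLURM variables) in the environment, the function falls back to non-distributed mode; under a real launch these variables are set by the launcher (e.g. torchrun).
import argparse

args = argparse.Namespace(dist_url='env://')
init_distributed_mode(args)    # prints 'Not using distributed mode' when no launcher env vars are set
print(args.distributed)        # False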
18,791 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
from models.tokenization_bert import BertTokenizer
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.model_retrieval_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
import utils
from dataset.video_dataset import VideoDataset
def evaluation(model, data_loader, tokenizer, device, config):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Evaluation:'
print('Computing features for evaluation...')
start_time = time.time()
texts = data_loader.dataset.text
num_text = len(texts)
text_bs = 256
text_feats = []
text_embeds = []
text_atts = []
for i in range(0, num_text, text_bs):
text = texts[i: min(num_text, i + text_bs)]
text_input = tokenizer(text, padding='max_length', truncation=True, max_length=35, return_tensors="pt").to(
device)
text_output = model.text_encoder(text_input.input_ids, attention_mask=text_input.attention_mask)
text_feat = text_output.last_hidden_state
text_embed = F.normalize(model.text_proj(text_output.last_hidden_state[:, 0, :]))
text_embeds.append(text_embed)
text_feats.append(text_feat)
text_atts.append(text_input.attention_mask)
text_embeds = torch.cat(text_embeds, dim=0)
text_feats = torch.cat(text_feats, dim=0)
text_atts = torch.cat(text_atts, dim=0)
video_feats = []
video_embeds = []
for video, video_id in data_loader:
B, N, C, W, H = video.size()
video = video.view(-1, C, W, H)
video = video.to(device, non_blocking=True)
video_feat = model.visual_encoder.visual(video, skip_last_layer=True)
video_feat = model.visn_layer_norm(model.visn_fc(video_feat))
video_embed = model.vision_proj(video_feat[:, 0, :])
video_embed = video_embed.view(B, N, -1).mean(dim=1)
video_embed = F.normalize(video_embed, dim=-1)
video_feat = video_feat.view(B, -1, video_feat.shape[-1])
video_feats.append(video_feat.cpu())
video_embeds.append(video_embed)
video_feats = torch.cat(video_feats, dim=0)
video_embeds = torch.cat(video_embeds, dim=0)
sims_matrix = video_embeds @ text_embeds.t()
score_matrix_v2t = torch.full((len(texts), len(texts)), -100.0).to(device)
num_tasks = utils.get_world_size()
rank = utils.get_rank()
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
encoder_output = video_feats[start + i].repeat(config['k_test'], 1, 1).to(device, non_blocking=True)
encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device, non_blocking=True)
_, output = model.fusion_encoder(encoder_embeds=text_feats[topk_idx],
attention_mask=text_atts[topk_idx],
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_att,
return_dict=False,
)
score = model.itm_head(output[:, 0, :])[:, 1]
score_matrix_v2t[start + i, topk_idx] = score + topk_sim
sims_matrix = sims_matrix.t()
score_matrix_t2v = torch.full((len(texts), len(texts)), -100.0).to(device)
step = sims_matrix.size(0) // num_tasks + 1
start = rank * step
end = min(sims_matrix.size(0), start + step)
for i, sims in enumerate(metric_logger.log_every(sims_matrix[start:end], 50, header)):
topk_sim, topk_idx = sims.topk(k=config['k_test'], dim=0)
encoder_output = video_feats[topk_idx].to(device, non_blocking=True)
encoder_att = torch.ones(encoder_output.size()[:-1], dtype=torch.long).to(device, non_blocking=True)
_, output = model.fusion_encoder(encoder_embeds=text_feats[start + i].repeat(config['k_test'], 1, 1),
attention_mask=text_atts[start + i].repeat(config['k_test'], 1),
encoder_hidden_states=encoder_output,
encoder_attention_mask=encoder_att,
return_dict=False,
)
score = model.itm_head(output[:, 0, :])[:, 1]
score_matrix_t2v[start + i, topk_idx] = score + topk_sim
if args.distributed:
dist.barrier()
torch.distributed.all_reduce(score_matrix_v2t, op=torch.distributed.ReduceOp.SUM)
torch.distributed.all_reduce(score_matrix_t2v, op=torch.distributed.ReduceOp.SUM)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Evaluation time {}'.format(total_time_str))
return score_matrix_v2t.cpu().numpy(), score_matrix_t2v.cpu().numpy() | null |
18,792 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
from models.tokenization_bert import BertTokenizer
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from torch.utils.data import DataLoader
from models.model_retrieval_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
import utils
from dataset.video_dataset import VideoDataset
def itm_eval(scores_v2t, scores_t2v, txt2vmg, vid2txt):
# Video->Text
ranks = np.zeros(scores_v2t.shape[0])
for index, score in enumerate(scores_v2t):
inds = np.argsort(score)[::-1]
ranks[index] = np.where(inds == vid2txt[index])[0][0]
# Compute metrics
tr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
tr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
tr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
# Text->Video
ranks = np.zeros(scores_t2v.shape[0])
for index, score in enumerate(scores_t2v):
inds = np.argsort(score)[::-1]
ranks[index] = np.where(inds == txt2vmg[index])[0][0]
mdR = np.median(ranks + 1)
# Compute metrics
vr1 = 100.0 * len(np.where(ranks < 1)[0]) / len(ranks)
vr5 = 100.0 * len(np.where(ranks < 5)[0]) / len(ranks)
vr10 = 100.0 * len(np.where(ranks < 10)[0]) / len(ranks)
tr_mean = (tr1 + tr5 + tr10) / 3
vr_mean = (vr1 + vr5 + vr10) / 3
r_mean = (tr_mean + vr_mean) / 2
eval_result = {'txt_r1': tr1,
'txt_r5': tr5,
'txt_r10': tr10,
'txt_r_mean': tr_mean,
'vid_r1': vr1,
'vid_r5': vr5,
'vid_r10': vr10,
'vid_r_mean': vr_mean,
'vid_mdR': mdR,
'r_mean': r_mean}
return eval_result | null |
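A tiny worked example of itm_eval with invented 2x2 score matrices where each row's ground truth is on the diagonal:
import numpy as np

scores = np.array([[0.9, 0.1],
                   [0.2, 0.8]])
gt = {0: 0, 1: 1}              # row index -> ground-truth column
res = itm_eval(scores, scores, gt, gt)
print(res['txt_r1'], res['vid_r1'], res['vid_mdR'])  # 100.0 100.0 1.0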
18,793 | import cv2
import random, math
import numpy as np
from collections.abc import Iterable
import torch.nn.functional as F
from torch.autograd import Variable
def letterbox(img, mask, height, color=(123.7, 116.3, 103.5)): # resize a rectangular image to a padded square
shape = img.shape[:2] # shape = [height, width]
ratio = float(height) / max(shape) # ratio = old / new
new_shape = (round(shape[1] * ratio), round(shape[0] * ratio))
dw = (height - new_shape[0]) / 2 # width padding
dh = (height - new_shape[1]) / 2 # height padding
top, bottom = round(dh - 0.1), round(dh + 0.1)
left, right = round(dw - 0.1), round(dw + 0.1)
img = cv2.resize(img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # padded square
if mask is not None:
mask = cv2.resize(mask, new_shape, interpolation=cv2.INTER_NEAREST) # resized, no border
# print(top, bottom, left, right)
# input()
mask = cv2.copyMakeBorder(mask, top, bottom, left, right, cv2.BORDER_CONSTANT, value=1) # padded square
# print(mask)
return img, mask, ratio, dw, dh | null |
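For instance, letterboxing a 240x320 image to a 416 square (no mask):
import numpy as np

img = np.zeros((240, 320, 3), dtype=np.uint8)
out, mask, ratio, dw, dh = letterbox(img, None, 416)
print(out.shape, ratio, dw, dh)  # (416, 416, 3) 1.3 0.0 52.0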
18,794 | import cv2
import random, math
import numpy as np
from collections.abc import Iterable
import torch.nn.functional as F
from torch.autograd import Variable
def wrap_points(targets, M, height, a):
# n = targets.shape[0]
# points = targets[:, 1:5].copy()
points = targets.copy()
# area0 = (points[:, 2] - points[:, 0]) * (points[:, 3] - points[:, 1])
area0 = (points[2] - points[0]) * (points[3] - points[1])
# warp points
xy = np.ones((4, 3))
xy[:, :2] = points[[0, 1, 2, 3, 0, 3, 2, 1]].reshape(4, 2) # x1y1, x2y2, x1y2, x2y1
xy = (xy @ M.T)[:, :2].reshape(1, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, 1).T
# apply angle-based reduction
radians = a * math.pi / 180
reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
x = (xy[:, 2] + xy[:, 0]) / 2
y = (xy[:, 3] + xy[:, 1]) / 2
w = (xy[:, 2] - xy[:, 0]) * reduction
h = (xy[:, 3] - xy[:, 1]) * reduction
xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, 1).T
# reject warped points outside of image
np.clip(xy, 0, height, out=xy)
w = xy[:, 2] - xy[:, 0]
h = xy[:, 3] - xy[:, 1]
area = w * h
ar = np.maximum(w / (h + 1e-16), h / (w + 1e-16))
i = (w > 4) & (h > 4) & (area / (area0 + 1e-16) > 0.1) & (ar < 10)
## print(targets, xy)
## [ 56 36 108 210] [[ 47.80464857 15.6096533 106.30993434 196.71267693]]
# targets = targets[i]
# targets[:, 1:5] = xy[i]
targets = xy[0]
return targets
def random_affine(img, mask, targets, degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-2, 2),
borderValue=(123.7, 116.3, 103.5), all_bbox=None):
border = 0 # width of added border (optional)
height = max(img.shape[0], img.shape[1]) + border * 2
# Rotation and Scale
R = np.eye(3)
a = random.random() * (degrees[1] - degrees[0]) + degrees[0]
# a += random.choice([-180, -90, 0, 90]) # 90deg rotations added to small rotations
s = random.random() * (scale[1] - scale[0]) + scale[0]
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(img.shape[1] / 2, img.shape[0] / 2), scale=s)
# Translation
T = np.eye(3)
T[0, 2] = (random.random() * 2 - 1) * translate[0] * img.shape[0] + border # x translation (pixels)
T[1, 2] = (random.random() * 2 - 1) * translate[1] * img.shape[1] + border # y translation (pixels)
# Shear
S = np.eye(3)
S[0, 1] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan((random.random() * (shear[1] - shear[0]) + shear[0]) * math.pi / 180) # y shear (deg)
M = S @ T @ R # Combined rotation matrix. ORDER IS IMPORTANT HERE!!
imw = cv2.warpPerspective(img, M, dsize=(height, height), flags=cv2.INTER_LINEAR,
borderValue=borderValue) # BGR order borderValue
if mask is not None:
maskw = cv2.warpPerspective(mask, M, dsize=(height, height), flags=cv2.INTER_NEAREST,
borderValue=1) # BGR order borderValue
else:
maskw = None
# Return warped points also
    if isinstance(targets, list):
targetlist=[]
for bbox in targets:
targetlist.append(wrap_points(bbox, M, height, a))
return imw, maskw, targetlist, M
elif all_bbox is not None:
targets = wrap_points(targets, M, height, a)
for ii in range(all_bbox.shape[0]):
all_bbox[ii,:] = wrap_points(all_bbox[ii,:], M, height, a)
return imw, maskw, targets, all_bbox, M
elif targets is not None: ## previous main
targets = wrap_points(targets, M, height, a)
return imw, maskw, targets, M
else:
return imw | null |
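A smoke test of random_affine with a single box (outputs vary run to run because the transform is random):
import numpy as np

img = np.full((128, 96, 3), 255, dtype=np.uint8)
bbox = np.array([10, 20, 60, 90], dtype=np.float64)   # x1, y1, x2, y2
warped, _, new_bbox, M = random_affine(img, None, bbox)
print(warped.shape, new_bbox)                         # (128, 128, 3) and the warped box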
18,795 | import torch
import numpy as np
import torch.nn.functional as F
from utils.box_utils import bbox_iou, box_iou, xywh2xyxy, xyxy2xywh, generalized_box_iou  # box_iou is needed by generalized_box_iou below
from utils.misc import get_world_size
from icecream import ic
from matplotlib import pyplot as plt
def xywh2xyxy(x):
x_c, y_c, w, h = x.unbind(-1)
b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
(x_c + 0.5 * w), (y_c + 0.5 * h)]
return torch.stack(b, dim=-1)
def generalized_box_iou(boxes1, boxes2):
"""
Generalized IoU from https://giou.stanford.edu/
The boxes should be in [x0, y0, x1, y1] format
Returns a [N, M] pairwise matrix, where N = len(boxes1)
and M = len(boxes2)
"""
# degenerate boxes gives inf / nan results
# so do an early check
assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
iou, union = box_iou(boxes1, boxes2)
lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
wh = (rb - lt).clamp(min=0) # [N,M,2]
area = wh[:, :, 0] * wh[:, :, 1]
return iou - (area - union) / (area + 1e-5)
The provided code snippet includes necessary dependencies for implementing the `vg_loss` function. Write a Python function `def vg_loss(batch_pred, batch_target)` to solve the following problem:
Compute the losses related to the bounding boxes, including the L1 regression loss and the GIoU loss
Here is the function:
def vg_loss(batch_pred, batch_target):
"""Compute the losses related to the bounding boxes,
including the L1 regression loss and the GIoU loss
"""
batch_size = batch_pred.shape[0]
# world_size = get_world_size()
num_boxes = batch_size
loss_bbox = F.l1_loss(batch_pred, batch_target, reduction='none')
loss_giou = 1 - torch.diag(generalized_box_iou(
xywh2xyxy(batch_pred),
xywh2xyxy(batch_target)
))
losses = {}
losses['loss_bbox'] = loss_bbox.sum() / num_boxes
losses['loss_giou'] = loss_giou.sum() / num_boxes
return losses | Compute the losses related to the bounding boxes, including the L1 regression loss and the GIoU loss |
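A toy call to vg_loss with boxes in normalized (cx, cy, w, h). Note that generalized_box_iou above relies on a pairwise box_iou helper returning (iou, union), assumed here to come from utils.box_utils:
import torch

pred = torch.tensor([[0.5, 0.5, 0.4, 0.4]])
target = torch.tensor([[0.5, 0.5, 0.5, 0.5]])
losses = vg_loss(pred, target)
print(losses['loss_bbox'], losses['loss_giou'])  # ~0.2 and ~0.36 with the standard pairwise box_iou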
18,796 | import torch
from torchvision.ops.boxes import box_area
def xyxy2xywh(x):
x0, y0, x1, y1 = x.unbind(-1)
b = [(x0 + x1) / 2.0, (y0 + y1) / 2.0,
(x1 - x0), (y1 - y0)]
return torch.stack(b, dim=-1) | null |
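Round numbers make the conversion easy to eyeball:
import torch

boxes = torch.tensor([[10., 20., 50., 80.]])  # x0, y0, x1, y1
print(xyxy2xywh(boxes))                       # tensor([[30., 50., 40., 60.]])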
18,797 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
import math
The provided code snippet includes necessary dependencies for implementing the `all_gather` function. Write a Python function `def all_gather(data)` to solve the following problem:
Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank
Here is the function:
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list | Run all_gather on arbitrary picklable data (not necessarily tensors) Args: data: any picklable object Returns: list[data]: list of data gathered from each rank |
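In a single-process run all_gather short-circuits; under torch.distributed every rank receives the same full list. The sketch below supplies the usual is_dist_avail_and_initialized helper, which get_world_size calls but which is not shown in this snippet:
def is_dist_avail_and_initialized():
    # minimal version of the helper assumed elsewhere in the file
    return dist.is_available() and dist.is_initialized()

payload = {'rank': 0, 'metrics': [0.1, 0.2]}
print(all_gather(payload))  # single process: [{'rank': 0, 'metrics': [0.1, 0.2]}]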
18,798 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
import math
The provided code snippet includes necessary dependencies for implementing the `reduce_dict` function. Write a Python function `def reduce_dict(input_dict, average=True)` to solve the following problem:
Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction.
Here is the function:
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict | Args: input_dict (dict): all the values will be reduced average (bool): whether to do average or sum Reduce the values in the dictionary from all processes so that all processes have the averaged results. Returns a dict with the same fields as input_dict, after reduction. |
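Same idea for reduce_dict: with a world size of 1 (again assuming the usual is_dist_avail_and_initialized helper reports no process group) the dict comes back unchanged:
import torch

losses = {'loss_bbox': torch.tensor(0.4), 'loss_giou': torch.tensor(0.6)}
print(reduce_dict(losses))  # unchanged when world_size < 2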
18,799 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
import math
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message | null |
18,800 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
max_size = []
for i in range(tensor_list[0].dim()):
max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)
max_size.append(max_size_i)
max_size = tuple(max_size)
# work around for
# pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
# m[: img.shape[1], :img.shape[2]] = False
# which is not yet supported in onnx
padded_imgs = []
padded_masks = []
for img in tensor_list:
padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
padded_imgs.append(padded_img)
m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
padded_masks.append(padded_mask.to(torch.bool))
tensor = torch.stack(padded_imgs)
mask = torch.stack(padded_masks)
return NestedTensor(tensor, mask=mask)
import math
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
# TODO make this more general
if tensor_list[0].ndim == 3:
if torchvision._is_tracing():
# nested_tensor_from_tensor_list() does not export well to ONNX
# call _onnx_nested_tensor_from_tensor_list() instead
return _onnx_nested_tensor_from_tensor_list(tensor_list)
# TODO make it support different-sized images
max_size = _max_by_axis([list(img.shape) for img in tensor_list])
# min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
batch_shape = [len(tensor_list)] + max_size
b, c, h, w = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
for img, pad_img, m in zip(tensor_list, tensor, mask):
pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
m[: img.shape[1], :img.shape[2]] = False
else:
raise ValueError('not supported')
return NestedTensor(tensor, mask) | null |
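For example, batching two differently sized images pads to the per-axis maxima and marks the padding in the mask:
import torch

imgs = [torch.rand(3, 200, 300), torch.rand(3, 180, 320)]
nt = nested_tensor_from_tensor_list(imgs)
tensors, mask = nt.decompose()
print(tensors.shape, mask.shape)  # torch.Size([2, 3, 200, 320]) torch.Size([2, 200, 320])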
18,801 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def is_main_process():
import math
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs) | null |
18,802 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
import math
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0) | null |
18,803 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
import math
The provided code snippet includes necessary dependencies for implementing the `interpolate` function. Write a Python function `def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None)` to solve the following problem:
Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away.
Here is the function:
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) | Equivalent to nn.functional.interpolate, but with support for empty batch sizes. This will eventually be supported natively by PyTorch, and this class can go away. |
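A sketch of why the wrapper exists: it tolerates an empty batch. This assumes an older torchvision where ops.misc.interpolate is still a standalone function, as the code above expects:
import torch

x = torch.rand(0, 3, 16, 16)                          # empty batch
y = interpolate(x, scale_factor=2.0, mode='nearest')
print(y.shape)                                        # torch.Size([0, 3, 32, 32])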
18,804 | import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
import torchvision
import math
def get_warmup_cosin_scheduler(optimizer, warmup_ep, all_ep, delta_min, delta_max, enable_list=None):
    # Linear warmup for warmup_ep epochs, then cosine decay from delta_max to delta_min by all_ep.
    enable_lambda = lambda cur_ep: cur_ep / warmup_ep if cur_ep < warmup_ep else (
        delta_min + 0.5 * (delta_max - delta_min) * (1.0 + math.cos((cur_ep - warmup_ep) / (all_ep - warmup_ep) * math.pi)))
    disable_lambda = lambda cur_ep: 1
    if enable_list is None:
        lr_lambda = enable_lambda
    else:
        lr_lambda = [enable_lambda if _ else disable_lambda for _ in enable_list]
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
    return scheduler
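Usage sketch: the multiplier ramps linearly over warmup_ep epochs, peaks at delta_max, and follows a cosine down to delta_min at all_ep:
import torch

opt = torch.optim.SGD([torch.nn.Parameter(torch.zeros(1))], lr=0.1)
sched = get_warmup_cosin_scheduler(opt, warmup_ep=2, all_ep=10, delta_min=0.01, delta_max=1.0)
for ep in range(10):
    opt.step()
    sched.step()
    print(ep, opt.param_groups[0]['lr'])  # 0.05 after the first step, 0.1 at the peak, 0.001 by the end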
import argparse
import math
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import utils as public_utils
from torch.utils.data import DataLoader
from dataset.grounding_dataset import NestedTensor, collate_fn, collate_fn_val
from models.model_grounding_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
from vgTools.utils import misc as utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
from vgTools.utils import eval_utils
from icecream import ic
from pdb import set_trace as breakpoint
def evaluate(model, data_loader, tokenizer, device):
model.eval()
pred_box_list = []
gt_box_list = []
from tqdm import tqdm
for _, batch in enumerate(tqdm(data_loader)):
img_data, text_data, target,raw_data = batch
# copy to GPU
img_data = img_data.to(device)
text_data = text_data.to(device)
target = target.to(device)
pred_res = model.module(img_data, text_data,{})
pred_boxes=pred_res
pred_box_list.append(pred_boxes.cpu())
gt_box_list.append(target.cpu())
pred_boxes = torch.cat(pred_box_list, dim=0)
gt_boxes = torch.cat(gt_box_list, dim=0)
total_num = gt_boxes.shape[0]
accu_num = eval_utils.trans_vg_eval_test(pred_boxes, gt_boxes)
result_tensor = torch.tensor([accu_num, total_num]).to(device)
torch.cuda.synchronize()
dist.all_reduce(result_tensor)
accuracy = float(result_tensor[0]) / float(result_tensor[1])
return accuracy
def resize_pos_embed(posemb, posemb_new):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
ntok_new = posemb_new.shape[1]
    # split off the class token, then interpolate the spatial grid
    posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
    ntok_new -= 1
gs_old = int(math.sqrt(len(posemb_grid)))
gs_new = int(math.sqrt(ntok_new))
#_logger.info('Position embedding grid-size from %s to %s', gs_old, gs_new)
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=(gs_new, gs_new), mode='bilinear')
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new * gs_new, -1)
posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
posemb = torch.squeeze(posemb, dim=0)
return posemb
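For instance, adapting a 224-resolution checkpoint (14x14 grid plus CLS) to 256 resolution (16x16 grid plus CLS):
import torch

old = torch.rand(1, 14 * 14 + 1, 768)
new = torch.zeros(1, 16 * 16 + 1, 768)
print(resize_pos_embed(old, new).shape)  # torch.Size([257, 768])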
def load_checkpoint(model,checkpoint_path,args,config):
if isinstance(model,torch.nn.parallel.DistributedDataParallel):
model=model.module
checkpoint = torch.load(checkpoint_path, map_location='cpu')
state_dict = checkpoint['model']
tmp = {}
for key in state_dict.keys():
if '_m.' in key:
continue
if 'text_encoder.bert' in key[:len('text_encoder.bert')]:
encoder_key = key.replace('bert.', '')
tmp[encoder_key] = state_dict[key]
elif 'fusion_encoder.fusion' in key:
encoder_key = key.replace('fusion.', '')
tmp[encoder_key]=state_dict[key]
else:
tmp[key]=state_dict[key]
state_dict = tmp
    # reshape positional embedding to accommodate the image resolution change
vit_rate = 16*16 if '16' in config['clip_name'] else 14*14
num_patches = int(config["image_res"] * config["image_res"]/vit_rate)
pos_embed = nn.Parameter(torch.zeros(num_patches + 1, config['vision_width']).float())
pos_embed = resize_pos_embed(state_dict['visual_encoder.visual.positional_embedding'].unsqueeze(0),
pos_embed.unsqueeze(0))
state_dict['visual_encoder.visual.positional_embedding'] = pos_embed
if not args.evaluate:
if config['distill']:
num_patches = int(config["image_res"] * config["image_res"] / vit_rate)
pos_embed = nn.Parameter(torch.zeros(num_patches + 1, config['vision_width']).float())
msg = model.load_state_dict(state_dict, strict=False)
print('load checkpoint from %s' % checkpoint_path)
print(msg) | null |
18,806 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import utils as public_utils
from torch.utils.data import DataLoader
from dataset.grounding_dataset import NestedTensor, collate_fn, collate_fn_val
from models.model_grounding_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
from vgTools.utils import misc as utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
from vgTools.utils import eval_utils
from icecream import ic
from pdb import set_trace as breakpoint
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_two_optim=False,do_amp=False):
accum_steps=config.get('accum_steps',1)
# train
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
if do_two_optim:
metric_logger.add_meter('lr1', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('lr2', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
else:
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('loss_seq', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
step_size = 100
warmup_iterations = warmup_steps*step_size
for i,batch in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
img_data, text_data, target = batch
# copy to GPU
img_data = img_data.to(device)
text_data = text_data.to(device)
target = target.to(device)
if epoch>0 or not config['warm_up']:
alpha = config['alpha']
else:
alpha = config['alpha']*min(1,i/len(data_loader))
loss_dict = model(img_data, text_data,{'targets':target})
loss = sum(loss_dict[k] for k in loss_dict.keys())
optimizer.zero_grad()
if do_amp:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
# logger.info('scaled loss: {}'.format(str(scaled_loss)))
scaled_loss.backward()
else:
loss.backward()
if (i + 1) % accum_steps == 0:
optimizer.step()
optimizer.zero_grad()
metric_logger.update(loss_seq=loss_dict['loss_seq'].item())
if do_two_optim:
metric_logger.update(lr1=optimizer.param_groups[0]["lr"])
metric_logger.update(lr2=optimizer.param_groups[2]["lr"])
else:
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
if epoch==0 and i%step_size==0 and i<=warmup_iterations:
scheduler.step(i//step_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()} | null |
18,807 | import argparse
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import utils as public_utils
from torch.utils.data import DataLoader
from dataset.grounding_dataset import NestedTensor, collate_fn, collate_fn_val
from models.model_grounding_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
from vgTools.utils import misc as utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
from vgTools.utils import eval_utils
from icecream import ic
from pdb import set_trace as breakpoint
def val(model, data_loader, tokenizer, device):
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Eval:'
for batch in metric_logger.log_every(data_loader, 10, header):
img_data, text_data, target,raw_data = batch
batch_size = img_data.tensors.size(0)
# copy to GPU
img_data = img_data.to(device)
text_data = text_data.to(device)
target = target.to(device)
pred_res = model(img_data, text_data,{})
pred_boxes=pred_res
miou, accu = eval_utils.trans_vg_eval_val(pred_boxes, target)
metric_logger.update_v2('miou', torch.mean(miou), batch_size)
metric_logger.update_v2('accu', accu, batch_size)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
return stats | null |
18,808 | import argparse
import os
import ruamel_yaml as yaml
import language_evaluation
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, coco_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
def train(model, data_loader, optimizer, tokenizer, epoch, warmup_steps, device, scheduler, config, do_amp=False,
do_two_optim=False, do_accum=False, accum_steps=1):
# train
model.train()
metric_logger = utils.MetricLogger(delimiter=" ")
if do_two_optim:
metric_logger.add_meter('lr1', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('lr2', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
else:
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))
metric_logger.add_meter('loss', utils.SmoothedValue(window_size=1, fmt='{value:.4f}'))
header = 'Train Epoch: [{}]'.format(epoch)
print_freq = 50
step_size = 100
warmup_iterations = warmup_steps * step_size
for i, (image, caption, object_labels, image_ids, gold_caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device, non_blocking=True)
if config['prompt'] != "":
caption = [config['prompt'] + each+config['eos'] for each in caption]
else:
caption = [each+config['eos'] for each in caption]
question_input = [config['bos']+" "+each for each in object_labels]
if i == 0:
print (question_input)
caption = tokenizer(caption, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
question_input = tokenizer(question_input, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
# question_input = caption.input_ids[0,0].repeat(caption.input_ids.size(0), 1)
if epoch > 0 or not config['warm_up']:
alpha = config['alpha']
else:
alpha = config['alpha'] * min(1, i / len(data_loader))
loss = model(image, question_input, caption, train=True)
if accum_steps > 1:
loss = loss / accum_steps
if do_amp:
from apex import amp
with amp.scale_loss(loss, optimizer) as scaled_loss:
# logger.info('scaled loss: {}'.format(str(scaled_loss)))
scaled_loss.backward()
else:
loss.backward()
if (i + 1) % accum_steps == 0:
optimizer.step()
optimizer.zero_grad()
metric_logger.update(loss=loss.item())
if do_two_optim:
metric_logger.update(lr1=optimizer.param_groups[0]["lr"])
metric_logger.update(lr2=optimizer.param_groups[2]["lr"])
else:
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
if epoch == 0 and i % step_size == 0 and i <= warmup_iterations:
scheduler.step(i // step_size)
del image, question_input,caption,loss
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.3f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()}
def evaluation(model, data_loader, tokenizer, device, config):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Generate VQA test result:'
print_freq = 50
result = []
answer_input = None
for n, (image, caption, object_labels, image_ids, gold_caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device,non_blocking=True)
caption = [each+config['eos'] for each in caption]
question_input = [config['bos']+" "+each for each in object_labels]
caption = tokenizer(caption, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
question_input = tokenizer(question_input, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
topk_ids, topk_probs = model(image, question_input, caption, train=False)
for image_id, topk_id, topk_prob, gold_caption_list in zip(image_ids, topk_ids, topk_probs, gold_caption):
ans = tokenizer.decode(topk_id[0]).replace("[SEP]", "").replace("[CLS]", "").replace("[PAD]", "").strip()
result.append({"question_id":image_id, "pred_caption":ans, "gold_caption":gold_caption_list})
return result | null |
18,809 | import argparse
import os
import ruamel_yaml as yaml
import language_evaluation
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, coco_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
def cal_metric(result_file):
def evaluate(model, data_loader, dataset, tokenizer, device, config):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Evaluation:'
print_freq = 50
predicts = []
answers = []
answer_input = None
for n, (image, caption, image_ids, gold_caption) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
image = image.to(device,non_blocking=True)
caption = [each+config['eos'] for each in caption]
question_input = [config['bos']]*len(caption)
caption = tokenizer(caption, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
question_input = tokenizer(question_input, padding='longest', truncation=True, max_length=args.max_input_length, return_tensors="pt").to(device)
# note: the first gold caption is scored against the full gold set here (a sanity check), which explains the near-perfect metrics quoted below
for i in range(len(gold_caption)):
predicts.append(gold_caption[i][0])
answers.append(gold_caption[i])
#{'Bleu_1': 0.9999999999863945, 'Bleu_2': 0.9999999999859791, 'Bleu_3': 0.9999999999854866, 'Bleu_4': 0.999999999984889, 'METEOR': 1.0, 'ROUGE_L': 1.0, 'CIDEr': 2.7246232035629268, 'SPICE': 0.40389416048620613}
result = cal_metric(predicts, answers)
metric_logger.meters['Bleu_1'].update(result["Bleu_1"], n=image.size(0))
metric_logger.meters['Bleu_2'].update(result["Bleu_2"], n=image.size(0))
metric_logger.meters['Bleu_3'].update(result["Bleu_3"], n=image.size(0))
metric_logger.meters['Bleu_4'].update(result["Bleu_4"], n=image.size(0))
metric_logger.meters['CIDEr'].update(result["CIDEr"], n=image.size(0))
# gather the stats from all processes
torch.cuda.empty_cache()
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger.global_avg())
return {k: "{:.4f}".format(meter.global_avg) for k, meter in metric_logger.meters.items()} | null |
18,810 | import argparse
import sys
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug_vatex import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, nocaps_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import language_evaluation
def evaluation(model, data_loader, tokenizer, device, config, test_submit=False):
# test
model.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Generate Vatex Cap test result:'
print_freq = 2
result = []
answer_input = None
for n, (video, video_ids) in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
if config['prompt'] != "":
caption = [config['prompt'] + config['eos']] * video.size(0)
caption = tokenizer(caption, padding='longest', truncation=True, max_length=args.max_input_length,
return_tensors="pt").to(device)
else:
caption = None
# print (caption.input_ids.size())
# image = image.to(device,non_blocking=True)
topk_ids, topk_probs = model(video, caption, None, train=False, device=device)
for image_id, topk_id, topk_prob in zip(video_ids, topk_ids, topk_probs):
ans = tokenizer.decode(topk_id[0]).replace("[SEP]", "").replace("[CLS]", "").replace("[PAD]", "").strip()
ans += ' .'
if test_submit:
# print (image_id, int(image_id.replace(".jpg", "").split("_")[-1]))
result.append({image_id: ans})
else:
result.append({"question_id": image_id, "pred_caption": ans, "gold_caption": gold_caption_list})
return result | null |
18,811 | import argparse
import sys
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug_vatex import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, nocaps_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import language_evaluation
def cal_metric(result_file):
result_list = json.load(open(result_file, "r"))
predicts = []
answers = []
for each in result_list:
predicts.append(each["pred_caption"])
answers.append(each["gold_caption"])
evaluator = language_evaluation.CocoEvaluator(verbose=False)
results = evaluator.run_evaluation(predicts, answers)
print(len(result_list), results)
return results | null |
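For reference, a hedged sketch of the JSON layout cal_metric expects: a list of records, each with one predicted caption and a list of gold references. The file name and contents below are illustrative only:
import json

example = [{"question_id": "video_001",
            "pred_caption": "a man is riding a horse .",
            "gold_caption": ["a man rides a horse", "a person rides a horse"]}]
json.dump(example, open("example_result.json", "w"))
# cal_metric("example_result.json") then feeds these pairs to language_evaluation.CocoEvaluator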
18,812 | import argparse
import sys
import os
import ruamel_yaml as yaml
import numpy as np
import random
import time
import datetime
import json
from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributed as dist
from models.model_caption_mplug_vatex import MPLUG
from models.vit import interpolate_pos_embed, resize_pos_embed
from models.tokenization_bert import BertTokenizer
import utils
from dataset.utils import save_result
from dataset import create_dataset, create_sampler, create_loader, nocaps_collate_fn
from scheduler import create_scheduler
from optim import create_optimizer, create_two_optimizer
import language_evaluation
def process_res_file(submission_path, ref_path):
submission = json.load(open(submission_path))
ref_data = json.load(open(ref_path))
vid_refs_list = []
for item in ref_data:
vid = item['videoID']
ref_caps = item['enCap']
pred_cap = submission[vid]
vid_item_data = {'vid': vid,
'pred_caption': pred_cap,
'gold_caption': ref_caps}
vid_refs_list.append(vid_item_data)
return vid_refs_list | null |
18,813 | from .cosine_lr import CosineLRScheduler
from .tanh_lr import TanhLRScheduler
from .step_lr import StepLRScheduler
from .plateau_lr import PlateauLRScheduler
class CosineLRScheduler(Scheduler):
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
t_mul: float = 1.,
lr_min: float = 0.,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=True,
cycle_limit=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True) -> None:
def _get_lr(self, t):
def get_epoch_values(self, epoch: int):
def get_update_values(self, num_updates: int):
def get_cycle_length(self, cycles=0):
class TanhLRScheduler(Scheduler):
def __init__(self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lb: float = -6.,
ub: float = 4.,
t_mul: float = 1.,
lr_min: float = 0.,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
cycle_limit=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True) -> None:
def _get_lr(self, t):
def get_epoch_values(self, epoch: int):
def get_update_values(self, num_updates: int):
def get_cycle_length(self, cycles=0):
class StepLRScheduler(Scheduler):
def __init__(self,
optimizer: torch.optim.Optimizer,
decay_t: float,
decay_rate: float = 1.,
warmup_t=0,
warmup_lr_init=0,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
def _get_lr(self, t):
def get_epoch_values(self, epoch: int):
def get_update_values(self, num_updates: int):
class PlateauLRScheduler(Scheduler):
def __init__(self,
optimizer,
decay_rate=0.1,
patience_t=10,
verbose=True,
threshold=1e-4,
cooldown_t=0,
warmup_t=0,
warmup_lr_init=0,
lr_min=0,
mode='max',
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize=True,
):
def state_dict(self):
def load_state_dict(self, state_dict):
def step(self, epoch, metric=None):
def _apply_noise(self, epoch):
def create_scheduler(args, optimizer):
num_epochs = args.epochs
if getattr(args, 'lr_noise', None) is not None:
lr_noise = getattr(args, 'lr_noise')
if isinstance(lr_noise, (list, tuple)):
noise_range = [n * num_epochs for n in lr_noise]
if len(noise_range) == 1:
noise_range = noise_range[0]
else:
noise_range = lr_noise * num_epochs
else:
noise_range = None
lr_scheduler = None
if args.sched == 'cosine':
lr_scheduler = CosineLRScheduler(
optimizer,
t_initial=num_epochs,
t_mul=getattr(args, 'lr_cycle_mul', 1.),
lr_min=args.min_lr,
decay_rate=args.decay_rate,
warmup_lr_init=args.warmup_lr,
warmup_t=args.warmup_epochs,
cycle_limit=getattr(args, 'lr_cycle_limit', 1),
t_in_epochs=True,
noise_range_t=noise_range,
noise_pct=getattr(args, 'lr_noise_pct', 0.67),
noise_std=getattr(args, 'lr_noise_std', 1.),
noise_seed=getattr(args, 'seed', 42),
)
num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
elif args.sched == 'tanh':
lr_scheduler = TanhLRScheduler(
optimizer,
t_initial=num_epochs,
t_mul=getattr(args, 'lr_cycle_mul', 1.),
lr_min=args.min_lr,
warmup_lr_init=args.warmup_lr,
warmup_t=args.warmup_epochs,
cycle_limit=getattr(args, 'lr_cycle_limit', 1),
t_in_epochs=True,
noise_range_t=noise_range,
noise_pct=getattr(args, 'lr_noise_pct', 0.67),
noise_std=getattr(args, 'lr_noise_std', 1.),
noise_seed=getattr(args, 'seed', 42),
)
num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs
elif args.sched == 'step':
lr_scheduler = StepLRScheduler(
optimizer,
decay_t=args.decay_epochs,
decay_rate=args.decay_rate,
warmup_lr_init=args.warmup_lr,
warmup_t=args.warmup_epochs,
noise_range_t=noise_range,
noise_pct=getattr(args, 'lr_noise_pct', 0.67),
noise_std=getattr(args, 'lr_noise_std', 1.),
noise_seed=getattr(args, 'seed', 42),
)
elif args.sched == 'plateau':
mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'
lr_scheduler = PlateauLRScheduler(
optimizer,
decay_rate=args.decay_rate,
patience_t=args.patience_epochs,
lr_min=args.min_lr,
mode=mode,
warmup_lr_init=args.warmup_lr,
warmup_t=args.warmup_epochs,
cooldown_t=0,
noise_range_t=noise_range,
noise_pct=getattr(args, 'lr_noise_pct', 0.67),
noise_std=getattr(args, 'lr_noise_std', 1.),
noise_seed=getattr(args, 'seed', 42),
)
return lr_scheduler, num_epochs | null |
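A minimal usage sketch, assuming only the attribute names that create_scheduler reads above; the model and hyperparameter values are placeholders:
from types import SimpleNamespace
import torch

args = SimpleNamespace(sched='cosine', epochs=10, min_lr=1e-6, decay_rate=1.0,
                       warmup_lr=1e-5, warmup_epochs=2, cooldown_epochs=0)
model = torch.nn.Linear(4, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
lr_scheduler, num_epochs = create_scheduler(args, optimizer)
for epoch in range(num_epochs):
    lr_scheduler.step(epoch)  # epoch-granular stepping, since t_in_epochs=True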
18,814 | import json
import numpy as np
import time
import logging
import os
import random
from torch.utils.data import Dataset
from PIL import Image
from PIL import ImageFile
import oss2
from io import BytesIO
from dataset.utils import pre_caption
def decode_int32(ann):
ann = str(ann)
server = str(int(ann[-1]) + 1)
id_ = "0"*(9-len(ann[:-1]))+ann[:-1]
assert len(id_) == 9
ann = server+"/"+id_
return ann | null |
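Two illustrative inputs make the bucketing explicit: the last digit (plus one) picks a server prefix and the remaining digits are zero-padded to nine characters:
assert decode_int32(123456789) == "10/012345678"  # server '9'+1, id '12345678' zero-padded
assert decode_int32(42) == "3/000000004"          # server '2'+1, id '4' zero-padded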
18,815 | import re
from vqaTools.vqaEval import VQAEval
import json
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import utils
from tqdm import tqdm
def pre_question(question,max_ques_words):
question = re.sub(
r"([,.'!?\"()*#:;~])",
'',
question.lower(),
).replace('-', ' ').replace('/', ' ')
question = question.rstrip(' ')
#truncate question
question_words = question.split(' ')
if len(question_words)>max_ques_words:
question = ' '.join(question_words[:max_ques_words])
return question | null |
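Illustrative behaviour of pre_question: punctuation is stripped, text is lower-cased, and the question is truncated to max_ques_words:
assert pre_question("What's on the table?", 30) == "whats on the table"
assert pre_question("one two three four", 2) == "one two"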
18,816 | import re
from vqaTools.vqaEval import VQAEval
import json
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import utils
from tqdm import tqdm
def pre_caption(caption,max_words):
caption = re.sub(
r"([,.'!?\"()*#:;~])",
'',
caption.lower(),
).replace('-', ' ').replace('/', ' ').replace('<person>', 'person')
caption = re.sub(
r"\s{2,}",
' ',
caption,
)
caption = caption.rstrip('\n')
caption = caption.strip(' ')
#truncate caption
caption_words = caption.split(' ')
if len(caption_words)>max_words:
caption = ' '.join(caption_words[:max_words])
return caption | null |
18,817 | import re
import sys
from vqaTools.vqaEval import VQAEval
import json
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import utils
from tqdm import tqdm
class VQAEval:
def __init__(self, vqa, vqaRes, n=2):
self.n = n
self.accuracy = {}
self.evalQA = {}
self.evalQuesType = {}
self.evalAnsType = {}
self.vqa = vqa
self.vqaRes = vqaRes
self.params = {'question_id': vqa.getQuesIds()}
self.contractions = {"aint": "ain't", "arent": "aren't", "cant": "can't", "couldve": "could've", "couldnt": "couldn't",
"couldn'tve": "couldn't've", "couldnt've": "couldn't've", "didnt": "didn't", "doesnt": "doesn't", "dont": "don't", "hadnt": "hadn't",
"hadnt've": "hadn't've", "hadn'tve": "hadn't've", "hasnt": "hasn't", "havent": "haven't", "hed": "he'd", "hed've": "he'd've",
"he'dve": "he'd've", "hes": "he's", "howd": "how'd", "howll": "how'll", "hows": "how's", "Id've": "I'd've", "I'dve": "I'd've",
"Im": "I'm", "Ive": "I've", "isnt": "isn't", "itd": "it'd", "itd've": "it'd've", "it'dve": "it'd've", "itll": "it'll", "let's": "let's",
"maam": "ma'am", "mightnt": "mightn't", "mightnt've": "mightn't've", "mightn'tve": "mightn't've", "mightve": "might've",
"mustnt": "mustn't", "mustve": "must've", "neednt": "needn't", "notve": "not've", "oclock": "o'clock", "oughtnt": "oughtn't",
"ow's'at": "'ow's'at", "'ows'at": "'ow's'at", "'ow'sat": "'ow's'at", "shant": "shan't", "shed've": "she'd've", "she'dve": "she'd've",
"she's": "she's", "shouldve": "should've", "shouldnt": "shouldn't", "shouldnt've": "shouldn't've", "shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd", "somebodyd've": "somebody'd've", "somebody'dve": "somebody'd've", "somebodyll": "somebody'll",
"somebodys": "somebody's", "someoned": "someone'd", "someoned've": "someone'd've", "someone'dve": "someone'd've",
"someonell": "someone'll", "someones": "someone's", "somethingd": "something'd", "somethingd've": "something'd've",
"something'dve": "something'd've", "somethingll": "something'll", "thats": "that's", "thered": "there'd", "thered've": "there'd've",
"there'dve": "there'd've", "therere": "there're", "theres": "there's", "theyd": "they'd", "theyd've": "they'd've",
"they'dve": "they'd've", "theyll": "they'll", "theyre": "they're", "theyve": "they've", "twas": "'twas", "wasnt": "wasn't",
"wed've": "we'd've", "we'dve": "we'd've", "weve": "we've", "werent": "weren't", "whatll": "what'll", "whatre": "what're",
"whats": "what's", "whatve": "what've", "whens": "when's", "whered": "where'd", "wheres": "where's", "whereve": "where've",
"whod": "who'd", "whod've": "who'd've", "who'dve": "who'd've", "wholl": "who'll", "whos": "who's", "whove": "who've", "whyll": "why'll",
"whyre": "why're", "whys": "why's", "wont": "won't", "wouldve": "would've", "wouldnt": "wouldn't", "wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've", "yall": "y'all", "yall'll": "y'all'll", "y'allll": "y'all'll", "yall'd've": "y'all'd've",
"y'alld've": "y'all'd've", "y'all'dve": "y'all'd've", "youd": "you'd", "youd've": "you'd've", "you'dve": "you'd've",
"youll": "you'll", "youre": "you're", "youve": "you've"}
self.manualMap = { 'none': '0',
'zero': '0',
'one': '1',
'two': '2',
'three': '3',
'four': '4',
'five': '5',
'six': '6',
'seven': '7',
'eight': '8',
'nine': '9',
'ten': '10'
}
self.articles = ['a',
'an',
'the'
]
self.periodStrip = re.compile(r"(?<!\d)(\.)(?!\d)")  # negative lookbehind, was mistyped as (?!<=\d)
self.commaStrip = re.compile(r"(\d)(,)(\d)")
self.punct = [';', r"/", '[', ']', '"', '{', '}',
'(', ')', '=', '+', '\\', '_', '-',
'>', '<', '@', '`', ',', '?', '!']
def evaluate(self, quesIds=None):
if quesIds is None:
quesIds = [quesId for quesId in self.params['question_id']]
gts = {}
res = {}
for quesId in quesIds:
gts[quesId] = self.vqa.qa[quesId]
res[quesId] = self.vqaRes.qa[quesId]
# =================================================
# Compute accuracy
# =================================================
accQA = []
accQuesType = {}
accAnsType = {}
print ("computing accuracy")
step = 0
for quesId in quesIds:
resAns = res[quesId]['answer']
resAns = resAns.replace('\n', ' ')
resAns = resAns.replace('\t', ' ')
resAns = resAns.strip()
resAns = self.processPunctuation(resAns)
resAns = self.processDigitArticle(resAns)
gtAcc = []
gtAnswers = [ans['answer'] for ans in gts[quesId]['answers']]
if len(set(gtAnswers)) > 1:
for ansDic in gts[quesId]['answers']:
ansDic['answer'] = self.processPunctuation(ansDic['answer'])
for gtAnsDatum in gts[quesId]['answers']:
otherGTAns = [item for item in gts[quesId]['answers'] if item!=gtAnsDatum]
matchingAns = [item for item in otherGTAns if item['answer']==resAns]
acc = min(1, float(len(matchingAns))/3)
gtAcc.append(acc)
quesType = gts[quesId]['question_type']
ansType = gts[quesId]['answer_type']
avgGTAcc = float(sum(gtAcc))/len(gtAcc)
accQA.append(avgGTAcc)
if quesType not in accQuesType:
accQuesType[quesType] = []
accQuesType[quesType].append(avgGTAcc)
if ansType not in accAnsType:
accAnsType[ansType] = []
accAnsType[ansType].append(avgGTAcc)
self.setEvalQA(quesId, avgGTAcc)
self.setEvalQuesType(quesId, quesType, avgGTAcc)
self.setEvalAnsType(quesId, ansType, avgGTAcc)
if step%100 == 0:
self.updateProgress(step/float(len(quesIds)))
step = step + 1
self.setAccuracy(accQA, accQuesType, accAnsType)
print ("Done computing accuracy")
def processPunctuation(self, inText):
outText = inText
for p in self.punct:
if (p + ' ' in inText or ' ' + p in inText) or (re.search(self.commaStrip, inText) is not None):
outText = outText.replace(p, '')
else:
outText = outText.replace(p, ' ')
# note: re.sub's third positional argument is a count, not flags, so passing re.UNICODE there was a silent bug; flags belong in re.compile above
outText = self.periodStrip.sub("", outText)
return outText
def processDigitArticle(self, inText):
outText = []
tempText = inText.lower().split()
for word in tempText:
word = self.manualMap.setdefault(word, word)
if word not in self.articles:
outText.append(word)
else:
pass
for wordId, word in enumerate(outText):
if word in self.contractions:
outText[wordId] = self.contractions[word]
outText = ' '.join(outText)
return outText
def setAccuracy(self, accQA, accQuesType, accAnsType):
self.accuracy['overall'] = round(100*float(sum(accQA))/len(accQA), self.n)
self.accuracy['perQuestionType'] = {quesType: round(100*float(sum(accQuesType[quesType]))/len(accQuesType[quesType]), self.n) for quesType in accQuesType}
self.accuracy['perAnswerType'] = {ansType: round(100*float(sum(accAnsType[ansType]))/len(accAnsType[ansType]), self.n) for ansType in accAnsType}
def setEvalQA(self, quesId, acc):
self.evalQA[quesId] = round(100*acc, self.n)
def setEvalQuesType(self, quesId, quesType, acc):
if quesType not in self.evalQuesType:
self.evalQuesType[quesType] = {}
self.evalQuesType[quesType][quesId] = round(100*acc, self.n)
def setEvalAnsType(self, quesId, ansType, acc):
if ansType not in self.evalAnsType:
self.evalAnsType[ansType] = {}
self.evalAnsType[ansType][quesId] = round(100*acc, self.n)
def updateProgress(self, progress):
barLength = 20
status = ""
if isinstance(progress, int):
progress = float(progress)
if not isinstance(progress, float):
progress = 0
status = "error: progress var must be float\r\n"
if progress < 0:
progress = 0
status = "Halt...\r\n"
if progress >= 1:
progress = 1
status = "Done...\r\n"
block = int(round(barLength*progress))
text = "\rFinshed Percent: [{0}] {1}% {2}".format( "#"*block + "-"*(barLength-block), int(progress*100), status)
sys.stdout.write(text)
sys.stdout.flush()
def vqa_eval(vqa, result_file, test_ques_path):
vqaRes = vqa.loadRes(result_file, test_ques_path)
# create vqaEval object by taking vqa and vqaRes
vqaEval = VQAEval(vqa, vqaRes, n=2) # n is precision of accuracy (number of places after decimal), default is 2
# evaluate results
vqaEval.evaluate()
# print accuracies
print("\n")
print("Overall Accuracy is: %.02f\n" % (vqaEval.accuracy['overall']))
print("Per Answer Type Accuracy is the following:")
for ansType in vqaEval.accuracy['perAnswerType']:
print("%s : %.02f" % (ansType, vqaEval.accuracy['perAnswerType'][ansType]))
print("\n")
return vqaEval | null |
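A hedged usage sketch, assuming the official VQA annotation API is importable as vqaTools.vqa.VQA; every path below is a placeholder:
from vqaTools.vqa import VQA

vqa = VQA("annotations.json", "questions.json")                # hypothetical files
vqa_eval_obj = vqa_eval(vqa, "vqa_result.json", "questions.json")
print(vqa_eval_obj.accuracy['perQuestionType'])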
18,818 | import re
from vqaTools.vqaEval import VQAEval
import json
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import utils
from tqdm import tqdm
import utils
def collect_result(result, result_dir, filename, is_json=True, is_list=True):
if is_json:
result_file = os.path.join(result_dir, '%s_rank%d.json'%(filename,utils.get_rank()))
final_result_file = os.path.join(result_dir, '%s.json'%filename)
json.dump(result,open(result_file,'w'))
else:
result_file = os.path.join(result_dir, '%s_rank%d.pth'%(filename,utils.get_rank()))
final_result_file = os.path.join(result_dir, '%s.pth'%filename)
torch.save(result,result_file)
dist.barrier()
result = None
if utils.is_main_process():
# combine results from all processes
if is_list:
result = []
else:
result = {}
for rank in range(utils.get_world_size()):
if is_json:
result_file = os.path.join(result_dir, '%s_rank%d.json'%(filename,rank))
res = json.load(open(result_file,'r'))
else:
result_file = os.path.join(result_dir, '%s_rank%d.pth'%(filename,rank))
res = torch.load(result_file)
if is_list:
result += res
else:
result.update(res)
return result | null |
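The intended call pattern under torch.distributed, sketched with hypothetical data: each rank writes its own shard, and only the main process receives the merged list:
local_result = [{"question_id": i, "answer": "yes"} for i in range(3)]  # this rank's shard
merged = collect_result(local_result, result_dir="output", filename="vqa_test")
if merged is not None:  # true only on the main process
    json.dump(merged, open("output/vqa_test.json", "w"))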
18,819 | import re
from vqaTools.vqaEval import VQAEval
import json
import os
import numpy as np
import torch
import torch.distributed as dist
import torch.nn.functional as F
import utils
from tqdm import tqdm
def computeIoU(box1, box2):
# each box is of [x1, y1, w, h]
inter_x1 = max(box1[0], box2[0])
inter_y1 = max(box1[1], box2[1])
inter_x2 = min(box1[0]+box1[2]-1, box2[0]+box2[2]-1)
inter_y2 = min(box1[1]+box1[3]-1, box2[1]+box2[3]-1)
if inter_x1 < inter_x2 and inter_y1 < inter_y2:
inter = (inter_x2-inter_x1+1)*(inter_y2-inter_y1+1)
else:
inter = 0
union = box1[2]*box1[3] + box2[2]*box2[3] - inter
return float(inter)/union
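A worked example with [x1, y1, w, h] boxes: the two 10x10 boxes below share a 5x5 patch, so IoU = 25 / (100 + 100 - 25) ≈ 0.143:
print(round(computeIoU([0, 0, 10, 10], [5, 5, 10, 10]), 3))  # 0.143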
def grounding_eval(results,dets,cocos,refer,alpha,mask_size=24):
correct_A_d, correct_B_d, correct_val_d = 0, 0, 0
correct_A, correct_B, correct_val = 0, 0, 0
num_A,num_B,num_val = 0,0,0
for res in tqdm(results):
ref_id = res['ref_id']
ref = refer.Refs[ref_id]
ref_box = refer.refToAnn[ref_id]['bbox']
image = refer.Imgs[ref['image_id']]
mask = res['pred'].cuda().view(1,1,mask_size,mask_size)
mask = F.interpolate(mask,size = (image['height'],image['width']), mode='bicubic').squeeze()
# rank detection boxes
max_score = 0
for det in dets[str(ref['image_id'])]:
score = mask[int(det[1]):int(det[1]+det[3]),int(det[0]):int(det[0]+det[2])]
area = det[2]*det[3]
score = score.sum() / area**alpha
if score>max_score:
pred_box = det[:4]
max_score = score
IoU_det = computeIoU(ref_box, pred_box)
if ref['split']=='testA':
num_A += 1
if IoU_det >= 0.5:
correct_A_d += 1
elif ref['split']=='testB':
num_B += 1
if IoU_det >= 0.5:
correct_B_d += 1
elif ref['split']=='val':
num_val += 1
if IoU_det >= 0.5:
correct_val_d += 1
eval_result = {'val_d':correct_val_d/num_val,'testA_d':correct_A_d/num_A,'testB_d':correct_B_d/num_B}
for metric, acc in eval_result.items():
print(f'{metric}: {acc:.3f}')
return eval_result | null |
18,820 | import cv2
import numpy as np
def identity_func(img):
return img | null |
18,821 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `autocontrast_func` function. Write a Python function `def autocontrast_func(img, cutoff=0)` to solve the following problem:
same output as PIL.ImageOps.autocontrast
Here is the function:
def autocontrast_func(img, cutoff=0):
'''
same output as PIL.ImageOps.autocontrast
'''
n_bins = 256
def tune_channel(ch):
n = ch.size
cut = cutoff * n // 100
if cut == 0:
high, low = ch.max(), ch.min()
else:
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
low = np.argwhere(np.cumsum(hist) > cut)
low = 0 if low.shape[0] == 0 else low[0]
high = np.argwhere(np.cumsum(hist[::-1]) > cut)
high = n_bins - 1 if high.shape[0] == 0 else n_bins - 1 - high[0]
if high <= low:
table = np.arange(n_bins)
else:
scale = (n_bins - 1) / (high - low)
offset = -low * scale
table = np.arange(n_bins) * scale + offset
table[table < 0] = 0
table[table > n_bins - 1] = n_bins - 1
table = table.clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out | same output as PIL.ImageOps.autocontrast |
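A quick cross-check against PIL on random data; exact agreement is the documented goal, assuming identical channel ordering on both sides:
from PIL import Image, ImageOps

rgb = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
print(np.array_equal(autocontrast_func(rgb, cutoff=0),
                     np.array(ImageOps.autocontrast(Image.fromarray(rgb), cutoff=0))))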
18,822 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `equalize_func` function. Write a Python function `def equalize_func(img)` to solve the following problem:
same output as PIL.ImageOps.equalize PIL's implementation is different from cv2.equalize
Here is the function:
def equalize_func(img):
'''
same output as PIL.ImageOps.equalize
PIL's implementation is different from cv2.equalize
'''
n_bins = 256
def tune_channel(ch):
hist = cv2.calcHist([ch], [0], None, [n_bins], [0, n_bins])
non_zero_hist = hist[hist != 0].reshape(-1)
step = np.sum(non_zero_hist[:-1]) // (n_bins - 1)
if step == 0: return ch
n = np.empty_like(hist)
n[0] = step // 2
n[1:] = hist[:-1]
table = (np.cumsum(n) // step).clip(0, 255).astype(np.uint8)
return table[ch]
channels = [tune_channel(ch) for ch in cv2.split(img)]
out = cv2.merge(channels)
return out | same output as PIL.ImageOps.equalize PIL's implementation is different from cv2.equalize |
18,823 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `rotate_func` function. Write a Python function `def rotate_func(img, degree, fill=(0, 0, 0))` to solve the following problem:
like PIL, rotate by degree, not radians
Here is the function:
def rotate_func(img, degree, fill=(0, 0, 0)):
'''
like PIL, rotate by degree, not radians
'''
H, W = img.shape[0], img.shape[1]
center = W / 2, H / 2
M = cv2.getRotationMatrix2D(center, degree, 1)
out = cv2.warpAffine(img, M, (W, H), borderValue=fill)
return out | like PIL, rotate by degree, not radians |
18,824 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `solarize_func` function. Write a Python function `def solarize_func(img, thresh=128)` to solve the following problem:
same output as PIL.ImageOps.solarize
Here is the function:
def solarize_func(img, thresh=128):
'''
same output as PIL.ImageOps.solarize
'''
table = np.array([el if el < thresh else 255 - el for el in range(256)])
table = table.clip(0, 255).astype(np.uint8)
out = table[img]
return out | same output as PIL.ImageOps.solarize
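Concretely, pixels at or above the threshold are inverted while the rest pass through:
img = np.array([[100, 200]], dtype=np.uint8)
print(solarize_func(img, thresh=128))  # [[100  55]]  (200 -> 255 - 200)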
18,825 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `color_func` function. Write a Python function `def color_func(img, factor)` to solve the following problem:
same output as PIL.ImageEnhance.Color
Here is the function:
def color_func(img, factor):
'''
same output as PIL.ImageEnhance.Color
'''
## implementation according to PIL definition, quite slow
# degenerate = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)[:, :, np.newaxis]
# out = blend(degenerate, img, factor)
# M = (
# np.eye(3) * factor
# + np.float32([0.114, 0.587, 0.299]).reshape(3, 1) * (1. - factor)
# )[np.newaxis, np.newaxis, :]
M = (
np.float32([
[0.886, -0.114, -0.114],
[-0.587, 0.413, -0.587],
[-0.299, -0.299, 0.701]]) * factor
+ np.float32([[0.114], [0.587], [0.299]])
)
out = np.matmul(img, M).clip(0, 255).astype(np.uint8)
return out | same output as PIL.ImageEnhance.Color |
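The hard-coded matrix is the grayscale-blend definition folded into one matmul: it equals factor * I + (1 - factor) * w 1^T with BGR luma weights w, as this small check confirms:
factor = 0.5
w = np.float32([0.114, 0.587, 0.299]).reshape(3, 1)
M_blend = np.eye(3, dtype=np.float32) * factor + w * (1.0 - factor)
M_coded = (np.float32([[0.886, -0.114, -0.114],
                       [-0.587, 0.413, -0.587],
                       [-0.299, -0.299, 0.701]]) * factor
           + np.float32([[0.114], [0.587], [0.299]]))
print(np.allclose(M_blend, M_coded, atol=1e-6))  # True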
18,826 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `contrast_func` function. Write a Python function `def contrast_func(img, factor)` to solve the following problem:
same output as PIL.ImageEnhance.Contrast
Here is the function:
def contrast_func(img, factor):
"""
same output as PIL.ImageEnhance.Contrast
"""
mean = np.sum(np.mean(img, axis=(0, 1)) * np.array([0.114, 0.587, 0.299]))
table = np.array([(
el - mean) * factor + mean
for el in range(256)
]).clip(0, 255).astype(np.uint8)
out = table[img]
return out | same output as PIL.ImageEnhance.Contrast |
18,827 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `brightness_func` function. Write a Python function `def brightness_func(img, factor)` to solve the following problem:
same output as PIL.ImageEnhance.Brightness
Here is the function:
def brightness_func(img, factor):
'''
same output as PIL.ImageEnhance.Brightness
'''
table = (np.arange(256, dtype=np.float32) * factor).clip(0, 255).astype(np.uint8)
out = table[img]
return out | same output as PIL.ImageEnhance.Brightness
18,828 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `sharpness_func` function. Write a Python function `def sharpness_func(img, factor)` to solve the following problem:
The differences between this result and PIL are all on the 4 boundaries; the center areas are the same
Here is the function:
def sharpness_func(img, factor):
'''
The differences between this result and PIL are all on the 4 boundaries; the center
areas are the same
'''
kernel = np.ones((3, 3), dtype=np.float32)
kernel[1][1] = 5
kernel /= 13
degenerate = cv2.filter2D(img, -1, kernel)
if factor == 0.0:
out = degenerate
elif factor == 1.0:
out = img
else:
out = img.astype(np.float32)
degenerate = degenerate.astype(np.float32)[1:-1, 1:-1, :]
out[1:-1, 1:-1, :] = degenerate + factor * (out[1:-1, 1:-1, :] - degenerate)
out = out.astype(np.uint8)
return out | The differences between this result and PIL are all on the 4 boundaries; the center areas are the same
18,829 | import cv2
import numpy as np
def shear_x_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, factor, 0], [0, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out | null |
18,830 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `translate_x_func` function. Write a Python function `def translate_x_func(img, offset, fill=(0, 0, 0))` to solve the following problem:
same output as PIL.Image.transform
Here is the function:
def translate_x_func(img, offset, fill=(0, 0, 0)):
'''
same output as PIL.Image.transform
'''
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, -offset], [0, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out | same output as PIL.Image.transform |
18,831 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `translate_y_func` function. Write a Python function `def translate_y_func(img, offset, fill=(0, 0, 0))` to solve the following problem:
same output as PIL.Image.transform
Here is the function:
def translate_y_func(img, offset, fill=(0, 0, 0)):
'''
same output as PIL.Image.transform
'''
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [0, 1, -offset]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out | same output as PIL.Image.transform |
18,832 | import cv2
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `posterize_func` function. Write a Python function `def posterize_func(img, bits)` to solve the following problem:
same output as PIL.ImageOps.posterize
Here is the function:
def posterize_func(img, bits):
'''
same output as PIL.ImageOps.posterize
'''
out = np.bitwise_and(img, np.uint8((255 << (8 - bits)) & 255))  # mask kept in uint8 range; np.uint8(4080) overflows on newer NumPy
return out | same output as PIL.ImageOps.posterize |
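Posterization keeps only the top "bits" bits of every byte; with bits=4 the mask is 0xF0, so 173 (0b10101101) maps to 160 (0b10100000):
print(posterize_func(np.array([[173]], dtype=np.uint8), bits=4))  # [[160]]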
18,833 | import cv2
import numpy as np
def shear_y_func(img, factor, fill=(0, 0, 0)):
H, W = img.shape[0], img.shape[1]
M = np.float32([[1, 0, 0], [factor, 1, 0]])
out = cv2.warpAffine(img, M, (W, H), borderValue=fill, flags=cv2.INTER_LINEAR).astype(np.uint8)
return out | null |
18,834 | import cv2
import numpy as np
def cutout_func(img, pad_size, replace=(0, 0, 0)):
replace = np.array(replace, dtype=np.uint8)
H, W = img.shape[0], img.shape[1]
rh, rw = np.random.random(2)
pad_size = pad_size // 2
ch, cw = int(rh * H), int(rw * W)
x1, x2 = max(ch - pad_size, 0), min(ch + pad_size, H)
y1, y2 = max(cw - pad_size, 0), min(cw + pad_size, W)
out = img.copy()
out[x1:x2, y1:y2, :] = replace
return out | null |
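Illustrative call: a randomly placed square of roughly pad_size x pad_size pixels (clipped at the borders) is overwritten on a copy of the input:
img = np.full((32, 32, 3), 255, dtype=np.uint8)
out = cutout_func(img, pad_size=8)
print((out == 0).all(axis=2).sum())  # up to 8*8 = 64 zeroed pixels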