id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
18,328 | import collections
import os
import unicodedata
from typing import List, Optional, Tuple
from ...utils.tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils.file_utils import logging
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
    """Run basic whitespace cleanup on *text* and split it into tokens.

    :param text: The input string.
    :return: A list of whitespace-separated tokens; empty list for
        empty/whitespace-only input.
    """
    cleaned = text.strip()
    if not cleaned:
        return []
    return cleaned.split()
18,329 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from .modeling_sbert import SbertConfig, SbertModel, load_tf_weights_in_bert
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load weights from a TensorFlow checkpoint into a PyTorch BERT model.

    :param model: Target PyTorch model; must expose ``model.bert`` and, for
        the classification head variables, ``model.classifier``.
    :param config: The model config (unused here; kept for API compatibility).
    :param tf_checkpoint_path: Path/prefix of the TF checkpoint.
    :return: The same ``model`` with weights copied in place.
    """
    import re

    # numpy is required below to transpose TF "kernel" matrices; the original
    # snippet referenced ``np`` without importing it (NameError at runtime).
    import numpy as np
    import tensorflow as tf

    def var_name_replace(var_name: str):
        # TF LayerNorm parameters are named beta/gamma; PyTorch uses bias/weight.
        return var_name.replace("/LayerNorm/beta", "/LayerNorm/bias") \
            .replace("/LayerNorm/gamma", "/LayerNorm/weight")

    # Load weights from TF model
    path = tf_checkpoint_path
    logger.info("Converting TensorFlow checkpoint from {}".format(path))
    init_vars = tf.train.list_variables(path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading {} with shape {}".format(name, shape))
        array = tf.train.load_variable(path, name)
        logger.info("Numpy array shape {}".format(array.shape))
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = var_name_replace(name)
        logger.info("Loading {}".format(name))
        if name == "output_bias":
            logger.info(model.classifier.bias.data.size())
            logger.info(torch.from_numpy(array).size())
            assert model.classifier.bias.data.size() == torch.from_numpy(array).size()
            model.classifier.bias.data = torch.from_numpy(array)
        elif name == "output_weights":
            assert model.classifier.weight.data.size() == torch.from_numpy(array).size()
            model.classifier.weight.data = torch.from_numpy(array)
        else:
            # NOTE(review): this strips the first 5 chars unconditionally
            # (assuming a "bert/" prefix); names without that prefix get
            # mangled, which is why truncated tokens like "redictions" and
            # "eq_relationship" appear in the skip list below — confirm.
            name = name[5:]  # skip "bert/"
            if name == "global_step" or name == "bad_steps" or name == "good_steps" or name.endswith(
                    "adam_v") or name.endswith("adam_m") or name == "loss_scale":
                continue
            name = name.split("/")
            if name[0] in ["redictions", "eq_relationship", "teps", "l_step", "steps", "scale"]:
                logger.info("Skipping")
                continue
            if "adam" in name[-1].lower():
                logger.info("Skipping adam")
                continue
            if "_power" in name[-1].lower():
                logger.info(f"Skipping exponential decay")
                continue
            # Walk the attribute path (e.g. encoder/layer_0/attention/...)
            # down from model.bert to the matching parameter.
            pointer = model.bert
            for m_name in name:
                if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                    # "layer_3" style components index into a ModuleList.
                    l = re.split(r'_(\d+)', m_name)
                else:
                    l = [m_name]
                if l[0] == "kernel":
                    pointer = getattr(pointer, "weight")
                else:
                    pointer = getattr(pointer, l[0])
                if len(l) >= 2:
                    num = int(l[1])
                    pointer = pointer[num]
            if m_name[-11:] == "_embeddings":
                pointer = getattr(pointer, "weight")
            elif m_name == "kernel":
                # TF dense kernels are stored transposed relative to
                # torch.nn.Linear.weight.
                array = np.transpose(array)
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            pointer.data = torch.from_numpy(array)
    return model
)
class SbertModel(SbertPreTrainedModel):
"""
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
cross-attention is added between the self-attention layers, following the architecture described in `Attention is
all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both :obj:`is_decoder`
argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is then expected as an
input to the forward pass.
"""
def __init__(self, config: SbertConfig, add_pooling_layer=True):
    """Build the model: embeddings, transformer encoder, optional pooler."""
    super().__init__(config)
    self.config = config
    # Core sub-modules.
    self.embeddings = SbertEmbeddings(config)
    self.encoder = SbertEncoder(config)
    if add_pooling_layer:
        self.pooler = SbertPooler(config)
    else:
        self.pooler = None
    self.init_weights()
def get_input_embeddings(self):
    """Return the word-embedding module used for input token ids."""
    embeddings = self.embeddings
    return embeddings.word_embeddings
def set_input_embeddings(self, value):
    """Replace the word-embedding module used for input token ids."""
    embeddings = self.embeddings
    embeddings.word_embeddings = value
def _prune_heads(self, heads_to_prune):
    """
    Prune attention heads of the model.

    heads_to_prune: dict of {layer_num: list of heads to prune in this layer}.
    See base class PreTrainedModel.
    """
    for layer_index, head_indices in heads_to_prune.items():
        self.encoder.layer[layer_index].attention.prune_heads(head_indices)
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=BaseModelOutputWithPoolingAndCrossAttentions,
config_class=_CONFIG_FOR_DOC,
)
def forward(
    self,
    input_ids=None,
    attention_mask=None,
    token_type_ids=None,
    position_ids=None,
    head_mask=None,
    inputs_embeds=None,
    encoder_hidden_states=None,
    encoder_attention_mask=None,
    past_key_values=None,
    use_cache=None,
    output_attentions=None,
    output_hidden_states=None,
    return_dict=None,
):
    r"""
    encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
        Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
        the model is configured as a decoder.
    encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
        Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
        the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:
        - 1 for tokens that are **not masked**,
        - 0 for tokens that are **masked**.
    past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
        Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
        If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
        (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
        instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
    use_cache (:obj:`bool`, `optional`):
        If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
        decoding (see :obj:`past_key_values`).
    """
    # Fall back to the config defaults for any unset output flags.
    output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
    output_hidden_states = (
        output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
    )
    return_dict = return_dict if return_dict is not None else self.config.use_return_dict
    # KV caching is only meaningful for decoder-mode models.
    if self.config.is_decoder:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
    else:
        use_cache = False
    # Exactly one of input_ids / inputs_embeds must be supplied.
    if input_ids is not None and inputs_embeds is not None:
        raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
    elif input_ids is not None:
        input_shape = input_ids.size()
    elif inputs_embeds is not None:
        input_shape = inputs_embeds.size()[:-1]
    else:
        raise ValueError("You have to specify either input_ids or inputs_embeds")
    batch_size, seq_length = input_shape
    device = input_ids.device if input_ids is not None else inputs_embeds.device
    # past_key_values_length
    past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
    # Default masks: attend everywhere, all tokens belong to segment 0.
    if attention_mask is None:
        attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device)
    if token_type_ids is None:
        if hasattr(self.embeddings, "token_type_ids"):
            # Reuse the registered buffer to keep dtype/device consistent.
            buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
            buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
            token_type_ids = buffered_token_type_ids_expanded
        else:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
    # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
    # ourselves in which case we just need to make it broadcastable to all heads.
    extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
    # If a 2D or 3D attention mask is provided for the cross-attention
    # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
    if self.config.is_decoder and encoder_hidden_states is not None:
        encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
        encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
        encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
    else:
        encoder_extended_attention_mask = None
    # Prepare head mask if needed
    # 1.0 in head_mask indicate we keep the head
    # attention_probs has shape bsz x n_heads x N x N
    # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
    # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
    head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
    # NOTE: unlike stock BERT, this embeddings module also returns the raw
    # input embeddings (return_inputs_embeds=True); they are surfaced in
    # the output below.
    embedding_output, orignal_embeds = self.embeddings(
        input_ids=input_ids,
        position_ids=position_ids,
        token_type_ids=token_type_ids,
        inputs_embeds=inputs_embeds,
        past_key_values_length=past_key_values_length,
        return_inputs_embeds=True,
    )
    encoder_outputs = self.encoder(
        embedding_output,
        attention_mask=extended_attention_mask,
        head_mask=head_mask,
        encoder_hidden_states=encoder_hidden_states,
        encoder_attention_mask=encoder_extended_attention_mask,
        past_key_values=past_key_values,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
    )
    sequence_output = encoder_outputs[0]
    pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
    if not return_dict:
        # Tuple output: (sequence_output, pooled_output, *extras, original embeddings).
        return (sequence_output, pooled_output) + encoder_outputs[1:] + (orignal_embeds,)
    return BaseModelOutputWithPoolingAndCrossAttentionsWithEmbedding(
        last_hidden_state=sequence_output,
        pooler_output=pooled_output,
        past_key_values=encoder_outputs.past_key_values,
        hidden_states=encoder_outputs.hidden_states,
        attentions=encoder_outputs.attentions,
        cross_attentions=encoder_outputs.cross_attentions,
        embedding_output=orignal_embeds
    )
"""
Sbert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next
sentence prediction (classification)` head.
""",
SBERT_START_DOCSTRING,
)
)
)
)
)
)
)
The provided code snippet includes necessary dependencies for implementing the `convert` function. Write a Python function `def convert(tf_checkpoint_path, sbert_config_file, pytorch_dump_path)` to solve the following problem:
Convert a basic backbone ckpt from tf to pt. :param tf_checkpoint_path: The tf checkpoint local dir. :param sbert_config_file: The sbert config file local dir. :param pytorch_dump_path: The local file path of the generated pytorch bin file. :return: None
Here is the function:
def convert(tf_checkpoint_path, sbert_config_file, pytorch_dump_path):
    """
    Convert a basic backbone ckpt from tf to pt.
    :param tf_checkpoint_path: The tf checkpoint local dir.
    :param sbert_config_file: The sbert config file local dir.
    :param pytorch_dump_path: The local file path of the generated pytorch bin file.
    :return: None
    """
    # Initialise PyTorch model
    config = SbertConfig.from_json_file(sbert_config_file)
    model = SbertModel(config)
    # Pass the parsed config object (not the config file path) as the second
    # argument, matching the load_tf_weights_in_bert(model, config, path)
    # signature; the original code passed sbert_config_file here.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    torch.save(model.state_dict(), pytorch_dump_path)
18,330 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
from .modeling_sbert import SbertConfig, load_tf_weights_in_bert
def load_tf_weights_in_bert(model, config, tf_checkpoint_path):
    """Load weights from a TensorFlow checkpoint into a PyTorch BERT model.

    :param model: Target PyTorch model; must expose ``model.bert`` and, for
        the classification head variables, ``model.classifier``.
    :param config: The model config (unused here; kept for API compatibility).
    :param tf_checkpoint_path: Path/prefix of the TF checkpoint.
    :return: The same ``model`` with weights copied in place.
    """
    import re

    # numpy is required below to transpose TF "kernel" matrices; the original
    # snippet referenced ``np`` without importing it (NameError at runtime).
    import numpy as np
    import tensorflow as tf

    def var_name_replace(var_name: str):
        # TF LayerNorm parameters are named beta/gamma; PyTorch uses bias/weight.
        return var_name.replace("/LayerNorm/beta", "/LayerNorm/bias") \
            .replace("/LayerNorm/gamma", "/LayerNorm/weight")

    # Load weights from TF model
    path = tf_checkpoint_path
    logger.info("Converting TensorFlow checkpoint from {}".format(path))
    init_vars = tf.train.list_variables(path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading {} with shape {}".format(name, shape))
        array = tf.train.load_variable(path, name)
        logger.info("Numpy array shape {}".format(array.shape))
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = var_name_replace(name)
        logger.info("Loading {}".format(name))
        if name == "output_bias":
            logger.info(model.classifier.bias.data.size())
            logger.info(torch.from_numpy(array).size())
            assert model.classifier.bias.data.size() == torch.from_numpy(array).size()
            model.classifier.bias.data = torch.from_numpy(array)
        elif name == "output_weights":
            assert model.classifier.weight.data.size() == torch.from_numpy(array).size()
            model.classifier.weight.data = torch.from_numpy(array)
        else:
            # NOTE(review): this strips the first 5 chars unconditionally
            # (assuming a "bert/" prefix); names without that prefix get
            # mangled, which is why truncated tokens like "redictions" and
            # "eq_relationship" appear in the skip list below — confirm.
            name = name[5:]  # skip "bert/"
            if name == "global_step" or name == "bad_steps" or name == "good_steps" or name.endswith(
                    "adam_v") or name.endswith("adam_m") or name == "loss_scale":
                continue
            name = name.split("/")
            if name[0] in ["redictions", "eq_relationship", "teps", "l_step", "steps", "scale"]:
                logger.info("Skipping")
                continue
            if "adam" in name[-1].lower():
                logger.info("Skipping adam")
                continue
            if "_power" in name[-1].lower():
                logger.info(f"Skipping exponential decay")
                continue
            # Walk the attribute path (e.g. encoder/layer_0/attention/...)
            # down from model.bert to the matching parameter.
            pointer = model.bert
            for m_name in name:
                if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                    # "layer_3" style components index into a ModuleList.
                    l = re.split(r'_(\d+)', m_name)
                else:
                    l = [m_name]
                if l[0] == "kernel":
                    pointer = getattr(pointer, "weight")
                else:
                    pointer = getattr(pointer, l[0])
                if len(l) >= 2:
                    num = int(l[1])
                    pointer = pointer[num]
            if m_name[-11:] == "_embeddings":
                pointer = getattr(pointer, "weight")
            elif m_name == "kernel":
                # TF dense kernels are stored transposed relative to
                # torch.nn.Linear.weight.
                array = np.transpose(array)
            try:
                assert pointer.shape == array.shape
            except AssertionError as e:
                e.args += (pointer.shape, array.shape)
                raise
            pointer.data = torch.from_numpy(array)
    return model
)
)
)
)
)
)
)
)
The provided code snippet includes necessary dependencies for implementing the `convert_all` function. Write a Python function `def convert_all(tf_checkpoint_path, sbert_config_file, pytorch_dump_path, module_clz)` to solve the following problem:
Convert a checkpoint from tf to pt. Only support backbones with a linear part called classifier. :param tf_checkpoint_path: The tf checkpoint local dir. :param sbert_config_file: The sbert config file local dir. :param pytorch_dump_path: The local file path of the generated pytorch bin file. :param module_clz: The model class. :return: None
Here is the function:
def convert_all(tf_checkpoint_path, sbert_config_file, pytorch_dump_path, module_clz):
    """
    Convert a checkpoint from tf to pt.
    Only support backbones with a linear part called classifier.
    :param tf_checkpoint_path: The tf checkpoint local dir.
    :param sbert_config_file: The sbert config file local dir.
    :param pytorch_dump_path: The local file path of the generated pytorch bin file.
    :param module_clz: The model class.
    :return: None
    """
    # Initialise PyTorch model
    config = SbertConfig.from_json_file(sbert_config_file)
    model = module_clz(config)
    # Load weights from TF model. Pass the parsed config object (not the
    # config file path) as the second argument, matching the
    # load_tf_weights_in_bert(model, config, path) signature; the original
    # code passed sbert_config_file here.
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    torch.save(model.state_dict(), pytorch_dump_path)
18,331 | from torch import nn
import torch
from ...utils import logging
logger = logging.get_logger(__name__)
def _symmetric_kl_div(logits1, logits2, attention_mask=None):
"""
Calclate two logits' the KL div value symmetrically.
:param logits1: The first logit.
:param logits2: The second logit.
:param attention_mask: An optional attention_mask which is used to mask some element out.
This is usually useful in token_classification tasks.
If the shape of logits is [N1, N2, ... Nn, D], the shape of attention_mask should be [N1, N2, ... Nn]
:return: The mean loss.
"""
labels_num = logits1.shape[-1]
KLDiv = nn.KLDivLoss(reduction='none')
loss = torch.sum(KLDiv(nn.LogSoftmax(dim=-1)(logits1), nn.Softmax(dim=-1)(logits2)), dim=-1) + \
torch.sum(KLDiv(nn.LogSoftmax(dim=-1)(logits2), nn.Softmax(dim=-1)(logits1)), dim=-1)
if attention_mask is not None:
loss = torch.sum(loss * attention_mask)/torch.sum(attention_mask)/labels_num
else:
loss = torch.mean(loss)/labels_num
return loss
The provided code snippet includes necessary dependencies for implementing the `compute_adv_loss` function. Write a Python function `def compute_adv_loss(embedding, model, ori_logits, ori_loss, adv_grad_factor, adv_bound=None, sigma=5e-6, **kwargs)` to solve the following problem:
Calculate the adv loss of the model. :param embedding: Original sentense embedding :param model: The model, or the forward function(including decoder/classifier), accept kwargs as input, output logits :param ori_logits: The original logits outputed from the model function :param ori_loss: The original loss :param adv_grad_factor: This factor will be multipled by the KL loss grad and then the result will be added to the original embedding. More details please check:https://arxiv.org/abs/1908.04577 The range of this value always be 1e-3~1e-7 :param adv_bound: adv_bound is used to cut the top and the bottom bound of the produced embedding. If not proveded, 2 * sigma will be used as the adv_bound factor :param sigma: The std factor used to produce a 0 mean normal distribution. If adv_bound not proveded, 2 * sigma will be used as the adv_bound factor :param kwargs: the input param used in model function :return: The original loss adds the adv loss
Here is the function:
def compute_adv_loss(embedding, model, ori_logits, ori_loss, adv_grad_factor,
                     adv_bound=None, sigma=5e-6, **kwargs):
    """
    Calculate the adversarial loss of the model and add it to the original loss.

    :param embedding: Original sentence embedding.
    :param model: The model, or the forward function (including decoder/classifier);
        accepts ``kwargs`` as input and outputs logits.
    :param ori_logits: The original logits output from the model function.
    :param ori_loss: The original loss.
    :param adv_grad_factor: This factor is multiplied by the normalized KL-loss
        gradient and the result is added to the perturbed embedding.
        More details: https://arxiv.org/abs/1908.04577
        The range of this value is usually 1e-3 ~ 1e-7.
    :param adv_bound: Used to clip the top and bottom bound of the produced
        embedding. If not provided, 2 * sigma is used as the adv_bound factor.
    :param sigma: The std used to produce a zero-mean normal perturbation.
        If adv_bound is not provided, 2 * sigma is used as the adv_bound factor.
    :param kwargs: The input params used in the model function; may carry a
        ``with_attention_mask`` flag and must carry ``attention_mask``.
    :return: ``ori_loss + adv_loss``, or ``ori_loss`` unchanged when the
        gradient norm contains NaN.
    """
    adv_bound = adv_bound if adv_bound is not None else 2 * sigma
    # Perturb the embedding with small zero-mean Gaussian noise.
    embedding_1 = embedding + embedding.data.new(embedding.size()).normal_(0, sigma)  # 95% in +- 1e-5
    # The model is fed inputs_embeds below, so drop token-id style inputs.
    kwargs.pop("input_ids")
    if "inputs_embeds" in kwargs:
        kwargs.pop("inputs_embeds")
    # Only use the attention mask for the KL mean when explicitly requested.
    with_attention_mask = False if "with_attention_mask" not in kwargs else kwargs["with_attention_mask"]
    attention_mask = kwargs["attention_mask"]
    if not with_attention_mask:
        attention_mask = None
    if "with_attention_mask" in kwargs:
        kwargs.pop("with_attention_mask")
    outputs = model(**kwargs, inputs_embeds=embedding_1)
    v1_logits = outputs.logits
    # Symmetric KL between clean and noisy logits drives the adversarial direction.
    loss = _symmetric_kl_div(ori_logits, v1_logits, attention_mask)
    emb_grad = torch.autograd.grad(loss, embedding_1)[0].data
    # Per-sample inf-norm of the gradient, reduced over dims 2 then 1,
    # used to normalize the adversarial step.
    emb_grad_norm = emb_grad.norm(dim=2, keepdim=True, p=float("inf")).max(1, keepdim=True)[0]
    is_nan = torch.any(torch.isnan(emb_grad_norm))
    if is_nan:
        # Bail out rather than propagate NaNs into the training loss.
        logger.warning("Nan occured when calculating adv loss.")
        return ori_loss
    emb_grad = emb_grad / emb_grad_norm
    # Step along the normalized gradient, then clip to [emb_1 - bound, emb_1 + bound].
    embedding_2 = embedding_1 + adv_grad_factor * emb_grad
    embedding_2 = torch.max(embedding_1 - adv_bound, embedding_2)
    embedding_2 = torch.min(embedding_1 + adv_bound, embedding_2)
    outputs = model(**kwargs, inputs_embeds=embedding_2)
    adv_logits = outputs.logits
    adv_loss = _symmetric_kl_div(ori_logits, adv_logits, attention_mask)
    return ori_loss + adv_loss
18,332 | from torch import nn
import torch
from ...utils import logging
logger = logging.get_logger(__name__)
def _symmetric_kl_div(logits1, logits2, attention_mask=None):
"""
Calclate two logits' the KL div value symmetrically.
:param logits1: The first logit.
:param logits2: The second logit.
:param attention_mask: An optional attention_mask which is used to mask some element out.
This is usually useful in token_classification tasks.
If the shape of logits is [N1, N2, ... Nn, D], the shape of attention_mask should be [N1, N2, ... Nn]
:return: The mean loss.
"""
labels_num = logits1.shape[-1]
KLDiv = nn.KLDivLoss(reduction='none')
loss = torch.sum(KLDiv(nn.LogSoftmax(dim=-1)(logits1), nn.Softmax(dim=-1)(logits2)), dim=-1) + \
torch.sum(KLDiv(nn.LogSoftmax(dim=-1)(logits2), nn.Softmax(dim=-1)(logits1)), dim=-1)
if attention_mask is not None:
loss = torch.sum(loss * attention_mask)/torch.sum(attention_mask)/labels_num
else:
loss = torch.mean(loss)/labels_num
return loss
The provided code snippet includes necessary dependencies for implementing the `compute_adv_loss_pair` function. Write a Python function `def compute_adv_loss_pair(embedding, model, start_logits, end_logits, ori_loss, adv_grad_factor, adv_bound=None, sigma=5e-6, **kwargs)` to solve the following problem:
Calculate the adv loss of the model. This function is used in the pair logits scenerio. :param embedding: Original sentense embedding :param model: The model, or the forward function(including decoder/classifier), accept kwargs as input, output logits :param start_logits: The original start logits outputed from the model function :param end_logits: The original end logits outputed from the model function :param ori_loss: The original loss :param adv_grad_factor: This factor will be multipled by the KL loss grad and then the result will be added to the original embedding. More details please check:https://arxiv.org/abs/1908.04577 The range of this value always be 1e-3~1e-7 :param adv_bound: adv_bound is used to cut the top and the bottom bound of the produced embedding. If not proveded, 2 * sigma will be used as the adv_bound factor :param sigma: The std factor used to produce a 0 mean normal distribution. If adv_bound not proveded, 2 * sigma will be used as the adv_bound factor :param kwargs: the input param used in model function :return: The original loss adds the adv loss
Here is the function:
def compute_adv_loss_pair(embedding, model, start_logits, end_logits, ori_loss, adv_grad_factor,
                          adv_bound=None, sigma=5e-6, **kwargs):
    """
    Calculate the adversarial loss of the model for the pair-logits scenario
    (e.g. question answering with start/end logits).

    :param embedding: Original sentence embedding.
    :param model: The model, or the forward function (including decoder/classifier);
        accepts ``kwargs`` as input and outputs a pair of logits.
    :param start_logits: The original start logits output from the model function.
    :param end_logits: The original end logits output from the model function.
    :param ori_loss: The original loss.
    :param adv_grad_factor: This factor is multiplied by the normalized KL-loss
        gradient and the result is added to the perturbed embedding.
        More details: https://arxiv.org/abs/1908.04577
        The range of this value is usually 1e-3 ~ 1e-7.
    :param adv_bound: Used to clip the top and bottom bound of the produced
        embedding. If not provided, 2 * sigma is used as the adv_bound factor.
    :param sigma: The std used to produce a zero-mean normal perturbation.
        If adv_bound is not provided, 2 * sigma is used as the adv_bound factor.
    :param kwargs: The input params used in the model function.
    :return: ``ori_loss + adv_loss``, or ``ori_loss`` unchanged when the
        gradient norm contains NaN.
    """
    adv_bound = adv_bound if adv_bound is not None else 2 * sigma
    # Perturb the embedding with small zero-mean Gaussian noise.
    embedding_1 = embedding + embedding.data.new(embedding.size()).normal_(0, sigma)  # 95% in +- 1e-5
    # The model is fed inputs_embeds below, so drop token-id style inputs.
    kwargs.pop("input_ids")
    if "inputs_embeds" in kwargs:
        kwargs.pop("inputs_embeds")
    outputs = model(**kwargs, inputs_embeds=embedding_1)
    v1_logits_start, v1_logits_end = outputs.logits
    # Average the symmetric KL over the start and end heads.
    loss = _symmetric_kl_div(start_logits, v1_logits_start) + _symmetric_kl_div(end_logits, v1_logits_end)
    loss = loss/2
    emb_grad = torch.autograd.grad(loss, embedding_1)[0].data
    # Per-sample inf-norm of the gradient, used to normalize the adversarial step.
    emb_grad_norm = emb_grad.norm(dim=2, keepdim=True, p=float("inf")).max(1, keepdim=True)[0]
    is_nan = torch.any(torch.isnan(emb_grad_norm))
    if is_nan:
        # Bail out rather than propagate NaNs into the training loss.
        logger.warning("Nan occured when calculating pair adv loss.")
        return ori_loss
    emb_grad = emb_grad / emb_grad_norm
    # Step along the normalized gradient, then clip to [emb_1 - bound, emb_1 + bound].
    embedding_2 = embedding_1 + adv_grad_factor * emb_grad
    embedding_2 = torch.max(embedding_1 - adv_bound, embedding_2)
    embedding_2 = torch.min(embedding_1 + adv_bound, embedding_2)
    outputs = model(**kwargs, inputs_embeds=embedding_2)
    adv_logits_start, adv_logits_end = outputs.logits
    adv_loss = _symmetric_kl_div(start_logits, adv_logits_start) + _symmetric_kl_div(end_logits, adv_logits_end)
    return ori_loss + adv_loss
18,333 | import os
import random
import time
import numpy as np
import torch
def get_log_constant(user_log):
    """Return the log-line prefix: '[user log]' when *user_log* is truthy, else ''."""
    if user_log:
        return '[user log]'
    return ''
18,334 | import os
import random
import time
import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `print_args` function. Write a Python function `def print_args(args)` to solve the following problem:
Print arguments.
Here is the function:
def print_args(args):
    """Print arguments: one aligned 'name ... value' line per attribute of *args*."""
    print('arguments:', flush=True)
    for arg_name in vars(args):
        # Pad with dots so the values line up in a column.
        padding = '.' * (29 - len(arg_name))
        line = ' {} {} {}'.format(arg_name, padding, getattr(args, arg_name))
        print(line, flush=True)
18,335 | import os
import random
import time
import numpy as np
import torch
def print_rank_0(message):
    """Print *message* only on distributed rank 0; always print when
    torch.distributed is not initialized (single-process runs)."""
    should_print = True
    if torch.distributed.is_initialized():
        should_print = torch.distributed.get_rank() == 0
    if should_print:
        print(message, flush=True)
The provided code snippet includes necessary dependencies for implementing the `report_memory` function. Write a Python function `def report_memory(name)` to solve the following problem:
Simple GPU memory report.
Here is the function:
def report_memory(name):
    """Simple GPU memory report.

    Prints allocated and reserved (cached) CUDA memory, in MB, via
    print_rank_0 so only rank 0 reports in distributed runs.
    :param name: Label prefixed to the report line.
    """
    mega_bytes = 1024.0 * 1024.0
    string = name + ' memory (MB)'
    string += ' | allocated: {}'.format(
        torch.cuda.memory_allocated() / mega_bytes)
    string += ' | max allocated: {}'.format(
        torch.cuda.max_memory_allocated() / mega_bytes)
    # torch.cuda.memory_cached()/max_memory_cached() are deprecated (and
    # removed in newer torch releases); memory_reserved() is the replacement.
    string += ' | cached: {}'.format(torch.cuda.memory_reserved() / mega_bytes)
    string += ' | max cached: {}'.format(
        torch.cuda.max_memory_reserved() / mega_bytes)
    print_rank_0(string)
18,336 | import re
import os
import sys
from .compat import _report_compat_error
# Per-framework lookup tables mapping a model-head suffix (matched against the
# model class name by add_code_sample_docstrings) to its code-sample docstring
# template. The *_SAMPLE template constants are defined elsewhere in this module.
PT_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": PT_MASKED_LM_SAMPLE,
    "LMHead": PT_CAUSAL_LM_SAMPLE,
    "BaseModel": PT_BASE_MODEL_SAMPLE,
    "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
    "CTC": PT_SPEECH_CTC_SAMPLE,
    "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
    "SpanClassification": PT_SPAN_CLASS_SAMPLE,
}
# TensorFlow variant: no speech/span entries.
TF_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": TF_MASKED_LM_SAMPLE,
    "LMHead": TF_CAUSAL_LM_SAMPLE,
    "BaseModel": TF_BASE_MODEL_SAMPLE,
}
# Flax variant: same heads as TF, different key ordering.
FLAX_SAMPLE_DOCSTRINGS = {
    "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
    "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
    "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
    "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
    "MaskedLM": FLAX_MASKED_LM_SAMPLE,
    "BaseModel": FLAX_BASE_MODEL_SAMPLE,
    "LMHead": FLAX_CAUSAL_LM_SAMPLE,
}
def _prepare_output_docstrings(output_type, config_class):
    """
    Prepares the return part of the docstring using `output_type`.
    """
    doc = output_type.__doc__
    # Skip everything up to (and including) the Args/Parameters header so
    # only the argument list remains.
    lines = doc.split("\n")
    idx = 0
    while idx < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[idx]) is None:
        idx += 1
    if idx < len(lines):
        doc = "\n".join(lines[idx + 1:])
        doc = _convert_output_args_doc(doc)
    # Prepend the framework-specific "Returns" introduction.
    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
    if output_type.__name__.startswith("TF"):
        intro = TF_RETURN_INTRODUCTION
    else:
        intro = PT_RETURN_INTRODUCTION
    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
    return intro + doc
def _prepare_output_docstrings(output_type, config_class):
    """
    Prepares the return part of the docstring using `output_type`.
    """
    # Full docstring of the model-output class; everything above the
    # "Args:"/"Parameters:" header is stripped below.
    docstrings = output_type.__doc__
    # Remove the head of the docstring to keep the list of args only
    lines = docstrings.split("\n")
    i = 0
    while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
        i += 1
    if i < len(lines):
        # Header found: keep only the argument list and reformat it for display.
        docstrings = "\n".join(lines[(i + 1) :])
        docstrings = _convert_output_args_doc(docstrings)
    # Add the return introduction
    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
    # TF output classes are conventionally prefixed with "TF".
    intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
    return intro + docstrings
def add_code_sample_docstrings(
*docstr,
processor_class=None,
checkpoint=None,
output_type=None,
config_class=None,
mask=None,
model_cls=None,
modality=None
):
def docstring_decorator(fn):
# model_class defaults to function's class if not specified otherwise
model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls
if model_class[:2] == "TF":
sample_docstrings = TF_SAMPLE_DOCSTRINGS
elif model_class[:4] == "Flax":
sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
else:
sample_docstrings = PT_SAMPLE_DOCSTRINGS
doc_kwargs = dict(model_class=model_class, processor_class=processor_class, checkpoint=checkpoint)
if "SequenceClassification" in model_class and modality == "audio":
code_sample = sample_docstrings["AudioClassification"]
elif "SequenceClassification" in model_class:
code_sample = sample_docstrings["SequenceClassification"]
elif "QuestionAnswering" in model_class:
code_sample = sample_docstrings["QuestionAnswering"]
elif "TokenClassification" in model_class:
code_sample = sample_docstrings["TokenClassification"]
elif "MultipleChoice" in model_class:
code_sample = sample_docstrings["MultipleChoice"]
elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
doc_kwargs["mask"] = "[MASK]" if mask is None else mask
code_sample = sample_docstrings["MaskedLM"]
elif "LMHead" in model_class or "CausalLM" in model_class:
code_sample = sample_docstrings["LMHead"]
elif "CTC" in model_class:
code_sample = sample_docstrings["CTC"]
elif "Model" in model_class and modality == "audio":
code_sample = sample_docstrings["SpeechBaseModel"]
elif "Model" in model_class or "Encoder" in model_class:
code_sample = sample_docstrings["BaseModel"]
elif "Span" in model_class:
code_sample = sample_docstrings["SpanClassification"]
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else ""
built_doc = code_sample.format(**doc_kwargs)
fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + output_doc + built_doc
return fn
return docstring_decorator | null |
18,337 | import re
import os
import sys
from .compat import _report_compat_error
def _prepare_output_docstrings(output_type, config_class):
    """
    Prepares the return part of the docstring using `output_type`.
    """
    # Full docstring of the model-output class; everything above the
    # "Args:"/"Parameters:" header is stripped below.
    docstrings = output_type.__doc__
    # Remove the head of the docstring to keep the list of args only
    lines = docstrings.split("\n")
    i = 0
    while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
        i += 1
    if i < len(lines):
        # Header found: keep only the argument list and reformat it for display.
        docstrings = "\n".join(lines[(i + 1) :])
        docstrings = _convert_output_args_doc(docstrings)
    # Add the return introduction
    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
    # TF output classes are conventionally prefixed with "TF".
    intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
    return intro + docstrings
def _prepare_output_docstrings(output_type, config_class):
    """
    Prepares the return part of the docstring using `output_type`.
    """
    # Full docstring of the model-output class; everything above the
    # "Args:"/"Parameters:" header is stripped below.
    docstrings = output_type.__doc__
    # Remove the head of the docstring to keep the list of args only
    lines = docstrings.split("\n")
    i = 0
    while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
        i += 1
    if i < len(lines):
        # Header found: keep only the argument list and reformat it for display.
        docstrings = "\n".join(lines[(i + 1) :])
        docstrings = _convert_output_args_doc(docstrings)
    # Add the return introduction
    full_output_type = f"{output_type.__module__}.{output_type.__name__}"
    # TF output classes are conventionally prefixed with "TF".
    intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
    intro = intro.format(full_output_type=full_output_type, config_class=config_class)
    return intro + docstrings
def replace_return_docstrings(output_type=None, config_class=None):
def docstring_decorator(fn):
docstrings = fn.__doc__
lines = docstrings.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
lines[i] = _prepare_output_docstrings(output_type, config_class)
docstrings = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}"
)
fn.__doc__ = docstrings
return fn
return docstring_decorator | null |
18,338 | import re
import os
import sys
from .compat import _report_compat_error
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator | null |
18,339 | import re
import os
import sys
from .compat import _report_compat_error
def add_start_docstrings_to_model_forward(*docstr):
def docstring_decorator(fn):
class_name = f":class:`~transformers.{fn.__qualname__.split('.')[0]}`"
intro = f" The {class_name} forward method, overrides the :func:`__call__` special method."
note = r"""
.. note::
Although the recipe for forward pass needs to be defined within this function, one should call the
:class:`Module` instance afterwards instead of this since the former takes care of running the pre and post
processing steps while the latter silently ignores them.
"""
fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator | null |
18,340 | import re
import os
import sys
from .compat import _report_compat_error
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + "".join(docstr)
return fn
return docstring_decorator | null |
18,341 | import re
import os
import sys
from .compat import _report_compat_error
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
The provided code snippet includes necessary dependencies for implementing the `_convert_output_args_doc` function. Write a Python function `def _convert_output_args_doc(output_args_doc)` to solve the following problem:
Convert output_args_doc to display properly.
Here is the function:
def _convert_output_args_doc(output_args_doc):
"""Convert output_args_doc to display properly."""
# Split output_arg_doc in blocks argument/description
indent = _get_indent(output_args_doc)
blocks = []
current_block = ""
for line in output_args_doc.split("\n"):
# If the indent is the same as the beginning, the line is the name of new arg.
if _get_indent(line) == indent:
if len(current_block) > 0:
blocks.append(current_block[:-1])
current_block = f"{line}\n"
else:
# Otherwise it's part of the description of the current arg.
# We need to remove 2 spaces to the indentation.
current_block += f"{line[2:]}\n"
blocks.append(current_block[:-1])
# Format each block for proper rendering
for i in range(len(blocks)):
blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
return "\n".join(blocks) | Convert output_args_doc to display properly. |
18,342 | import argparse
import json
import sys
import zipfile
import numpy as np
from collections import Counter, defaultdict
import copy
import math, re
The provided code snippet includes necessary dependencies for implementing the `my_lcs` function. Write a Python function `def my_lcs(string, sub)` to solve the following problem:
Calculates longest common subsequence for a pair of tokenized strings :param string : list of str : tokens from a string split using whitespace :param sub : list of str : shorter string, also split using whitespace :returns: length (list of int): length of the longest common subsequence between the two strings Note: my_lcs only gives length of the longest common subsequence, not the actual LCS
Here is the function:
def my_lcs(string, sub):
    """
    Length of the longest common subsequence of two token lists.

    :param string: list of str, tokens from a whitespace-split string
    :param sub: list of str, the (usually shorter) second token list
    :returns: int, length of the LCS (the subsequence itself is not built)
    """
    # Keep `sub` as the shorter sequence so the DP table is rows x cols
    # with rows >= cols.
    if len(string) < len(sub):
        sub, string = string, sub
    rows, cols = len(string), len(sub)
    # table[i][j] = LCS length of string[:i] and sub[:j].
    table = [[0] * (cols + 1) for _ in range(rows + 1)]
    for j in range(1, cols + 1):
        for i in range(1, rows + 1):
            if string[i - 1] == sub[j - 1]:
                table[i][j] = table[i - 1][j - 1] + 1
            else:
                table[i][j] = max(table[i - 1][j], table[i][j - 1])
    return table[rows][cols]
18,343 | import argparse
import json
import sys
import zipfile
import numpy as np
from collections import Counter, defaultdict
import copy
import math, re
def precook(s, n=4, out=False):
    """Count the n-grams (orders 1..n) of a whitespace-tokenized string.

    Returns (token_count, counts) where counts maps each n-gram tuple to its
    frequency — the form consumed by cook_refs / cook_test. The `out`
    argument is accepted for API compatibility and unused.
    """
    tokens = s.split()
    ngram_counts = defaultdict(int)
    for order in range(1, n + 1):
        # zip over shifted views yields every contiguous `order`-gram.
        for gram in zip(*(tokens[j:] for j in range(order))):
            ngram_counts[gram] += 1
    return (len(tokens), ngram_counts)
The provided code snippet includes necessary dependencies for implementing the `cook_refs` function. Write a Python function `def cook_refs(refs, eff=None, n=4)` to solve the following problem:
Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them.
Here is the function:
def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average"
'''Takes a list of reference sentences for a single segment
and returns an object that encapsulates everything that BLEU
needs to know about them.'''
reflen = []
maxcounts = {}
for ref in refs:
rl, counts = precook(ref, n)
reflen.append(rl)
for (ngram,count) in counts.items():
maxcounts[ngram] = max(maxcounts.get(ngram,0), count)
# Calculate effective reference sentence length.
if eff == "shortest":
reflen = min(reflen)
elif eff == "average":
reflen = float(sum(reflen))/len(reflen)
## lhuang: N.B.: leave reflen computaiton to the very end!!
## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design)
return (reflen, maxcounts) | Takes a list of reference sentences for a single segment and returns an object that encapsulates everything that BLEU needs to know about them. |
18,344 | import argparse
import json
import sys
import zipfile
import numpy as np
from collections import Counter, defaultdict
import copy
import math, re
def precook(s, n=4, out=False):
    """Takes a string as input and returns an object that can be given to
    either cook_refs or cook_test. This is optional: cook_refs and cook_test
    can take string arguments as well."""
    # Whitespace tokenization; BLEU here operates on pre-tokenized text.
    words = s.split()
    # counts maps each n-gram tuple (orders 1..n) to its frequency in s.
    counts = defaultdict(int)
    for k in range(1,n+1):
        for i in range(len(words)-k+1):
            ngram = tuple(words[i:i+k])
            counts[ngram] += 1
    # NOTE(review): `out` is never read in this body — kept for API
    # compatibility only.
    return (len(words), counts)
The provided code snippet includes necessary dependencies for implementing the `cook_test` function. Write a Python function `def cook_test(test, xxx_todo_changeme, eff=None, n=4)` to solve the following problem:
Takes a test sentence and returns an object that encapsulates everything that BLEU needs to know about it.
Here is the function:
def cook_test(test, xxx_todo_changeme, eff=None, n=4):
'''Takes a test sentence and returns an object that
encapsulates everything that BLEU needs to know about it.'''
(reflen, refmaxcounts) = xxx_todo_changeme
testlen, counts = precook(test, n, True)
result = {}
# Calculate effective reference sentence length.
if eff == "closest":
result["reflen"] = min((abs(l-testlen), l) for l in reflen)[1]
else: ## i.e., "average" or "shortest" or None
result["reflen"] = reflen
result["testlen"] = testlen
result["guess"] = [max(0,testlen-k+1) for k in range(1,n+1)]
result['correct'] = [0]*n
for (ngram, count) in counts.items():
result["correct"][len(ngram)-1] += min(refmaxcounts.get(ngram,0), count)
return result | Takes a test sentence and returns an object that encapsulates everything that BLEU needs to know about it. |
18,345 | import argparse
import json
import sys
import zipfile
import numpy as np
from collections import Counter, defaultdict
import copy
import math, re
def data_check(obj, task):
    """
    Check that one parsed result object carries all required fields.

    Args:
        obj: dict parsed from one json line of a prediction/reference file.
        task: task name (not inspected here; kept for interface compatibility).

    Raises:
        AssertionError: when a required field is missing or malformed.
    """
    assert 'question_id' in obj, "Missing 'question_id' field."
    # Bug fix: the failure message must not index the very field being
    # checked — formatting obj['question_type'] here raised KeyError instead
    # of the intended AssertionError whenever the field was absent.
    assert 'question_type' in obj, \
        "Missing 'question_type' field. question_id: {}".format(obj['question_id'])
    assert 'yesno_answers' in obj, \
        "Missing 'yesno_answers' field. question_id: {}".format(obj['question_id'])
    assert isinstance(obj['yesno_answers'], list), \
        r"""'yesno_answers' field must be a list, if the 'question_type' is not
        'YES_NO', then this field should be an empty list.
        question_id: {}""".format(obj['question_id'])
    assert 'entity_answers' in obj, \
        "Missing 'entity_answers' field. question_id: {}".format(obj['question_id'])
    assert isinstance(obj['entity_answers'], list) \
        and len(obj['entity_answers']) > 0, \
        r"""'entity_answers' field must be a list, and has at least one element,
        which can be a empty list. question_id: {}""".format(obj['question_id'])
The provided code snippet includes necessary dependencies for implementing the `read_file` function. Write a Python function `def read_file(file_name, task, is_ref=False)` to solve the following problem:
Read predict answers or reference answers from file. Args: file_name: the name of the file containing predict result or reference result. Returns: A dictionary mapping question_id to the result information. The result information itself is also a dictionary with has four keys: - question_type: type of the query. - yesno_answers: A list of yesno answers corresponding to 'answers'. - answers: A list of predicted answers. - entity_answers: A list, each element is also a list containing the entities tagged out from the corresponding answer string.
Here is the function:
def read_file(file_name, task, is_ref=False):
"""
Read predict answers or reference answers from file.
Args:
file_name: the name of the file containing predict result or reference
result.
Returns:
A dictionary mapping question_id to the result information. The result
information itself is also a dictionary with has four keys:
- question_type: type of the query.
- yesno_answers: A list of yesno answers corresponding to 'answers'.
- answers: A list of predicted answers.
- entity_answers: A list, each element is also a list containing the entities
tagged out from the corresponding answer string.
"""
def _open(file_name, mode, zip_obj=None):
if zip_obj is not None:
return zip_obj.open(file_name, mode)
return open(file_name, mode)
results = {}
keys = ['answers', 'yesno_answers', 'entity_answers', 'question_type']
if is_ref:
keys += ['source']
zf = zipfile.ZipFile(file_name, 'r') if file_name.endswith('.zip') else None
file_list = [file_name] if zf is None else zf.namelist()
for fn in file_list:
for line in _open(fn, 'r', zip_obj=zf):
try:
obj = json.loads(line.strip())
except ValueError:
raise ValueError("Every line of data should be legal json")
data_check(obj, task)
qid = obj['question_id']
assert qid not in results, "Duplicate question_id: {}".format(qid)
results[qid] = {}
for k in keys:
results[qid][k] = obj[k]
return results | Read predict answers or reference answers from file. Args: file_name: the name of the file containing predict result or reference result. Returns: A dictionary mapping question_id to the result information. The result information itself is also a dictionary with has four keys: - question_type: type of the query. - yesno_answers: A list of yesno answers corresponding to 'answers'. - answers: A list of predicted answers. - entity_answers: A list, each element is also a list containing the entities tagged out from the corresponding answer string. |
18,346 | import argparse
import json
import sys
import zipfile
import numpy as np
from collections import Counter, defaultdict
import copy
import math, re
def compute_bleu_rouge(pred_dict, ref_dict, bleu_order=4):
    """
    Compute BLEU-1..bleu_order and ROUGE-L over aligned answer dicts.

    Both dicts must share exactly the same key set; the returned dict maps
    'Bleu-i' and 'Rouge-L' to the corresponding corpus-level score.
    """
    assert set(pred_dict.keys()) == set(ref_dict.keys()), \
        "missing keys: {}".format(set(ref_dict.keys()) - set(pred_dict.keys()))
    bleu_scores, _ = Bleu(bleu_order).compute_score(ref_dict, pred_dict)
    scores = {'Bleu-%d' % (i + 1): score for i, score in enumerate(bleu_scores)}
    rouge_score, _ = Rouge().compute_score(ref_dict, pred_dict)
    scores['Rouge-L'] = rouge_score
    return scores
def compute_prf(pred_dict, ref_dict):
    """
    Compute micro-averaged precision, recall and F1 over entity answers.

    Args:
        pred_dict: maps question_id to a one-element list containing the
            predicted entity list.
        ref_dict: maps question_id to a list of candidate reference entity
            lists; each prediction is scored against the reference list that
            maximizes its local F1 (as computed by `local_prf`).

    Returns:
        dict with 'Precision', 'Recall' and 'F1'.
    """
    # (Removed an unused `pred_question_ids = set(pred_dict.keys())` local.)
    correct_preds, total_correct, total_preds = 0, 0, 0
    for question_id in ref_dict:
        pred_entity_list = pred_dict.get(question_id, [[]])
        assert len(pred_entity_list) == 1, \
            'the number of entity list for question_id {} is not 1.'.format(question_id)
        pred_entity_list = pred_entity_list[0]
        all_ref_entity_lists = ref_dict[question_id]
        # Pick the reference entity list with the best local F1.
        best_local_f1 = 0
        best_ref_entity_list = None
        for ref_entity_list in all_ref_entity_lists:
            local_f1 = local_prf(pred_entity_list, ref_entity_list)[2]
            if local_f1 > best_local_f1:
                best_ref_entity_list = ref_entity_list
                best_local_f1 = local_f1
        if best_ref_entity_list is None:
            # No reference scored above 0: fall back to the shortest one
            # (first such when tied, matching the previous stable sort).
            if all_ref_entity_lists:
                best_ref_entity_list = min(all_ref_entity_lists, key=len)
            else:
                best_ref_entity_list = []
        gold_entities = set(best_ref_entity_list)
        pred_entities = set(pred_entity_list)
        correct_preds += len(gold_entities & pred_entities)
        total_preds += len(pred_entities)
        total_correct += len(gold_entities)
    p = float(correct_preds) / total_preds if correct_preds > 0 else 0
    r = float(correct_preds) / total_correct if correct_preds > 0 else 0
    f1 = 2 * p * r / (p + r) if correct_preds > 0 else 0
    return {'Precision': p, 'Recall': r, 'F1': f1}
def prepare_prf(pred_dict, ref_dict):
    """
    Project both result dicts down to their 'entity_answers' field, the
    shape expected by compute_prf.
    """
    preds = {}
    refs = {}
    for qid, info in pred_dict.items():
        preds[qid] = info['entity_answers']
    for qid, info in ref_dict.items():
        refs[qid] = info['entity_answers']
    return preds, refs
def filter_dict(result_dict, key_tag):
    """
    Return the sub-dict of entries whose key ends with `key_tag`.
    """
    return {key: value
            for key, value in result_dict.items()
            if key.endswith(key_tag)}
def prepare_bleu(pred_result, ref_result, task):
    """
    Build aligned {question_id: [answer]} dicts for BLEU/ROUGE computation.

    Dispatches per `task` to the matching result extractor, normalizes every
    answer, and drops entries whose reference is empty or the EMPTY
    placeholder.

    Raises:
        ValueError: for an unknown task name.
        AssertionError: when a prediction has more than one answer.
    """
    pred_list, ref_list = [], []
    qids = ref_result.keys()
    for qid in qids:
        if task == 'main':
            pred, ref = get_main_result(qid, pred_result, ref_result)
        elif task == 'yesno':
            pred, ref = get_yesno_result(qid, pred_result, ref_result)
        elif task == 'all':
            pred, ref = get_all_result(qid, pred_result, ref_result)
        elif task == 'entity':
            pred, ref = get_entity_result(qid, pred_result, ref_result)
        elif task == 'description':
            pred, ref = get_desc_result(qid, pred_result, ref_result)
        else:
            raise ValueError("Illegal task name: {}".format(task))
        if pred and ref:
            pred_list += pred
            ref_list += ref
    pred_dict = dict(pred_list)
    ref_dict = dict(ref_list)
    # Bug fix: iterate over a snapshot — entries are deleted below, and
    # deleting from a dict while iterating its live items() view raises
    # "RuntimeError: dictionary changed size during iteration" in Python 3.
    for qid, ans in list(ref_dict.items()):
        ref_dict[qid] = normalize(ref_dict[qid])
        pred_dict[qid] = normalize(pred_dict.get(qid, [EMPTY]))
        if not ans or ans == [EMPTY]:
            del ref_dict[qid]
            del pred_dict[qid]
    for k, v in pred_dict.items():
        assert len(v) == 1, \
            "There should be only one predict answer. question_id: {}".format(k)
    return pred_dict, ref_dict
The provided code snippet includes necessary dependencies for implementing the `get_metrics` function. Write a Python function `def get_metrics(pred_result, ref_result, task, source)` to solve the following problem:
Computes metrics.
Here is the function:
def get_metrics(pred_result, ref_result, task, source):
"""
Computes metrics.
"""
metrics = {}
ref_result_filtered = {}
pred_result_filtered = {}
if source == 'both':
ref_result_filtered = ref_result
pred_result_filtered = pred_result
else:
for question_id, info in ref_result.items():
if info['source'] == source:
ref_result_filtered[question_id] = info
if question_id in pred_result:
pred_result_filtered[question_id] = pred_result[question_id]
if task == 'main' or task == 'all' \
or task == 'description':
pred_dict, ref_dict = prepare_bleu(pred_result_filtered,
ref_result_filtered,
task)
metrics = compute_bleu_rouge(pred_dict, ref_dict)
elif task == 'yesno':
pred_dict, ref_dict = prepare_bleu(pred_result_filtered,
ref_result_filtered,
task)
keys = ['Yes', 'No', 'Depends']
preds = [filter_dict(pred_dict, k) for k in keys]
refs = [filter_dict(ref_dict, k) for k in keys]
metrics = compute_bleu_rouge(pred_dict, ref_dict)
for k, pred, ref in zip(keys, preds, refs):
m = compute_bleu_rouge(pred, ref)
k_metric = [(k + '|' + key, v) for key, v in m.items()]
metrics.update(k_metric)
elif task == 'entity':
pred_dict, ref_dict = prepare_prf(pred_result_filtered,
ref_result_filtered)
pred_dict_bleu, ref_dict_bleu = prepare_bleu(pred_result_filtered,
ref_result_filtered,
task)
metrics = compute_prf(pred_dict, ref_dict)
metrics.update(compute_bleu_rouge(pred_dict_bleu, ref_dict_bleu))
else:
raise ValueError("Illegal task name: {}".format(task))
return metrics | Computes metrics. |
18,347 | import argparse
import json
import sys
import zipfile
import numpy as np
from collections import Counter, defaultdict
import copy
import math, re
The provided code snippet includes necessary dependencies for implementing the `format_metrics` function. Write a Python function `def format_metrics(metrics, task, err_msg)` to solve the following problem:
Format metrics. The 'err' field returns any error that occurred during evaluation. Args: metrics: A dict object containing metrics for different tasks. task: Task name. err_msg: Exception raised during evaluation. Returns: Formatted result.
Here is the function:
def format_metrics(metrics, task, err_msg):
"""
Format metrics. 'err' field returns any error occured during evaluation.
Args:
metrics: A dict object contains metrics for different tasks.
task: Task name.
err_msg: Exception raised during evaluation.
Returns:
Formatted result.
"""
result = {}
sources = ["both", "search", "zhidao"]
if err_msg is not None:
return {'errorMsg': str(err_msg), 'errorCode': 1, 'data': []}
data = []
if task != 'all' and task != 'main':
sources = ["both"]
if task == 'entity':
metric_names = ["Bleu-4", "Rouge-L"]
metric_names_prf = ["F1", "Precision", "Recall"]
for name in metric_names + metric_names_prf:
for src in sources:
obj = {
"name": name,
"value": round(metrics[src].get(name, 0) * 100, 2),
"type": src,
}
data.append(obj)
elif task == 'yesno':
metric_names = ["Bleu-4", "Rouge-L"]
details = ["Yes", "No", "Depends"]
src = sources[0]
for name in metric_names:
obj = {
"name": name,
"value": round(metrics[src].get(name, 0) * 100, 2),
"type": 'All',
}
data.append(obj)
for d in details:
obj = {
"name": name,
"value": \
round(metrics[src].get(d + '|' + name, 0) * 100, 2),
"type": d,
}
data.append(obj)
else:
metric_names = ["Bleu-4", "Rouge-L"]
for name in metric_names:
for src in sources:
obj = {
"name": name,
"value": \
round(metrics[src].get(name, 0) * 100, 2),
"type": src,
}
data.append(obj)
result["data"] = data
result["errorCode"] = 0
result["errorMsg"] = "success"
return result | Format metrics. 'err' field returns any error occured during evaluation. Args: metrics: A dict object contains metrics for different tasks. task: Task name. err_msg: Exception raised during evaluation. Returns: Formatted result. |
18,348 | import os
import random
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from sofa.utils import mpu,print_rank_0
def _get_ckpt_name(mpu, checkpoints_path, tag):
mp_rank = 0 if mpu is None else mpu.get_model_parallel_rank()
ckpt_name = os.path.join(checkpoints_path,
str(tag),
'mp_rank_{:02d}'.format(mp_rank) + '_model_states.pt')
return ckpt_name
def pre_load(mpu,
load_dir,
tag):
load_path = _get_ckpt_name(mpu, load_dir, tag)
checkpoint = torch.load(load_path, map_location=lambda storage, loc: storage)
return checkpoint['module'] | null |
18,349 | import os
import random
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from sofa.utils import mpu,print_rank_0
def get_checkpoint_name(checkpoints_path, iteration, release=False, zero=False):
    """Build the path of a Megatron-style checkpoint file:
    <checkpoints_path>/<iter dir>/mp_rank_XX/model_optim_rng.pt. The
    iteration directory is 'release' for release checkpoints, and gets a
    '_zero_dp_rank_<r>' suffix for ZeRO-partitioned state.
    """
    directory = 'release' if release else 'iter_{:07d}'.format(iteration)
    if zero:
        # ZeRO shards optimizer state per data-parallel rank.
        directory += '_zero_dp_rank_{}'.format(mpu.get_data_parallel_rank())
    return os.path.join(checkpoints_path,
                        directory,
                        'mp_rank_{:02d}'.format(mpu.get_model_parallel_rank()),
                        'model_optim_rng.pt')
def ensure_directory_exists(filename):
    """Create the parent directory of `filename` (and any intermediates)
    if it does not exist yet.

    Fixes two defects of the previous exists()/makedirs() pair:
    - check-then-create race when several ranks save concurrently
      (makedirs(..., exist_ok=True) is atomic with respect to the check);
    - crash on bare filenames, where dirname('x.pt') == '' and
      os.makedirs('') raises FileNotFoundError.
    """
    dirname = os.path.dirname(filename)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
def save_zero_checkpoint(args, iteration, optimizer):
zero_sd = {'iteration': iteration,
'optimizer_state_dict': optimizer.state_dict()}
zero_checkpoint_name = get_checkpoint_name(args.save, iteration, zero=True)
ensure_directory_exists(zero_checkpoint_name)
torch.save(zero_sd, zero_checkpoint_name)
print(' successfully saved {}'.format(zero_checkpoint_name)) | null |
18,350 | import os
import random
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from sofa.utils import mpu,print_rank_0
def load_checkpoint(model,
                    load_dir,
                    tag,
                    load_module_strict=True,
                    load_optimizer_states=True,
                    load_lr_scheduler_states=True):
    r"""Load a training checkpoint for `model`.

    Arguments:
        load_dir: directory to load the checkpoint from.
        tag: unique checkpoint identifier (e.g. global step).
        load_module_strict: strictly enforce that state_dict keys of the
            module and the checkpoint match.
        load_optimizer_states: also restore optimizer state
            (e.g. ADAM's momentum and variance).
        load_lr_scheduler_states: also restore LR-scheduler state.

    Return:
        load_path: path of the loaded checkpoint, or None when loading failed.
        client_state: state dictionary with the training states saved by the
            client code.
    """
    load_path, client_states = _load_checkpoint(
        model,
        load_dir,
        tag,
        load_module_strict=load_module_strict,
        load_optimizer_states=load_optimizer_states,
        load_lr_scheduler_states=load_lr_scheduler_states)
    # ZeRO keeps its partitioned optimizer state in separate files, so it is
    # restored in a second step once the module checkpoint was found.
    if load_optimizer_states and model.zero_optimization() and load_path is not None:
        model._load_zero_checkpoint(load_dir,
                                    tag,
                                    load_optimizer_states=load_optimizer_states)
    return load_path, client_states
def get_checkpoint_name(checkpoints_path, iteration, release=False, zero=False):
    """Return the path of a Megatron-style checkpoint file.

    Layout: <checkpoints_path>/<iter dir>/mp_rank_XX/model_optim_rng.pt,
    where the iteration directory is 'release' for release checkpoints and
    is suffixed with the data-parallel rank for ZeRO-partitioned state.
    """
    if release:
        d = 'release'
    else:
        d = 'iter_{:07d}'.format(iteration)
    if zero:
        # ZeRO shards optimizer state per data-parallel rank, so each rank
        # gets its own directory.
        dp_rank = mpu.get_data_parallel_rank()
        d += '_zero_dp_rank_{}'.format(dp_rank)
    return os.path.join(checkpoints_path, d,
                        'mp_rank_{:02d}'.format(mpu.get_model_parallel_rank()),
                        'model_optim_rng.pt')
def get_checkpoint_iteration(args):
    """Read the checkpoint tracker file and return (iteration, release, success).

    - iteration: parsed step number (0 when absent or for release checkpoints)
    - release:   True when the tracker contains the literal string 'release'
    - success:   False when no tracker file exists (training starts fresh)

    Calls exit() on a tracker file that is neither an integer nor 'release'.
    """
    # Read the tracker file and set the iteration.
    tracker_filename = get_checkpoint_tracker_filename(args.load)
    if not os.path.isfile(tracker_filename):
        print_rank_0('WARNING: could not find the metadata file {} '.format(
            tracker_filename))
        print_rank_0(' will not load any checkpoints and will start from '
                     'random')
        return 0, False, False
    iteration = 0
    release = False
    with open(tracker_filename, 'r') as f:
        metastring = f.read().strip()
        try:
            iteration = int(metastring)
        except ValueError:
            # Not an integer: the only other legal content is 'release'.
            release = metastring == 'release'
            if not release:
                print_rank_0('ERROR: Invalid metadata file {}. Exiting'.format(
                    tracker_filename))
                exit()
    assert iteration > 0 or release, 'error parsing metadata file {}'.format(
        tracker_filename)
    return iteration, release, True
The provided code snippet includes necessary dependencies for implementing the `load_deepspeed_checkpoint` function. Write a Python function `def load_deepspeed_checkpoint(model, optimizer, lr_scheduler, args)` to solve the following problem:
Load a model checkpoint.
Here is the function:
def load_deepspeed_checkpoint(model, optimizer, lr_scheduler, args):
"""Load a model checkpoint."""
iteration, release, success = get_checkpoint_iteration(args)
if not success:
return 0
if args.deepspeed:
#checkpoint_name, sd = model.load_checkpoint(args.load, iteration, load_optimizer_states=False, load_lr_scheduler_states=False)
checkpoint_name, sd = load_checkpoint(model, args.load, iteration, load_optimizer_states=not args.no_load_optim, load_lr_scheduler_states=not args.no_load_lr)
if checkpoint_name is None:
if mpu.get_data_parallel_rank() == 0:
print("Unable to load checkpoint.")
return iteration
else:
# Checkpoint.
checkpoint_name = get_checkpoint_name(args.load, iteration, release)
if mpu.get_data_parallel_rank() == 0:
print('global rank {} is loading checkpoint {}'.format(
torch.distributed.get_rank(), checkpoint_name))
# Load the checkpoint.
sd = torch.load(checkpoint_name, map_location='cpu')
if isinstance(model, torchDDP):
model = model.module
# Model.
try:
model.load_state_dict(sd['model'])
except KeyError:
print_rank_0('A metadata file exists but unable to load model '
'from checkpoint {}, exiting'.format(checkpoint_name))
exit()
# Optimizer.
if not release and not args.finetune and not args.no_load_optim:
try:
if optimizer is not None:
optimizer.load_state_dict(sd['optimizer'])
if lr_scheduler is not None:
lr_scheduler.load_state_dict(sd['lr_scheduler'])
except KeyError:
print_rank_0('Unable to load optimizer from checkpoint {}, exiting. '
'Specify --no-load-optim or --finetune to prevent '
'attempting to load the optimizer '
'state.'.format(checkpoint_name))
exit()
# Iterations.
if args.finetune or release:
iteration = 0
else:
try:
iteration = sd['iteration']
except KeyError:
try: # Backward compatible with older checkpoints
iteration = sd['total_iters']
except KeyError:
print_rank_0('A metadata file exists but Unable to load iteration '
' from checkpoint {}, exiting'.format(checkpoint_name))
exit()
# rng states.
if not release and not args.finetune and not args.no_load_rng:
try:
random.setstate(sd['random_rng_state'])
np.random.set_state(sd['np_rng_state'])
torch.set_rng_state(sd['torch_rng_state'])
torch.cuda.set_rng_state(sd['cuda_rng_state'])
mpu.get_cuda_rng_tracker().set_states(sd['rng_tracker_states'])
except KeyError:
print_rank_0('Unable to load optimizer from checkpoint {}, exiting. '
'Specify --no-load-optim or --finetune to prevent '
'attempting to load the optimizer '
'state.'.format(checkpoint_name))
exit()
torch.distributed.barrier()
if mpu.get_data_parallel_rank() == 0:
print(' successfully loaded {}'.format(checkpoint_name))
return iteration | Load a model checkpoint. |
18,351 | import os
import random
import numpy as np
import torch
from torch.nn.parallel.distributed import DistributedDataParallel as torchDDP
from sofa.utils import mpu,print_rank_0
def load_weights(src, dst, dst2src=False):
    """Copy parameters between *src* and *dst* in place.

    By default parameters flow from the HuggingFace module *src* into the
    local module *dst*; with ``dst2src=True`` the direction is reversed
    (still untested, per the original author). Conv1D weights are
    transposed because HuggingFace stores them with swapped axes.
    """
    is_conv1d = 'Conv1D' in str(type(src))
    for name, param in src.named_parameters():
        if dst2src:
            source, target = dst._parameters[name].data, param.data
        else:
            source, target = param.data, dst._parameters[name].data
        if is_conv1d and 'weight' in name:
            # HuggingFace Conv1D keeps the weight transposed.
            source = source.t().contiguous()
        target.copy_(source)
def load_transformer_layer(our, oai, dst2src=False):
    # Copy one transformer layer's weights between the HuggingFace layer
    # `oai` and our layer `our`; dst2src=True reverses the direction.
    load_weights(oai.ln_1, our.input_layernorm, dst2src)
    load_weights(oai.ln_2, our.post_attention_layernorm, dst2src)
    # NOTE(review): load_mlp/load_attention are defined elsewhere in this
    # module; presumably they follow the same direction convention as
    # load_weights -- confirm.
    load_mlp(our.mlp, oai.mlp, dst2src)
    load_attention(our.attention, oai.attn, dst2src)
The provided code snippet includes necessary dependencies for implementing the `move_weights` function. Write a Python function `def move_weights(our, oai, dst2src=False)` to solve the following problem:
Loads weights from `oai` to `our` via in place copy. `oai` is a huggingface gpt2model, while `our` is one of our models. dst2src=True loads parameters from our models into huggingface's. ^dst2src=True is still untested
Here is the function:
def move_weights(our, oai, dst2src=False):
"""
Loads weights from `oai` to `our` via in place copy.
`oai` is a huggingface gpt2model, while `our` is one of our models.
dst2src=True loads parameters from our models into huggingface's.
^dst2src=True is still untested
"""
# while isinstance(our, (torchDDP, model.distributed.DistributedDataParallel, FP16_Module)):
# our=our.module
transformer_model = oai.transformer
load_weights(transformer_model.ln_f, our.transformer.final_layernorm, dst2src)
load_weights(transformer_model.wte, our.word_embeddings, dst2src)
load_weights(transformer_model.wpe, our.position_embeddings, dst2src)
for our_layer, oai_layer in zip(our.transformer.layers, oai.transformer.h):
load_transformer_layer(our_layer, oai_layer, dst2src) | Loads weights from `oai` to `our` via in place copy. `oai` is a huggingface gpt2model, while `our` is one of our models. dst2src=True loads parameters from our models into huggingface's. ^dst2src=True is still untested |
18,352 | import torch
from .utils import ensure_divisibility
_MODEL_PARALLEL_GROUP = None
_DATA_PARALLEL_GROUP = None
def ensure_divisibility(numerator, denominator):
    """Raise AssertionError unless *numerator* divides evenly by *denominator*."""
    remainder = numerator % denominator
    assert remainder == 0, '{} is not divisible by {}'.format(
        numerator, denominator)
The provided code snippet includes necessary dependencies for implementing the `initialize_model_parallel` function. Write a Python function `def initialize_model_parallel(model_parallel_size_)` to solve the following problem:
Initialize model data parallel groups. Arguments: model_parallel_size: number of GPUs used to parallelize model. Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model. The present function will create 4 model parallel groups and 2 data parallel grous as: 4 model parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 data parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box.
Here is the function:
def initialize_model_parallel(model_parallel_size_):
"""
Initialize model data parallel groups.
Arguments:
model_parallel_size: number of GPUs used to parallelize model.
Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
use 2 GPUs to parallelize the model. The present function will
create 4 model parallel groups and 2 data parallel grous as:
4 model parallel groups:
[g0, g1], [g2, g3], [g4, g5], [g6, g7]
2 data parallel groups:
[g0, g2, g4, g6], [g1, g3, g5, g7]
Note that for efficiency, the caller should make sure adjacent ranks
are on the same DGX box. For example if we are using 2 DGX-1 boxes
with a total of 16 GPUs, rank 0 to 7 belong to the first box and
ranks 8 to 15 belong to the second box.
"""
if torch.distributed.get_rank() == 0:
print('> initializing model parallel with size {}'.format(
model_parallel_size_))
# Get world size and rank. Ensure some consistencies.
assert torch.distributed.is_initialized()
world_size = torch.distributed.get_world_size()
model_parallel_size = min(model_parallel_size_, world_size)
ensure_divisibility(world_size, model_parallel_size)
rank = torch.distributed.get_rank()
# Build the data parallel groups.
global _DATA_PARALLEL_GROUP
assert _DATA_PARALLEL_GROUP is None, \
'data parallel group is already initialized'
for i in range(model_parallel_size):
ranks = range(i, world_size, model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank % model_parallel_size):
_DATA_PARALLEL_GROUP = group
# Build the model parallel groups.
global _MODEL_PARALLEL_GROUP
assert _MODEL_PARALLEL_GROUP is None, \
'model parallel group is already initialized'
for i in range(world_size // model_parallel_size):
ranks = range(i * model_parallel_size,
(i + 1) * model_parallel_size)
group = torch.distributed.new_group(ranks)
if i == (rank // model_parallel_size):
_MODEL_PARALLEL_GROUP = group | Initialize model data parallel groups. Arguments: model_parallel_size: number of GPUs used to parallelize model. Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model. The present function will create 4 model parallel groups and 2 data parallel grous as: 4 model parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 data parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. |
18,353 | import torch
from .utils import ensure_divisibility
_MODEL_PARALLEL_GROUP = None
_DATA_PARALLEL_GROUP = None
The provided code snippet includes necessary dependencies for implementing the `model_parallel_is_initialized` function. Write a Python function `def model_parallel_is_initialized()` to solve the following problem:
Check if model and data parallel groups are initialized.
Here is the function:
def model_parallel_is_initialized():
"""Check if model and data parallel groups are initialized."""
if _MODEL_PARALLEL_GROUP is None or _DATA_PARALLEL_GROUP is None:
return False
return True | Check if model and data parallel groups are initialized. |
18,354 | import torch
from .utils import ensure_divisibility
def get_data_parallel_group():
    """Get the data parallel group the caller rank belongs to.

    Returns the process group stored in the module-level
    _DATA_PARALLEL_GROUP; asserts that model-parallel initialization
    (presumably initialize_model_parallel -- confirm) has populated it.
    """
    assert _DATA_PARALLEL_GROUP is not None, \
        'data parallel group is not initialized'
    return _DATA_PARALLEL_GROUP
The provided code snippet includes necessary dependencies for implementing the `get_data_parallel_world_size` function. Write a Python function `def get_data_parallel_world_size()` to solve the following problem:
Return world size for the data parallel group.
Here is the function:
def get_data_parallel_world_size():
"""Return world size for the data parallel group."""
return torch.distributed.get_world_size(group=get_data_parallel_group()) | Return world size for the data parallel group. |
18,355 | import torch
from .utils import ensure_divisibility
_MODEL_PARALLEL_GROUP = None
_DATA_PARALLEL_GROUP = None
The provided code snippet includes necessary dependencies for implementing the `destroy_model_parallel` function. Write a Python function `def destroy_model_parallel()` to solve the following problem:
Set the groups to none.
Here is the function:
def destroy_model_parallel():
"""Set the groups to none."""
global _MODEL_PARALLEL_GROUP
_MODEL_PARALLEL_GROUP = None
global _DATA_PARALLEL_GROUP
_DATA_PARALLEL_GROUP = None | Set the groups to none. |
18,356 | import math
import torch
import torch.nn.init as init
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from ..utils import print_rank_0
from deepspeed.utils.timer import SynchronizedWallClockTimer
def gelu_impl(x):
    """OpenAI's tanh-based GELU approximation.

    Computes 0.5*x*(1 + tanh(sqrt(2/pi)*(x + 0.044715*x^3))); the literal
    0.7978845608028654 is sqrt(2/pi).
    """
    inner = 0.7978845608028654 * x * (1.0 + 0.044715 * x * x)
    return 0.5 * x * (1.0 + torch.tanh(inner))
def gelu(x):
return gelu_impl(x) | null |
18,357 | import math
import torch
import torch.nn.init as init
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from ..utils import print_rank_0
from deepspeed.utils.timer import SynchronizedWallClockTimer
The provided code snippet includes necessary dependencies for implementing the `unscaled_init_method` function. Write a Python function `def unscaled_init_method(sigma)` to solve the following problem:
Init method based on N(0, sigma).
Here is the function:
def unscaled_init_method(sigma):
"""Init method based on N(0, sigma)."""
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)
return init_ | Init method based on N(0, sigma). |
18,358 | import math
import torch
import torch.nn.init as init
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_world_size
from .layers import ColumnParallelLinear
from .layers import RowParallelLinear
from .mappings import gather_from_model_parallel_region
import deepspeed
from .random import checkpoint
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from ..utils import print_rank_0
from deepspeed.utils.timer import SynchronizedWallClockTimer
The provided code snippet includes necessary dependencies for implementing the `scaled_init_method` function. Write a Python function `def scaled_init_method(sigma, num_layers)` to solve the following problem:
Init method based on N(0, sigma/sqrt(2*num_layers).
Here is the function:
def scaled_init_method(sigma, num_layers):
"""Init method based on N(0, sigma/sqrt(2*num_layers)."""
std = sigma / math.sqrt(2.0 * num_layers)
def init_(tensor):
return torch.nn.init.normal_(tensor, mean=0.0, std=std)
return init_ | Init method based on N(0, sigma/sqrt(2*num_layers). |
18,359 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
def get_model_parallel_group():
    """Get the model parallel group the caller rank belongs to.

    Asserts that model-parallel initialization has already stored the
    process group in the module-level _MODEL_PARALLEL_GROUP.
    """
    assert _MODEL_PARALLEL_GROUP is not None, \
        'model parallel group is not initialized'
    return _MODEL_PARALLEL_GROUP
The provided code snippet includes necessary dependencies for implementing the `_reduce` function. Write a Python function `def _reduce(input_)` to solve the following problem:
All-reduce the input tensor across the model parallel group.
Here is the function:
def _reduce(input_):
"""All-reduce the the input tensor across model parallel group."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# All-reduce.
torch.distributed.all_reduce(input_, group=group)
return input_ | All-reduce the the input tensor across model parallel group. |
18,360 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
def get_model_parallel_group():
    """Get the model parallel group the caller rank belongs to.

    Asserts that model-parallel initialization has already stored the
    process group in the module-level _MODEL_PARALLEL_GROUP.
    """
    assert _MODEL_PARALLEL_GROUP is not None, \
        'model parallel group is not initialized'
    return _MODEL_PARALLEL_GROUP
def split_tensor_along_last_dim(tensor, num_partitions,
                                contiguous_split_chunks=False):
    """Split a tensor along its last dimension.

    Arguments:
        tensor: input tensor.
        num_partitions: number of partitions to split the tensor
        contiguous_split_chunks: If True, make each chunk contiguous
                                 in memory.

    Returns:
        A sequence of `num_partitions` equal-width chunks (views unless
        contiguous copies were requested).
    """
    # Get the size and dimension.
    last_dim = tensor.dim() - 1
    # `divide` is an external helper from .utils; presumably returns the
    # integer quotient after asserting divisibility -- confirm.
    last_dim_size = divide(tensor.size()[last_dim], num_partitions)
    # Split.
    tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
    # Note: torch.split does not create contiguous tensors by default.
    if contiguous_split_chunks:
        return tuple(chunk.contiguous() for chunk in tensor_list)
    return tensor_list
The provided code snippet includes necessary dependencies for implementing the `_split` function. Write a Python function `def _split(input_)` to solve the following problem:
Split the tensor along its last dimension and keep the corresponding slice.
Here is the function:
def _split(input_):
"""Split the tensor along its last dimension and keep the
corresponding slice."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Split along last dimension.
world_size = torch.distributed.get_world_size(group=group)
input_list = split_tensor_along_last_dim(input_, world_size)
# Note: torch.split does not create contiguous tensors by default.
rank = torch.distributed.get_rank(group=group)
output = input_list[rank].contiguous()
return output | Split the tensor along its last dimension and keep the corresponding slice. |
18,361 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
def get_model_parallel_group():
    """Get the model parallel group the caller rank belongs to.

    Asserts that model-parallel initialization has already stored the
    process group in the module-level _MODEL_PARALLEL_GROUP.
    """
    assert _MODEL_PARALLEL_GROUP is not None, \
        'model parallel group is not initialized'
    return _MODEL_PARALLEL_GROUP
The provided code snippet includes necessary dependencies for implementing the `_gather` function. Write a Python function `def _gather(input_)` to solve the following problem:
Gather tensors and concatenate along the last dimension.
Here is the function:
def _gather(input_):
"""Gather tensors and concatinate along the last dimension."""
group = get_model_parallel_group()
# Bypass the function if we are using only 1 GPU.
if torch.distributed.get_world_size(group=group) == 1:
return input_
# Size and dimension.
last_dim = input_.dim() - 1
rank = torch.distributed.get_rank(group=group)
world_size = torch.distributed.get_world_size(group=group)
tensor_list = [torch.empty_like(input_) for _ in range(world_size)]
tensor_list[rank] = input_
torch.distributed.all_gather(tensor_list, input_, group=group)
# Note: torch.cat already creates a contiguous tensor.
output = torch.cat(tensor_list, dim=last_dim).contiguous()
return output | Gather tensors and concatinate along the last dimension. |
18,362 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
class _CopyToModelParallelRegion(torch.autograd.Function):
    """Identity on forward; all-reduce the gradient across the model
    parallel group on backward."""

    def forward(ctx, input_):
        # Entering the model-parallel region is a no-op on activations.
        return input_

    def backward(ctx, grad_output):
        # Each rank holds a partial gradient of the replicated input;
        # sum them so every rank sees the full gradient.
        return _reduce(grad_output)
def copy_to_model_parallel_region(input_):
return _CopyToModelParallelRegion.apply(input_) | null |
18,363 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
class _ReduceFromModelParallelRegion(torch.autograd.Function):
    """All-reduce the input across the model parallel group on forward;
    pass the gradient through unchanged on backward."""

    def forward(ctx, input_):
        # Sum the partial activations held by each model-parallel rank.
        return _reduce(input_)

    def backward(ctx, grad_output):
        # The sum's gradient w.r.t. each addend is the identity.
        return grad_output
def reduce_from_model_parallel_region(input_):
return _ReduceFromModelParallelRegion.apply(input_) | null |
18,364 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
class _ScatterToModelParallelRegion(torch.autograd.Function):
    """Split the input along its last dimension, keeping only this rank's
    chunk; gather the gradient chunks back on backward."""

    def forward(ctx, input_):
        return _split(input_)

    def backward(ctx, grad_output):
        return _gather(grad_output)
def scatter_to_model_parallel_region(input_):
return _ScatterToModelParallelRegion.apply(input_) | null |
18,365 | import torch
from .initialize import get_model_parallel_group
from .utils import split_tensor_along_last_dim
from deepspeed.utils.timer import SynchronizedWallClockTimer
class _GatherFromModelParallelRegion(torch.autograd.Function):
    """Gather chunks from all model-parallel ranks and concatenate them
    along the last dimension; split the gradient back on backward."""

    def forward(ctx, input_):
        return _gather(input_)

    def backward(ctx, grad_output):
        return _split(grad_output)
def gather_from_model_parallel_region(input_):
return _GatherFromModelParallelRegion.apply(input_) | null |
18,366 | import torch
from torch._six import inf
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
def get_model_parallel_group():
    """Get the model parallel group the caller rank belongs to.

    Asserts that model-parallel initialization has already stored the
    process group in the module-level _MODEL_PARALLEL_GROUP.
    """
    assert _MODEL_PARALLEL_GROUP is not None, \
        'model parallel group is not initialized'
    return _MODEL_PARALLEL_GROUP
def get_model_parallel_rank():
    """Return this process's rank within its model parallel group."""
    return torch.distributed.get_rank(group=get_model_parallel_group())
The provided code snippet includes necessary dependencies for implementing the `clip_grad_norm` function. Write a Python function `def clip_grad_norm(parameters, max_norm, norm_type=2)` to solve the following problem:
Clips gradient norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector).
Here is the function:
def clip_grad_norm(parameters, max_norm, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
max_norm = float(max_norm)
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
# Take max across all GPUs.
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.MAX,
group=get_model_parallel_group())
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0
for p in parameters:
if p.model_parallel or (get_model_parallel_rank() == 0):
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=get_model_parallel_group())
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for p in parameters:
p.grad.data.mul_(clip_coef)
return total_norm | Clips gradient norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). |
18,367 | import torch
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .utils import VocabUtility
class _VocabParallelCrossEntropy(torch.autograd.Function):
    """Cross entropy for logits whose vocabulary dimension is partitioned
    across model-parallel ranks.

    Each rank holds a [*, partition-vocab-size] shard of the logits; the
    softmax normalizer and the target logit are assembled with all-reduces
    so every rank computes the full loss without ever materializing the
    whole vocabulary on one device.
    """

    def forward(ctx, vocab_parallel_logits, target):
        """Return the per-token loss; saves softmax state for backward."""
        # Copy so the input remains unchanged.
        logits = vocab_parallel_logits.clone()
        # Maximum value along vocab dimension across all GPUs
        # (subtracted below for numerical stability of the softmax).
        logits_max = torch.max(logits, dim=-1)[0]
        torch.distributed.all_reduce(logits_max,
                                     op=torch.distributed.ReduceOp.MAX,
                                     group=get_model_parallel_group())
        # Subtract the maximum value.
        logits.sub_(logits_max.unsqueeze(dim=-1))
        # Sum of exponential of logits along vocab dimension across all GPUs.
        exp_logits = logits.exp()
        sum_exp_logits = exp_logits.sum(dim=-1)
        torch.distributed.all_reduce(sum_exp_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_model_parallel_group())
        # Get the partition's vocab indices.
        get_vocab_range = VocabUtility.vocab_range_from_per_partition_vocab_size
        partition_vocab_size = vocab_parallel_logits.size()[-1]
        rank = get_model_parallel_rank()
        world_size = get_model_parallel_world_size()
        vocab_start_index, vocab_end_index = get_vocab_range(
            partition_vocab_size, rank, world_size)
        # Create a mask of valid vocab ids (1 means it needs to be masked):
        # targets outside this rank's [start, end) shard are masked out.
        target_mask = (target < vocab_start_index) | (target >= vocab_end_index)
        masked_target = target.clone() - vocab_start_index
        masked_target[target_mask] = 0
        # Get predicted-logits = logits[target].
        # For simplicity, we convert logits to a 2-D tensor with size
        # [*, partition-vocab-size] and target to a 1-D tensor of size [*].
        logits_2d = logits.view(-1, partition_vocab_size)
        masked_target_1d = masked_target.view(-1)
        arange_1d = torch.arange(start=0, end=logits_2d.size()[0],
                                 device=logits_2d.device)
        predicted_logits_1d = logits_2d[arange_1d, masked_target_1d]
        predicted_logits = predicted_logits_1d.view_as(target)
        # Zero out entries owned by other ranks before summing shards.
        predicted_logits[target_mask] = 0.0
        # All reduce is needed to get the chunks from other GPUs.
        torch.distributed.all_reduce(predicted_logits,
                                     op=torch.distributed.ReduceOp.SUM,
                                     group=get_model_parallel_group())
        # Loss = log(sum(exp(logits))) - predicted-logit.
        loss = torch.log(sum_exp_logits) - predicted_logits
        # Store softmax, target-mask and masked-target for backward pass.
        exp_logits.div_(sum_exp_logits.unsqueeze(dim=-1))
        ctx.save_for_backward(exp_logits, target_mask, masked_target_1d)
        return loss

    def backward(ctx, grad_output):
        """Gradient of the loss w.r.t. this rank's logit shard."""
        # Retrieve tensors from the forward path.
        softmax, target_mask, masked_target_1d = ctx.saved_tensors
        # All the inputs have softmax as their gradient.
        grad_input = softmax
        # For simplicity, work with the 2D gradient.
        partition_vocab_size = softmax.size()[-1]
        grad_2d = grad_input.view(-1, partition_vocab_size)
        # Add the gradient from matching classes: softmax - one_hot(target),
        # applied only where this rank owns the target index.
        arange_1d = torch.arange(start=0, end=grad_2d.size()[0],
                                 device=grad_2d.device)
        grad_2d[arange_1d, masked_target_1d] -= (
            1.0 - target_mask.view(-1).float())
        # Finally elementwise multiplication with the output gradients.
        grad_input.mul_(grad_output.unsqueeze(dim=-1))
        # No gradient flows to the integer targets.
        return grad_input, None
The provided code snippet includes necessary dependencies for implementing the `vocab_parallel_cross_entropy` function. Write a Python function `def vocab_parallel_cross_entropy(vocab_parallel_logits, target)` to solve the following problem:
Helper function for the cross entropy.
Here is the function:
def vocab_parallel_cross_entropy(vocab_parallel_logits, target):
"""Helper function for the cross entropy."""
return _VocabParallelCrossEntropy.apply(vocab_parallel_logits, target) | Helper function for the cross entropy. |
18,368 | import torch
from .initialize import get_model_parallel_group
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_src_rank
def _check_data_types(keys, data, target_dtype):
"""Check that all the keys have the same target data type."""
for key in keys:
assert data[key].dtype == target_dtype, '{} has data type {} which '\
'is different than {}'.format(key, data[key].dtype, target_dtype)
def _build_key_size_numel_dictionaries(keys, data):
    """On model-parallel rank 0, record each tensor's shape; broadcast it.

    Returns (key_size, key_numel, total_numel): per-key shape lists,
    per-key element counts, and the grand total element count.  Shapes
    are packed into a flat, zero-padded list of `_MAX_DATA_DIM` slots per
    key, so zero-sized dimensions cannot be represented (the unpacking
    loop below stops at the first 0).
    """
    # _MAX_DATA_DIM is a module-level constant defined elsewhere in the file.
    max_dim = _MAX_DATA_DIM
    sizes = [0 for _ in range(max_dim) for _ in keys]
    # Pack the sizes on rank zero.
    if get_model_parallel_rank() == 0:
        offset = 0
        for key in keys:
            assert data[key].dim() < max_dim, 'you should increase MAX_DATA_DIM'
            size = data[key].size()
            for i, s in enumerate(size):
                sizes[i + offset] = s
            offset += max_dim
    # Move to GPU and broadcast.
    sizes_cuda = torch.cuda.LongTensor(sizes)
    torch.distributed.broadcast(sizes_cuda, get_model_parallel_src_rank(),
                                group=get_model_parallel_group())
    # Move back to cpu and unpack.
    sizes_cpu = sizes_cuda.cpu()
    key_size = {}
    key_numel = {}
    total_numel = 0
    offset = 0
    for key in keys:
        i = 0
        size = []
        numel = 1
        # Read dimensions until the zero padding terminator.
        while sizes_cpu[offset + i] > 0:
            this_size = sizes_cpu[offset + i]
            size.append(this_size)
            numel *= this_size
            i += 1
        key_size[key] = size
        key_numel[key] = numel
        total_numel += numel
        offset += max_dim
    return key_size, key_numel, total_numel
def get_model_parallel_group():
    """Get the model parallel group the caller rank belongs to.

    Asserts that model-parallel initialization has already stored the
    process group in the module-level _MODEL_PARALLEL_GROUP.
    """
    assert _MODEL_PARALLEL_GROUP is not None, \
        'model parallel group is not initialized'
    return _MODEL_PARALLEL_GROUP
def get_model_parallel_rank():
    """Return this process's rank within its model parallel group."""
    return torch.distributed.get_rank(group=get_model_parallel_group())
def get_model_parallel_src_rank():
    """Calculate the global rank corresponding to local rank zero
    in the caller's model parallel group.

    Relies on ranks being laid out so each model parallel group occupies
    a contiguous block of `local_world_size` global ranks.
    """
    global_rank = torch.distributed.get_rank()
    local_world_size = get_model_parallel_world_size()
    return (global_rank // local_world_size) * local_world_size
The provided code snippet includes necessary dependencies for implementing the `broadcast_data` function. Write a Python function `def broadcast_data(keys, data, datatype)` to solve the following problem:
Broadcast data from rank zero of each model parallel group to the members of the same model parallel group. Arguments: keys: list of keys in the data disctionary to be broadcasted data: data dictionary of string keys and cpu tensor values. datatype: torch data type of all tensors in data associated with keys.
Here is the function:
def broadcast_data(keys, data, datatype):
"""Broadcast data from rank zero of each model parallel group to the
members of the same model parallel group.
Arguments:
keys: list of keys in the data disctionary to be broadcasted
data: data dictionary of string keys and cpu tensor values.
datatype: torch data type of all tensors in data associated
with keys.
"""
# Build (key, size) and (key, number of elements) dictionaries along
# with the total number of elements on all ranks.
key_size, key_numel, total_numel = _build_key_size_numel_dictionaries(keys,
data)
# Pack on rank zero.
if get_model_parallel_rank() == 0:
# Check that all keys have the same data type.
_check_data_types(keys, data, datatype)
# Flatten the data associated with the keys
flatten_data = torch.cat(
[data[key].contiguous().view(-1) for key in keys], dim=0).cuda()
else:
flatten_data = torch.empty(total_numel,
device=torch.cuda.current_device(),
dtype=datatype)
# Boradcast
torch.distributed.broadcast(flatten_data, get_model_parallel_src_rank(),
group=get_model_parallel_group())
# Unpack
output = {}
offset = 0
for key in keys:
size = key_size[key]
numel = key_numel[key]
output[key] = flatten_data.narrow(0, offset, numel).view(size)
offset += numel
return output | Broadcast data from rank zero of each model parallel group to the members of the same model parallel group. Arguments: keys: list of keys in the data disctionary to be broadcasted data: data dictionary of string keys and cpu tensor values. datatype: torch data type of all tensors in data associated with keys. |
18,369 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
def see_memory_usage(message, force=False):
    """Print CUDA memory statistics on rank 0 (no-op unless force=True).

    Synchronizes all ranks with a barrier first so the numbers line up
    across processes; requires torch.distributed to be initialized.
    """
    if not force:
        return
    dist.barrier()
    if dist.get_rank() == 0:
        print(message)
        print("Memory Allocated ", torch.cuda.memory_allocated()/(1024*1024*1024), "GigaBytes")
        print("Max Memory Allocated ", torch.cuda.max_memory_allocated()/(1024*1024*1024), "GigaBytes")
        # NOTE(review): memory_cached/max_memory_cached are deprecated
        # aliases of memory_reserved/max_memory_reserved in newer torch.
        print("Cache Allocated ", torch.cuda.memory_cached()/(1024*1024*1024), "GigaBytes")
        print("Max cache Allocated ", torch.cuda.max_memory_cached()/(1024*1024*1024), "GigaBytes")
        print(" ")
#input("Press Any Key To Continue ..") | null |
18,370 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
def detach_variable(inputs, device=None):
if isinstance(inputs, tuple):
out = []
for inp in inputs:
if not isinstance(inp, torch.Tensor):
out.append(inp)
continue
requires_grad = inp.requires_grad
if device is not None:
x = inp.to(device=device)
else:
x = inp
x = x.detach()
x.requires_grad = requires_grad
out.append(x)
return tuple(out)
else:
raise RuntimeError(
"Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__) | null |
18,371 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
The provided code snippet includes necessary dependencies for implementing the `_set_cuda_rng_state` function. Write a Python function `def _set_cuda_rng_state(new_state, device=-1)` to solve the following problem:
Sets the random number generator state of the current GPU. Arguments: new_state (torch.ByteTensor): The desired state. This function is adapted from the PyTorch repo (torch.cuda.set_rng_state) with a single change: the input state is not cloned. Cloning caused major performance issues for 4+ GPU cases.
Here is the function:
def _set_cuda_rng_state(new_state, device=-1):
    """Sets the random number generator state of the current GPU.

    Arguments:
        new_state (torch.ByteTensor): The desired state
        device: -1 (current CUDA device), a device string, or an int index.

    This function is adapted from PyTorch repo (torch.cuda.set_rng_state)
    with a single change: the input state is not cloned. Cloning caused
    major performance issues for +4 GPU cases.
    """
    # Legacy PyTorch exposed a C-level setter that takes the state directly.
    if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState):
        # older PyTorch
        def cb():
            with device_ctx_manager(device):
                _C._cuda_setRNGState(new_state)
    else:
        # newer PyTorch: normalize `device` into a torch.device first.
        if device == -1:
            device = torch.device('cuda')
        elif isinstance(device, str):
            device = torch.device(device)
        elif isinstance(device, int):
            device = torch.device('cuda', device)

        def cb():
            idx = device.index
            if idx is None:
                idx = torch.cuda.current_device()
            default_generator = torch.cuda.default_generators[idx]
            default_generator.set_state(new_state)

    # _lazy_call defers cb until CUDA is initialized (runs immediately if it is).
    _lazy_call(cb)
18,372 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
The provided code snippet includes necessary dependencies for implementing the `get_cuda_rng_tracker` function. Write a Python function `def get_cuda_rng_tracker()` to solve the following problem:
Get cuda rng tracker.
Here is the function:
def get_cuda_rng_tracker():
    """Return the process-wide CUDA RNG state tracker singleton."""
    return _CUDA_RNG_STATE_TRACKER
18,373 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
_MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng'
_CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker()
def get_model_parallel_rank():
    """Return my rank for the model parallel group."""
    # Rank is taken within the model-parallel process group, not WORLD.
    return torch.distributed.get_rank(group=get_model_parallel_group())
def get_data_parallel_rank():
    """Return my rank for the data parallel group."""
    # Rank is taken within the data-parallel process group, not WORLD.
    return torch.distributed.get_rank(group=get_data_parallel_group())
The provided code snippet includes necessary dependencies for implementing the `model_parallel_cuda_manual_seed` function. Write a Python function `def model_parallel_cuda_manual_seed(seed)` to solve the following problem:
Initialize model parallel cuda seed. This function should be called after the model parallel is initialized. Also, no torch.cuda.manual_seed should be called after this function. Basically, this is a replacement for that function. Two sets of RNG states are tracked: default state: This is for data parallelism and is the same among a set of model parallel GPUs but different across different model parallel groups. This is used for example for dropout in the non-model-parallel regions. model-parallel state: This state is different among a set of model parallel GPUs, but the same across data parallel groups. This is used for example for dropout in model parallel regions.
Here is the function:
def model_parallel_cuda_manual_seed(seed):
    """Initialize model parallel cuda seed.

    This function should be called after the model parallel is
    initialized. Also, no torch.cuda.manual_seed should be called
    after this function. Basically, this is a replacement for that
    function.
    Two sets of RNG states are tracked:
        default state: This is for data parallelism and is the same among a
                       set of model parallel GPUs but different across
                       different model parallel groups. This is used for
                       example for dropout in the non-model-parallel regions.
        model-parallel state: This state is different among a set of model
                              parallel GPUs, but the same across data parallel
                              groups. This is used for example for dropout in
                              model parallel regions.

    Args:
        seed (int): base random seed; per-rank model-parallel seeds are
            derived from it with a fixed offset.
    """
    # 2718 is just for fun and any POSITIVE value will work.
    offset = seed + 2718
    # Each model-parallel rank gets a distinct seed so parallel regions differ.
    model_parallel_seed = offset + get_model_parallel_rank()
    # Data parallel gets the original seed.
    data_parallel_seed = seed

    if torch.distributed.get_rank() == 0:
        print('> initializing model parallel cuda seeds on global rank {}, '
              'model parallel rank {}, and data parallel rank {} with '
              'model parallel seed: {} and data parallel seed: {}'.format(
                  torch.distributed.get_rank(), get_model_parallel_rank(),
                  get_data_parallel_rank(), model_parallel_seed,
                  data_parallel_seed), flush=True)
    # Drop any previously tracked states before (re)seeding.
    _CUDA_RNG_STATE_TRACKER.reset()
    # Set the default state.
    torch.cuda.manual_seed(data_parallel_seed)
    # and model parallel state.
    _CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME,
                                model_parallel_seed)
18,374 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
# Model-parallel topology cache: this rank's index, the group size, and the
# process group handle.  Populated lazily on the first checkpointed forward
# pass and read by the partition helpers below.
mp_rank = None
mp_size = None
mp_group = None
def get_partition_size(item):
    """Number of elements of ``item``'s flattened data held by each model-parallel rank.

    Relies on the module-level ``mp_size`` global, which is populated lazily
    before the partition helpers are used.
    """
    # `global` declaration is unnecessary for a pure read of mp_size.
    return int(item.numel() / mp_size)
def get_partition_start(item):
    """Element offset of this rank's partition within ``item``'s flattened data.

    Uses the module-level ``mp_rank`` global (populated lazily).
    """
    return int(get_partition_size(item) * mp_rank)
18,375 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
mp_rank = None
mp_size = None
mp_group = None
def get_full_inputs(tensors):
    """Reassemble full activation tensors from per-rank partitions.

    ``tensors`` is the flat sequence saved by ``CheckpointFunction.forward``
    when activation partitioning is enabled: (partitioned data, original size)
    pairs followed by the non-partitioned trailing entries.  For each pair the
    slices held by the other model-parallel ranks are all-gathered and the
    saved tensor's storage is rebound to the reconstructed full view.

    Returns:
        tuple: the reconstructed inputs plus the second-to-last saved tensor
        (the non-partitioned trailing input).
    """
    inputs = []
    # Each partitioned input occupies two slots (data, size); the final pair
    # of slots holds the non-partitioned trailing entries, hence the "- 1".
    # (// replaces int(len/2): same result, no float round-trip.)
    for pair_idx in range(len(tensors) // 2 - 1):
        item = tensors[2 * pair_idx]
        size = tensors[2 * pair_idx + 1]

        partition_size = item.numel()
        tensor_size = partition_size * mp_size
        flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device)
        partitions = []
        # Distinct loop variable: the original reused `i` here, shadowing the
        # outer loop index.
        for rank in range(mp_size):
            part = flat_tensor.narrow(0, partition_size * rank, partition_size)
            if rank == mp_rank:
                # Seed our own slice so all_gather contributes the local data.
                part.copy_(item)
            partitions.append(part)
        dist.all_gather(partitions, partitions[mp_rank], group=mp_group)
        # Rebind the saved tensor's storage to the reconstructed full tensor.
        input_tensor = flat_tensor.view(list(size.numpy()))
        item.data = input_tensor.data
        inputs.append(item)
    inputs.append(tensors[-2])
    return tuple(inputs)
18,376 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
class CheckpointFunction(torch.autograd.Function):
    """This function is adapted from torch.utils.checkpoint with
    two main changes:
    1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state`
    2) the states in the model parallel tracker are also properly
       tracked/set/reset.

    NOTE(review): forward/backward are declared without @staticmethod; this
    relies on legacy torch.autograd.Function semantics -- confirm against the
    torch version the project pins.
    """
    def forward(ctx, run_function, *args):
        # Run run_function(*args) under no_grad, recording CPU/CUDA RNG state
        # so backward can replay the forward pass deterministically.
        ctx.run_function = run_function
        global mp_rank, mp_size, mp_group
        if mp_rank is None:
            # Lazily cache the model-parallel topology on first use.
            mp_rank = get_model_parallel_rank()
            mp_size = get_model_parallel_world_size()
            mp_group = get_model_parallel_group()

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS
        if cuda_device is None:
            if dist.get_rank() == 0:
                print(f"Partition Activations {PARTITION_ACTIVATIONS} and Correctness Check {PA_CORRECTNESS_TEST}")

            cuda_device = torch.cuda.current_device()
            #The transport stream is used to overlap the allgather communication for the activations
            #with the computation in the backward pass
            transport_stream = torch.cuda.Stream(device=cuda_device)

        if PARTITION_ACTIVATIONS:
            #inputs = [item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), get_partition_size(item)).clone() for item in args[:-1]]
            #inputs.append(args[-1])
            # Keep only this rank's slice of the first two inputs; remaining
            # args are saved whole.  NOTE(review): assumes args[:2] are the
            # large partitionable activations -- confirm against callers.
            # NOTE(review): print_rank_0 is called with two positional args;
            # verify its signature accepts that.
            print_rank_0("args: ", args)
            inputs = [item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), get_partition_size(item)).clone() for item in args[:2]]
            inputs.extend(args[2:])

        #just in case something funky is happening such as reuse of inputs
        inputs_cuda = [item.to(cuda_device) for item in args]

        # Copy the rng states.
        ctx.fwd_cpu_rng_state = torch.get_rng_state()
        ctx.fwd_cuda_rng_state = torch.cuda.get_rng_state()
        ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        #ctx.save_for_backward(*args)
        with torch.no_grad():
            outputs = run_function(*inputs_cuda)

        del inputs_cuda

        if PARTITION_ACTIVATIONS:
            # Save each partitioned slice together with its original full size
            # so backward can all-gather and reshape it (see get_full_inputs).
            new_args = []
            for arg, inp in zip(args,inputs):
                size= torch.tensor(arg.size())
                arg.data = inp.data
                new_args.append(arg)
                new_args.append(size)
            ctx.save_for_backward(*new_args)
        else:
            ctx.save_for_backward(*args)

        return outputs

    def backward(ctx, *args):
        # args are the incoming output gradients.
        if not torch.autograd._is_checkpoint_valid():
            raise RuntimeError("Checkpointing is not compatible with .grad(), "
                               "please use .backward() if possible")

        global cuda_device, transport_stream, PARTITION_ACTIVATIONS
        if PARTITION_ACTIVATIONS:
            # Run the all-gather of saved activations on the side stream so it
            # can overlap with other work; synced below before recompute.
            with torch.cuda.stream(transport_stream):
                inputs = get_full_inputs(ctx.saved_tensors)
                detached_inputs = detach_variable(inputs)
        else:
            inputs = ctx.saved_tensors
            detached_inputs = detach_variable(inputs)

        # Store the current states.
        bwd_cpu_rng_state = torch.get_rng_state()
        bwd_cuda_rng_state = torch.cuda.get_rng_state()
        bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states()

        # Set the states to what it used to be before the forward pass.
        torch.set_rng_state(ctx.fwd_cpu_rng_state)
        _set_cuda_rng_state(ctx.fwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker)

        if PARTITION_ACTIVATIONS:
            # Make the default stream wait for the all-gather to finish.
            current_stream=torch.cuda.current_stream()
            current_stream.wait_stream(transport_stream)

        # Recompute the forward pass, this time building the autograd graph.
        with torch.enable_grad():
            outputs = ctx.run_function(*detached_inputs)

        # Set the states back to what it was at the start of this function.
        torch.set_rng_state(bwd_cpu_rng_state)
        _set_cuda_rng_state(bwd_cuda_rng_state)
        get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker)

        if isinstance(outputs, torch.Tensor):
            outputs = (outputs,)
        torch.autograd.backward(outputs, args)
        # Leading None matches forward's run_function argument (no gradient).
        return (None,) + tuple(inp.grad for inp in detached_inputs)
The provided code snippet includes necessary dependencies for implementing the `checkpoint` function. Write a Python function `def checkpoint(function, *args)` to solve the following problem:
Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint.
Here is the function:
def checkpoint(function, *args):
    """Checkpoint a model or part of the model.

    This has been directly copied from torch.utils.checkpoint.

    Args:
        function: callable that is re-run during backward to recompute
            activations instead of storing them.
        *args: inputs forwarded to ``function``.

    Returns:
        Whatever ``function(*args)`` returns.
    """
    return CheckpointFunction.apply(function, *args)
18,377 | import contextlib
import torch.distributed as dist
import torch
from torch import _C
from torch.cuda import _lazy_call, device as device_ctx_manager
from ..utils import print_rank_0
import torch.distributed as dist
PARTITION_ACTIVATIONS = False
from .initialize import get_data_parallel_rank
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .initialize import get_model_parallel_group
def partition_activations_in_checkpoint(partition_activation):
    """Globally enable/disable activation partitioning for checkpointing.

    Args:
        partition_activation (bool): new value for the module-level
            PARTITION_ACTIVATIONS flag read by CheckpointFunction.
    """
    global PARTITION_ACTIVATIONS
    PARTITION_ACTIVATIONS=partition_activation
    # Only rank 0 announces the change to avoid duplicated log lines.
    if dist.get_rank() == 0:
        print(f"**************Partition Activations {PARTITION_ACTIVATIONS}************")
18,378 | import math
import torch
import torch.nn.functional as F
import torch.nn.init as init
from torch.nn.parameter import Parameter
from apex.normalization.fused_layer_norm import FusedLayerNorm as LayerNorm
from .initialize import get_model_parallel_rank
from .initialize import get_model_parallel_world_size
from .mappings import copy_to_model_parallel_region
from .mappings import gather_from_model_parallel_region
from .mappings import reduce_from_model_parallel_region
from .mappings import scatter_to_model_parallel_region
from .random import get_cuda_rng_tracker
from .utils import divide
from .utils import split_tensor_along_last_dim
from .utils import VocabUtility
from deepspeed.utils.timer import SynchronizedWallClockTimer
from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer, MaskTaylor
def get_model_parallel_world_size():
    """Return world size for the model parallel group."""
    # Size of the model-parallel process group, not of WORLD.
    return torch.distributed.get_world_size(group=get_model_parallel_group())
def get_model_parallel_rank():
    """Return my rank for the model parallel group."""
    # Rank is taken within the model-parallel process group, not WORLD.
    return torch.distributed.get_rank(group=get_model_parallel_group())
def divide(numerator, denominator):
    """Ensure that numerator is divisible by the denominator and return
    the division value."""
    # ensure_divisibility raises on a remainder, so // below is exact.
    ensure_divisibility(numerator, denominator)
    return numerator // denominator
The provided code snippet includes necessary dependencies for implementing the `_initialize_affine_weight` function. Write a Python function `def _initialize_affine_weight(weight, output_size, input_size, per_partition_size, partition_dim, init_method, stride=1, return_master_weight=False)` to solve the following problem:
Initialize affine weight for model parallel. Build the master weight on all processes and scatter the relevant chunk.
Here is the function:
def _initialize_affine_weight(weight, output_size, input_size,
                              per_partition_size, partition_dim, init_method,
                              stride=1, return_master_weight=False):
    """Initialize affine weight for model parallel.

    Build the master weight on all processes and scatter
    the relevant chunk.

    Args:
        weight: destination parameter for this rank's shard (filled in place).
        output_size: full (unpartitioned) output dimension.
        input_size: full (unpartitioned) input dimension.
        per_partition_size: extent of this rank's shard along partition_dim.
        partition_dim: dimension along which the weight is split across ranks.
        init_method: callable that initializes a tensor in place.
        stride: number of interleaved sub-blocks per partition.
        return_master_weight: if True, also return the full master weight
            (or ``weight`` itself in the single-process case).

    Returns:
        The master weight when requested, otherwise None.
    """
    # If we only use 1 process for model parallelism, bypass scatter.
    world_size = get_model_parallel_world_size()
    if world_size == 1:
        init_method(weight)
        if return_master_weight:
            return weight
        return None

    # Initialize master weight
    # Built on every rank (same RNG stream) instead of broadcast, so all
    # ranks see a consistent full matrix.
    master_weight = torch.empty(output_size, input_size,
                                dtype=weight.dtype,
                                requires_grad=False)
    init_method(master_weight)

    # Split and copy
    per_partition_per_stride_size = divide(per_partition_size, stride)
    weight_list = torch.split(master_weight, per_partition_per_stride_size,
                              dim=partition_dim)
    rank = get_model_parallel_rank()
    # Take every world_size-th chunk starting at our rank (stride interleaving).
    my_weight_list = weight_list[rank::world_size]

    with torch.no_grad():
        torch.cat(my_weight_list, dim=partition_dim, out=weight)
    if return_master_weight:
        return master_weight
    return None
18,379 | import os
import types
from typing import Iterable
import torch
from typing import Callable, Tuple
from tqdm import tqdm
import numpy as np
import math
from torch.distributions.bernoulli import Bernoulli
from torch.nn.utils import clip_grad_norm_
from torch.optim import Optimizer
from .compat import _report_compat_error
from .file_utils import logging
_report_compat_error()
sofa_backend = os.environ["SOFA_BACKEND"]
def step_adamw(self, closure: Callable = None):
    """
    Performs a single optimization step.

    Standard AdamW update with a ChildTuning gradient-masking hook applied
    before the moment updates (see the HACK block).  Intended to be patched
    onto an AdamW optimizer instance/class as its ``step`` method.

    Arguments:
        closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure`` if one was provided, else ``None``.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

            # =================== HACK BEGIN =====================
            # ChildTuning: restrict the update to a "child network".
            # - ChildTuning-D: fixed task-driven mask (self.gradient_mask).
            # - ChildTuning-F: fresh Bernoulli(reserve_p) mask each step,
            #   rescaled by 1/reserve_p to keep the expected gradient unbiased.
            if self.mode is not None:
                if self.mode == 'ChildTuning-D':
                    if p in self.gradient_mask:
                        grad *= self.gradient_mask[p]
                else:
                    # ChildTuning-F
                    grad_mask = Bernoulli(grad.new_full(size=grad.size(), fill_value=self.reserve_p))
                    grad *= grad_mask.sample() / self.reserve_p
            # =================== HACK END =======================

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = 0
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = torch.zeros_like(p.data)

            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            beta1, beta2 = group["betas"]

            state["step"] += 1

            # Decay the first and second moment running average coefficient
            # In-place operations to update the averages at the same time
            exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
            denom = exp_avg_sq.sqrt().add_(group["eps"])

            step_size = group["lr"]
            if group["correct_bias"]:  # No bias correction for Bert
                bias_correction1 = 1.0 - beta1 ** state["step"]
                bias_correction2 = 1.0 - beta2 ** state["step"]
                step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

            p.data.addcdiv_(exp_avg, denom, value=-step_size)

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            # Add weight decay at the end (fixed version)
            if group["weight_decay"] > 0.0:
                p.data.add_(p.data, alpha=(-group["lr"] * group["weight_decay"]))

    return loss
def step_adam(self, closure=None):
    """Performs a single optimization step.

    BertAdam-style update (gradient clipping, weight decay folded into the
    update, external LR schedule, no bias correction) with the ChildTuning
    gradient-masking hook applied first.  Intended to be patched onto a
    BertAdam optimizer as its ``step`` method.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from ``closure`` if one was provided, else ``None``.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError(
                    'Adam does not support sparse gradients, please consider SparseAdam instead'
                )

            # =================== HACK BEGIN =====================
            # ChildTuning: mask gradients to the "child network" -- either a
            # fixed task-driven mask (D) or a fresh Bernoulli mask rescaled
            # by 1/reserve_p (F).
            if self.mode is not None:
                if self.mode == 'ChildTuning-D':
                    if p in self.gradient_mask:
                        grad *= self.gradient_mask[p]
                else:
                    # ChildTuning-F
                    grad_mask = Bernoulli(grad.new_full(size=grad.size(), fill_value=self.reserve_p))
                    grad *= grad_mask.sample() / self.reserve_p
            # =================== HACK END =======================

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['next_m'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['next_v'] = torch.zeros_like(p.data)

            next_m, next_v = state['next_m'], state['next_v']
            beta1, beta2 = group['b1'], group['b2']

            # Add grad clipping
            if group['max_grad_norm'] > 0:
                clip_grad_norm_(p, group['max_grad_norm'])

            # Decay the first and second moment running average coefficient
            # In-place operations to update the averages at the same time
            next_m.mul_(beta1).add_(grad, alpha=1.0 - beta1)
            next_v.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
            update = next_m / (next_v.sqrt() + group['e'])

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            if group['weight_decay'] > 0.0:
                update += group['weight_decay'] * p.data

            # LR comes from an external schedule object stored in the group.
            lr_scheduled = group['lr']
            lr_scheduled *= group['schedule'].get_lr(state['step'])

            update_with_lr = lr_scheduled * update
            p.data.add_(-update_with_lr)

            state['step'] += 1

            # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
            # No bias correction
            # bias_correction1 = 1 - beta1 ** state['step']
            # bias_correction2 = 1 - beta2 ** state['step']

    return loss
def _report_compat_error():
if "SOFA_BACKEND" not in os.environ:
os.environ["SOFA_BACKEND"] = "sofa"
sofa_backend = os.environ["SOFA_BACKEND"]
if sofa_backend not in ["huggingface", "easytexminer", "easynlp", "sofa"]:
raise RuntimeError(f"Sofa backend {sofa_backend} not supported.")
The provided code snippet includes necessary dependencies for implementing the `apply_child_tuning` function. Write a Python function `def apply_child_tuning(mode="ChildTuning-F", reserve_p=0.2)` to solve the following problem:
Apply child tuning to all trainer classes. :param mode: The child_tuning type. Support: "ChildTuning-F" or "ChildTuning-D" :param reserve_p: The reserved gradient ratio. :return: None
Here is the function:
def apply_child_tuning(mode="ChildTuning-F", reserve_p=0.2):
    """
    Apply child tuning to all trainer classes.

    Monkey-patches the active backend's Adam(W)/BertAdam optimizer class so
    every future optimizer instance performs ChildTuning gradient masking in
    its step() method.

    :param mode: The child_tuning type. Support: "ChildTuning-F" or "ChildTuning-D"
    :param reserve_p: The reserved gradient ratio.
    :return: None
    """
    # ChildTuning-D needs a task-specific Fisher mask, which requires a
    # concrete trainer; only the task-free variant is valid globally.
    if mode == "ChildTuning-D":
        raise RuntimeError("Mode ChildTuning-D is task related, please use ChildTuning-F mode.")
    if sofa_backend == "huggingface":
        from transformers import AdamW
        AdamW.mode = mode
        AdamW.reserve_p = reserve_p
        AdamW.step = step_adamw
    elif sofa_backend in ["easytexminer", "easynlp"]:
        if sofa_backend == "easytexminer":
            from easytexminer.core.optimizers import BertAdam
        else:
            from easynlp.core.optimizers import BertAdam
        BertAdam.mode = mode
        BertAdam.reserve_p = reserve_p
        BertAdam.step = step_adam
    elif sofa_backend == "sofa":
        from .backend import AdamW
        AdamW.mode = mode
        AdamW.reserve_p = reserve_p
        AdamW.step = step_adamw
    else:
        # Unknown backend: raise the standard compatibility error.
        _report_compat_error()
18,380 | import os
import types
from typing import Iterable
import torch
from typing import Callable, Tuple
from tqdm import tqdm
import numpy as np
import math
from torch.distributions.bernoulli import Bernoulli
from torch.nn.utils import clip_grad_norm_
from torch.optim import Optimizer
from .compat import _report_compat_error
from .file_utils import logging
_report_compat_error()
sofa_backend = os.environ["SOFA_BACKEND"]
def calculate_fisher(trainer, reserve_p):
    '''
    Calculate Fisher Information for different parameters.

    Runs one pass over the trainer's training data, accumulating the mean
    squared gradient (empirical Fisher) for every parameter whose name
    contains 'layer', then thresholds at the (1 - reserve_p) percentile.

    :param trainer: backend trainer instance (transformers / easytexminer /
        easynlp, selected via SOFA_BACKEND).
    :param reserve_p: fraction of parameters to keep trainable.
    :return: dict mapping parameter -> boolean mask tensor (True = keep),
        or None for an unsupported backend.
    '''
    _report_compat_error()
    if sofa_backend == "huggingface":
        def report_trainer_param(trainer_, element):
            # Fail fast if the trainer lacks an attribute we need below.
            if not hasattr(trainer_, element):
                raise RuntimeError(f"No {element} attr found in trainer, please make sure"
                                   "trainer is transformers.Trainer or the version of huggingface is right.")
        report_trainer_param(trainer, "_prepare_inputs")
        report_trainer_param(trainer, "compute_loss")
        report_trainer_param(trainer, "model")
        report_trainer_param(trainer, "get_train_dataloader")
        prepare_inputs = trainer._prepare_inputs
        compute_loss = trainer.compute_loss
        model = trainer.model
        train_dataloader = trainer.get_train_dataloader()
        max_grad_norm = 1.0  # a default value
        if hasattr(trainer, "args") and hasattr(trainer.args, "max_grad_norm"):
            max_grad_norm = trainer.args.max_grad_norm if trainer.args.max_grad_norm is not None \
                and trainer.args.max_grad_norm > 0 else max_grad_norm
    elif sofa_backend in ["easytexminer", "easynlp"]:
        if sofa_backend == "easytexminer":
            from easytexminer.utils import get_args
        else:
            from easynlp.utils import get_args
        def report_trainer_param(trainer_, element):
            if not hasattr(trainer_, element):
                raise RuntimeError(f"No {element} attr found in trainer, please make sure"
                                   f"trainer is {sofa_backend}.Trainer.")
        def prepare_inputs(batch):
            # Move every tensor in the batch to this process's device.
            args = get_args()
            batch = {
                key: val.to(args.local_rank) if isinstance(val, torch.Tensor) else val
                for key, val in batch.items()
            }
            return batch
        def compute_loss(model_, inputs_):
            label_ids = inputs_.pop("label_ids")
            # NOTE(review): closes over the outer `inputs` loop variable rather
            # than using `inputs_`; they are the same object at call time, but
            # this looks accidental -- confirm.
            forward_outputs = model_(inputs)
            return model_.compute_loss(forward_outputs, label_ids)
        report_trainer_param(trainer, "_model")
        report_trainer_param(trainer, "_train_loader")
        model = trainer._model
        train_dataloader = trainer._train_loader
        args = get_args()
        max_grad_norm = 1.0  # a default value
        if hasattr(args, "max_grad_norm"):
            max_grad_norm = args.max_grad_norm if args.max_grad_norm is not None \
                and args.max_grad_norm > 0 else max_grad_norm
    else:
        return

    # One accumulator per masked parameter (names containing 'layer').
    gradient_mask = dict()
    model.train()
    for name, params in model.named_parameters():
        if 'layer' in name:
            gradient_mask[params] = params.new_zeros(params.size())
    N = len(train_dataloader)

    for inputs in tqdm(train_dataloader):
        if sofa_backend == "huggingface":
            if "idx" in inputs:
                inputs.pop("idx")
            inputs = prepare_inputs(inputs)
            loss = compute_loss(model, inputs)
        elif sofa_backend in ["easytexminer", "easynlp"]:
            inputs = prepare_inputs(inputs)
            outputs = compute_loss(model, inputs)
            loss = outputs["loss"]
        else:
            return
        loss.backward()

        # Accumulate mean squared gradients (empirical Fisher).
        for name, params in model.named_parameters():
            if 'layer' in name:
                torch.nn.utils.clip_grad_norm_(params, max_grad_norm)
                gradient_mask[params] += (params.grad ** 2) / N
        model.zero_grad()

    logger.info('Calculate Fisher Information...')

    # Numpy
    # Flatten all accumulators to find the global (1 - reserve_p) percentile.
    r = None
    for k, v in gradient_mask.items():
        v = v.view(-1).cpu().numpy()
        if r is None:
            r = v
        else:
            r = np.append(r, v)
    polar = np.percentile(r, (1 - reserve_p) * 100)
    for k in gradient_mask:
        gradient_mask[k] = gradient_mask[k] >= polar
    print('Polar => {}'.format(polar))

    # TODO: pytorch: torch.kthvalue
    return gradient_mask
def step_adamw(self, closure: Callable = None):
    """
    Performs a single optimization step.

    Standard AdamW update with a ChildTuning gradient-masking hook applied
    before the moment updates (see the HACK block).  Intended to be patched
    onto an AdamW optimizer instance/class as its ``step`` method.

    Arguments:
        closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.

    Returns:
        The loss from ``closure`` if one was provided, else ``None``.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group["params"]:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")

            # =================== HACK BEGIN =====================
            # ChildTuning: restrict the update to a "child network".
            # - ChildTuning-D: fixed task-driven mask (self.gradient_mask).
            # - ChildTuning-F: fresh Bernoulli(reserve_p) mask each step,
            #   rescaled by 1/reserve_p to keep the expected gradient unbiased.
            if self.mode is not None:
                if self.mode == 'ChildTuning-D':
                    if p in self.gradient_mask:
                        grad *= self.gradient_mask[p]
                else:
                    # ChildTuning-F
                    grad_mask = Bernoulli(grad.new_full(size=grad.size(), fill_value=self.reserve_p))
                    grad *= grad_mask.sample() / self.reserve_p
            # =================== HACK END =======================

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state["step"] = 0
                # Exponential moving average of gradient values
                state["exp_avg"] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state["exp_avg_sq"] = torch.zeros_like(p.data)

            exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
            beta1, beta2 = group["betas"]

            state["step"] += 1

            # Decay the first and second moment running average coefficient
            # In-place operations to update the averages at the same time
            exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
            exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
            denom = exp_avg_sq.sqrt().add_(group["eps"])

            step_size = group["lr"]
            if group["correct_bias"]:  # No bias correction for Bert
                bias_correction1 = 1.0 - beta1 ** state["step"]
                bias_correction2 = 1.0 - beta2 ** state["step"]
                step_size = step_size * math.sqrt(bias_correction2) / bias_correction1

            p.data.addcdiv_(exp_avg, denom, value=-step_size)

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            # Add weight decay at the end (fixed version)
            if group["weight_decay"] > 0.0:
                p.data.add_(p.data, alpha=(-group["lr"] * group["weight_decay"]))

    return loss
def step_adam(self, closure=None):
    """Performs a single optimization step.

    BertAdam-style update (gradient clipping, weight decay folded into the
    update, external LR schedule, no bias correction) with the ChildTuning
    gradient-masking hook applied first.  Intended to be patched onto a
    BertAdam optimizer as its ``step`` method.

    Arguments:
        closure (callable, optional): A closure that reevaluates the model
            and returns the loss.

    Returns:
        The loss from ``closure`` if one was provided, else ``None``.
    """
    loss = None
    if closure is not None:
        loss = closure()

    for group in self.param_groups:
        for p in group['params']:
            if p.grad is None:
                continue
            grad = p.grad.data
            if grad.is_sparse:
                raise RuntimeError(
                    'Adam does not support sparse gradients, please consider SparseAdam instead'
                )

            # =================== HACK BEGIN =====================
            # ChildTuning: mask gradients to the "child network" -- either a
            # fixed task-driven mask (D) or a fresh Bernoulli mask rescaled
            # by 1/reserve_p (F).
            if self.mode is not None:
                if self.mode == 'ChildTuning-D':
                    if p in self.gradient_mask:
                        grad *= self.gradient_mask[p]
                else:
                    # ChildTuning-F
                    grad_mask = Bernoulli(grad.new_full(size=grad.size(), fill_value=self.reserve_p))
                    grad *= grad_mask.sample() / self.reserve_p
            # =================== HACK END =======================

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['next_m'] = torch.zeros_like(p.data)
                # Exponential moving average of squared gradient values
                state['next_v'] = torch.zeros_like(p.data)

            next_m, next_v = state['next_m'], state['next_v']
            beta1, beta2 = group['b1'], group['b2']

            # Add grad clipping
            if group['max_grad_norm'] > 0:
                clip_grad_norm_(p, group['max_grad_norm'])

            # Decay the first and second moment running average coefficient
            # In-place operations to update the averages at the same time
            next_m.mul_(beta1).add_(grad, alpha=1.0 - beta1)
            next_v.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
            update = next_m / (next_v.sqrt() + group['e'])

            # Just adding the square of the weights to the loss function is *not*
            # the correct way of using L2 regularization/weight decay with Adam,
            # since that will interact with the m and v parameters in strange ways.
            #
            # Instead we want to decay the weights in a manner that doesn't interact
            # with the m/v parameters. This is equivalent to adding the square
            # of the weights to the loss with plain (non-momentum) SGD.
            if group['weight_decay'] > 0.0:
                update += group['weight_decay'] * p.data

            # LR comes from an external schedule object stored in the group.
            lr_scheduled = group['lr']
            lr_scheduled *= group['schedule'].get_lr(state['step'])

            update_with_lr = lr_scheduled * update
            p.data.add_(-update_with_lr)

            state['step'] += 1

            # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1
            # No bias correction
            # bias_correction1 = 1 - beta1 ** state['step']
            # bias_correction2 = 1 - beta2 ** state['step']

    return loss
def _report_compat_error():
if "SOFA_BACKEND" not in os.environ:
os.environ["SOFA_BACKEND"] = "sofa"
sofa_backend = os.environ["SOFA_BACKEND"]
if sofa_backend not in ["huggingface", "easytexminer", "easynlp", "sofa"]:
raise RuntimeError(f"Sofa backend {sofa_backend} not supported.")
The provided code snippet includes necessary dependencies for implementing the `apply_child_tuning_to_trainer` function. Write a Python function `def apply_child_tuning_to_trainer(trainer, mode="ChildTuning-F", reserve_p=0.2)` to solve the following problem:
Apply child tuning to a trainer instance. :param trainer: The trainer instance. :param mode: The child_tuning type. Support: "ChildTuning-F" or "ChildTuning-D" :param reserve_p: The reserved gradient ratio. :return: None
Here is the function:
def apply_child_tuning_to_trainer(trainer,
                                  mode="ChildTuning-F",
                                  reserve_p=0.2):
    """
    Apply child tuning to a trainer instance by patching its optimizer's step.

    :param trainer: The trainer instance whose optimizer will be patched.
    :param mode: The child_tuning type. Support: "ChildTuning-F" (task-free,
        fresh Bernoulli gradient mask each step) or "ChildTuning-D"
        (task-driven, fixed mask derived from Fisher information).
    :param reserve_p: The reserved gradient ratio.
    :return: None
    """
    gradient_mask = None
    if mode == "ChildTuning-D":
        # Task-driven variant: precompute a per-parameter mask from the
        # Fisher information estimated via the trainer.
        gradient_mask = calculate_fisher(trainer, reserve_p)
    # NOTE(review): `sofa_backend` is read here but not defined in this
    # function — presumably a module-level value derived from the
    # SOFA_BACKEND environment variable; confirm it is initialized before
    # this function runs.
    if sofa_backend in ["huggingface", "sofa"]:
        if sofa_backend == "huggingface":
            from transformers import AdamW, TrainerCallback
        else:
            from .backend import AdamW, TrainerCallback
        class OnTrainBeginCallback(TrainerCallback):
            # Patch lazily at train start, once the optimizer object exists.
            def on_train_begin(self, *args, **kwargs):
                optimizer = kwargs["optimizer"]
                if type(optimizer) != AdamW:
                    raise RuntimeError(f"Only AdamW is supported, not {type(optimizer)}.")
                optimizer.mode = mode
                optimizer.reserve_p = reserve_p
                optimizer.gradient_mask = gradient_mask
                # Rebind `step` so the gradient-masking AdamW step is used.
                optimizer.step = types.MethodType(step_adamw, optimizer)
        trainer.callback_handler.callbacks.append(OnTrainBeginCallback())
    elif sofa_backend in ["easytexminer", "easynlp"]:
        if sofa_backend == "easytexminer":
            from easytexminer.core.optimizers import BertAdam
        else:
            from easynlp.core.optimizers import BertAdam
        if not hasattr(trainer, "_optimizer"):
            raise RuntimeError("No optimizer found in trainer, please check the input param "
                               "or the version of easytexminer.")
        optimizer = getattr(trainer, "_optimizer")
        if type(optimizer) != BertAdam:
            raise RuntimeError(f"Only BertAdam is supported, not {type(optimizer)}.")
        optimizer.mode = mode
        optimizer.reserve_p = reserve_p
        optimizer.gradient_mask = gradient_mask
        # Legacy backends use BertAdam; rebind its step to the masking variant.
        optimizer.step = types.MethodType(step_adam, optimizer)
    else:
        _report_compat_error()
18,381 | import queue
import threading
import tensorflow as tf
import torch
import numpy as np
def convert_tf_example_to_torch_tensors(example):
def _multiproc_iter(dl, output_queue):
    # Worker loop: convert every TF example yielded by `dl` into torch
    # tensors and hand it to the consumer, blocking when the queue is full.
    for example in iter(dl):
        output_queue.put(convert_tf_example_to_torch_tensors(example), block=True)
18,382 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `filename_to_url` function. Write a Python function `def filename_to_url(filename, cache_dir=None)` to solve the following problem:
Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
Here is the function:
def filename_to_url(filename, cache_dir=None):
    """
    Return the ``(url, etag)`` pair recorded for a cached file.

    Looks up `filename` inside `cache_dir` (defaulting to
    ``PYTORCH_PRETRAINED_BERT_CACHE``) and reads its ``.json`` sidecar
    metadata; the etag may be ``None``.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    # Accept pathlib objects on Python 3 by downgrading to plain strings.
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    for required in (cache_path, meta_path):
        if not os.path.exists(required):
            raise EnvironmentError("file {} not found".format(required))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
18,383 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    The cache filename is derived from the URL plus the resource's ETag, so a
    changed remote file (new ETag) is cached under a new name. Raises
    ``IOError`` if the HEAD request for an http(s) URL does not return 200.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    # Accept pathlib paths on Python 3.
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)
    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)
            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)
            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)
            # Record url/etag next to the payload so filename_to_url() can
            # recover them later.
            logger.info("creating metadata file for %s", cache_path)
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)
            logger.info("removing temp file %s", temp_file.name)
    return cache_path
The provided code snippet includes necessary dependencies for implementing the `cached_path` function. Write a Python function `def cached_path(url_or_filename, cache_dir=None)` to solve the following problem:
Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path.
Here is the function:
def cached_path(url_or_filename, cache_dir=None):
    """
    Resolve `url_or_filename` to a local file path.

    Remote http/https/s3 URLs are fetched into the cache (downloading only if
    not already present) and the cached path is returned; an existing local
    path is returned unchanged. Raises ``EnvironmentError`` for a missing
    local file and ``ValueError`` for anything that is neither a URL nor a
    path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    # Accept pathlib objects on Python 3 by downgrading to plain strings.
    if sys.version_info[0] == 3 and isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # URL, so get it from the cache (downloading if necessary).
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    if scheme == '':
        # Looks like a path, but nothing is there.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
18,384 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `s3_request` function. Write a Python function `def s3_request(func)` to solve the following problem:
Wrapper function for s3 requests in order to create more helpful error messages.
Here is the function:
def s3_request(func):
    """
    Decorator for S3 operations that rewrites a 404 ``ClientError`` into a
    friendlier ``EnvironmentError`` naming the missing URL; every other
    client error is re-raised untouched.
    """
    @wraps(func)
    def wrapper(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))

    return wrapper
18,385 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `read_set_from_file` function. Write a Python function `def read_set_from_file(filename)` to solve the following problem:
Extract a de-duped collection (set) of text from a file. Expected file format is one item per line.
Here is the function:
def read_set_from_file(filename):
    """
    Read `filename` (one item per line, UTF-8) and return the de-duplicated
    items as a set, with trailing whitespace stripped from each line.
    """
    with open(filename, 'r', encoding='utf-8') as handle:
        return {line.rstrip() for line in handle}
18,386 | from __future__ import (absolute_import, division, print_function, unicode_literals)
import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
def get_file_extension(path, dot=True, lower=True):
    """
    Return the extension of `path`.

    :param dot: keep the leading ``.`` (default True).
    :param lower: lowercase the result (default True).
    """
    _, extension = os.path.splitext(path)
    if not dot:
        extension = extension[1:]
    return extension.lower() if lower else extension
18,387 | import os
import time
from operator import itemgetter
from bisect import bisect_right
import json
import csv
import math
import random
from itertools import accumulate
from torch.utils import data
import pandas as pd
import numpy as np
import nltk
from nltk import tokenize
from .lazy_loader import lazy_array_loader, exists_lazy, make_lazy
from .tokenization import Tokenization
class SplitDataset(data.Dataset):
    """
    Dataset wrapper to access a subset of another dataset.
    Purpose: useful to index into existing datasets, possibly
    large-scale datasets as the subindexing operation is done in an
    on-the-fly manner.
    Arguments:
        ds (Dataset or array-like): List of datasets to be subindexed
        split_inds (1D array-like): List of indices part of subset
    """
    def __init__(self, ds, split_inds, **kwargs):
        self.split_inds = list(split_inds)
        self.wrapped_data = ds
        # Lazy datasets expose per-sample lengths without materializing data.
        self.is_lazy = isinstance(ds, lazy_array_loader) or (hasattr(ds, 'is_lazy') and ds.is_lazy)
        if self.is_lazy:
            # Pre-slice the wrapped lengths down to this split's indices.
            self.lens = itemgetter(*self.split_inds)(list(self.wrapped_data.lens))
        # Caches for X()/Y(), filled on first access.
        self._X = None
        self._Y = None
    def __len__(self):
        return len(self.split_inds)
    def __getitem__(self, index):
        # Translate the split-local index into the wrapped dataset's index.
        return self.wrapped_data[self.split_inds[index]]
    def SetTokenizer(self, tokenizer):
        # Delegate tokenizer handling to the wrapped dataset.
        self.wrapped_data.SetTokenizer(tokenizer)
    def GetTokenizer(self):
        return self.wrapped_data.GetTokenizer()
    def X(self):
        # NOTE(review): X/Y read sibling attributes `wrapped_data.X`/`.Y`
        # without calling them, so they look like they were @property
        # accessors in the original project (decorators may have been lost
        # in this dump) — confirm against callers.
        if self._X is None:
            self._X = itemgetter(*self.split_inds)(self.wrapped_data.X)
        return self._X
    def Y(self):
        if self._Y is None:
            self._Y = np.array(itemgetter(*self.split_inds)(self.wrapped_data.Y))
        return self._Y
    def __iter__(self):
        # Yield samples in split order.
        for idx in self.split_inds:
            yield self.wrapped_data[idx]
The provided code snippet includes necessary dependencies for implementing the `split_ds` function. Write a Python function `def split_ds(ds, split=[.8,.2,.0], shuffle=True)` to solve the following problem:
Split a dataset into subsets given proportions of how much to allocate per split. If a split is 0% returns None for that split. Purpose: Useful for creating train/val/test splits Arguments: ds (Dataset or array-like): Data to be split. split (1D array-like): proportions to split `ds`. `sum(splits) != 0` shuffle (boolean): Randomly split dataset. Default: True
Here is the function:
def split_ds(ds, split=[.8,.2,.0], shuffle=True):
    """
    Split a dataset into subsets given proportions of how
    much to allocate per split. If a split is 0% returns None for that split.
    Purpose: Useful for creating train/val/test splits
    Arguments:
        ds (Dataset or array-like): Data to be split.
        split (1D array-like): proportions to split `ds`. `sum(splits) != 0`
        shuffle (boolean): Randomly split dataset. Default: True
    """
    # (The mutable default `split` is safe: it is rebound to a fresh
    # np.array below and never mutated in place.)
    split_sum = sum(split)
    if split_sum == 0:
        raise Exception('Split cannot sum to 0.')
    # Normalize proportions so they sum to 1.
    split = np.array(split)
    split /= split_sum
    ds_len = len(ds)
    inds = np.arange(ds_len)
    if shuffle:
        np.random.shuffle(inds)
    start_idx = 0
    # Fractional remainders are carried across splits so rounding never
    # loses samples overall.
    residual_idx = 0
    rtn_ds = [None]*len(split)
    for i, f in enumerate(split):
        if f != 0:
            proportion = ds_len*split[i]
            residual_idx += proportion % 1
            split_ = int(int(proportion) + residual_idx)
            # Every non-zero split receives at least one sample.
            split_inds = inds[start_idx:start_idx+max(split_, 1)]
            rtn_ds[i] = SplitDataset(ds, split_inds)
            start_idx += split_
            residual_idx %= 1
    return rtn_ds
18,388 | from collections import namedtuple
import random
import os
import csv
import torch
import nltk
from nltk import tokenize as nltk_tokenize
import sentencepiece as spm
from .wordpiece import BertTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
from .tokenization_gpt2 import GPT2Tokenizer
import regex as re
class Tokenizer(object):
    """
    Tokenizer object that handles text tokenization, command tokens, and type tokens.
    Command tokens and text tokens are stored together in one mapping of size
    `len(text_tokenizer)+len(command_tokens)`. Command tokens are stored as first
    `len(command_tokens)` tokens. Token idx is stored at `idx+len(command_tokens)`.
    Token types are stored in a separate mapping of size `len(type_tokens)`.
    """
    def __init__(self, text_tokenizer, command_tokens=None, type_tokens=None):
        # set text tokenizer
        self.text_tokenizer = text_tokenizer
        if not hasattr(self, 'num_text_tokens'):
            self.num_text_tokens = len(self.text_tokenizer)
        # set command tokens
        if command_tokens is None:
            command_tokens = DEFAULT_COMMAND_TOKENS
        self._command_tokens = command_tokens
        self.command_name_map = {tok.name: tok for tok in self._command_tokens}
        self.command_token_map = {tok.token: tok for tok in self._command_tokens}
        self.command_id_map = {tok.Id: tok for tok in self._command_tokens}
        if not hasattr(self, 'num_command_tokens'):
            self.num_command_tokens = len(self._command_tokens)
        if not hasattr(self, 'num_tokens'):
            self.num_tokens = self.num_command_tokens + self.num_text_tokens
        # set type tokens
        if type_tokens is None:
            type_tokens = DEFAULT_TYPE_TOKENS
        self.type_tokens = type_tokens
        self.type_name_map = {tok.name: tok for tok in self.type_tokens}
        self.type_token_map = {tok.token: tok for tok in self.type_tokens}
        self.type_id_map = {tok.Id: tok for tok in self.type_tokens}
        if not hasattr(self, 'num_type_tokens'):
            self.num_type_tokens = len(self.type_tokens)
        # parse tokens and vocabs from tokenizer: command tokens occupy ids
        # [0, num_command_tokens); text-token ids are shifted up by that amount
        self._tokens = list(self.command_token_map.keys()) + list(self.text_tokenizer.tokens)
        self._vocab = {t: Id for Id, t in self.command_id_map.items()}
        self._vocab.update({t: Id + self.num_command_tokens for t, Id in self.text_tokenizer.vocab.items()})
        self._text_tokens = list(self.text_tokenizer.tokens)
        self._text_token_vocab = {t: Id + self.num_command_tokens for t, Id in self.text_tokenizer.vocab.items()}
        self._command_token_tokens = list(self.command_token_map.keys())
        self._command_token_vocab = {t: Id for Id, t in self.command_id_map.items()}
        self._token_types = list(self.type_token_map.keys())
        self._token_type_vocab = {t: Id for Id, t in self.type_id_map.items()}

    def __call__(self, text, process_fn=None):
        """run preprocessing and encode text as Ids"""
        return self.EncodeAsIds(text, process_fn=process_fn)

    def __len__(self):
        """total number of tokens"""
        return self.num_tokens

    def get_command(self, name):
        """get command token corresponding to `name`"""
        return self.command_name_map[name]

    def get_type(self, name):
        """get type token corresponding to `name`"""
        return self.type_name_map[name]

    def tokens(self):
        """list (or iterable) of all tokens for tokenizer"""
        return self._tokens

    def vocab(self):
        """dictionary mapping tokens to ids for tokenizer"""
        return self._vocab

    def token_types(self):
        """list (or iterable) of all token types for tokenizer"""
        return self._token_types

    def token_type_vocab(self):
        """dictionary mapping token types to ids for tokenizer"""
        return self._token_type_vocab

    def command_tokens(self):
        """list (or iterable) of all command tokens for tokenizer"""
        return self._command_token_tokens

    def command_token_vocab(self):
        """dictionary mapping command tokens to ids for tokenizer"""
        return self._command_token_vocab

    def text_tokens(self):
        """list (or iterable) of text tokens for text tokenizer"""
        return self._text_tokens

    def text_token_vocab(self):
        """dictionary mapping text tokens to ids for text tokenizer"""
        return self._text_token_vocab

    def EncodeAsIds(self, text, process_fn=None):
        """
        encode text using text tokenizer and shift Id values for command tokens
        """
        tokenization = self.text_tokenizer.EncodeAsIds(text, process_fn=process_fn)
        tokenization.tokenization = [t + self.num_command_tokens for t in tokenization.tokenization]
        tokenization.set_command_tokens(self._command_tokens)
        return tokenization

    def EncodeAsTokens(self, text, process_fn=None):
        """
        encode text as tokens using text tokenizer
        """
        tokenization = self.text_tokenizer.EncodeAsTokens(text, process_fn=process_fn)
        tokenization.set_command_tokens(self._command_tokens)
        return tokenization

    def IdToToken(self, Id, type_token=False):
        """convert Id to token accounting for command and type tokens"""
        if isinstance(Id, (TypeToken, CommandToken)):
            return Id.token
        if type_token:
            return self.type_id_map[Id].token
        if Id < self.num_command_tokens:
            return self.command_id_map[Id].token
        return self.text_tokenizer.IdToToken(Id - self.num_command_tokens)

    def TokenToId(self, token, type_token=False):
        """convert token to Id accounting for command and type tokens"""
        if isinstance(token, (TypeToken, CommandToken)):
            return token.Id
        if type_token:
            return self.type_token_map[token].Id
        if token in self.command_token_map:
            return self.command_token_map[token].Id
        return self.text_tokenizer.TokenToId(token) + self.num_command_tokens

    def DecodeIds(self, Ids, type_token=False):
        """
        convert Ids to tokens accounting for command and type tokens, tokens
        are joined and returned as a string.
        """
        if type_token:
            return ' '.join(Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token for Id in Ids)
        rtn_strs = []
        current_str = []
        if isinstance(Ids, Tokenization):
            Ids = Ids.tokenization
        for Id in Ids:
            if isinstance(Id, CommandToken):
                # Flush the pending text-token run, then emit the command
                # token's literal string.
                rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
                current_str = []
                # BUGFIX: this branch previously appended `t.token`, but `t`
                # is undefined in this method — the command token is `Id`.
                rtn_strs.append(Id.token)
            elif Id < self.num_command_tokens:
                rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
                current_str = []
                rtn_strs.append(self.command_id_map[Id].token)
            else:
                # Shift back into the text tokenizer's id space.
                current_str.append(Id - self.num_command_tokens)
        if current_str != []:
            rtn_strs.append(self.text_tokenizer.DecodeIds(current_str))
        return ' '.join(rtn_strs)

    def DecodeTokens(self, Tokens, type_token=False):
        """
        convert tokens to a string accounting for command and type tokens.
        """
        if type_token:
            return ' '.join(t.token if isinstance(t, TypeToken) else t for t in Tokens)
        rtn_strs = []
        current_str = []
        if isinstance(Tokens, Tokenization):
            Tokens = Tokens.tokenization
        for t in Tokens:
            if isinstance(t, CommandToken):
                rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
                current_str = []
                rtn_strs.append(t.token)
            elif t in self.command_token_map:
                rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
                current_str = []
                rtn_strs.append(t)
            else:
                current_str.append(t)
        if current_str != []:
            rtn_strs.append(self.text_tokenizer.DecodeTokens(current_str))
        return ' '.join(rtn_strs)
class BertWordPieceTokenizer(Tokenizer):
    """
    Loads a pretrained WordPiece tokenizer from `cache_dir` for tokenization
    in BERT training. Default to bert-large-uncased tokenizer.
    """
    def __init__(self, tokenizer_model_type=None, cache_dir=None, **kwargs):
        # default to bert-large-uncased tokenizer
        if tokenizer_model_type not in PRETRAINED_VOCAB_ARCHIVE_MAP:
            tokenizer_model_type = 'bert-large-uncased'
        if torch.distributed.get_rank() == 0:
            print('loading BertWordPieceTokenizer (', tokenizer_model_type, ') from cache_dir ', cache_dir)
        # Cased/Chinese checkpoints must not be lowercased.
        do_lower_case = not ('-cased' in tokenizer_model_type or 'chinese' in tokenizer_model_type)
        self.text_tokenizer = BertTokenizer.from_pretrained(tokenizer_model_type, do_lower_case=do_lower_case, cache_dir=cache_dir)
        if torch.distributed.get_rank() == 0:
            print('loaded', tokenizer_model_type)
        # disable max len warnings by increasing max len
        self.text_tokenizer.max_len = int(1e12)
        # set command tokens from wordpiece tokenizer values
        self.num_command_tokens = 5
        self.num_tokens = len(self.text_tokenizer.vocab)
        self.num_text_tokens = self.num_tokens - 5
        self.num_type_tokens = 2
        self._command_tokens = [
            CommandToken('pad', '[PAD]', self.text_tokenizer.vocab['[PAD]']),
            CommandToken('ENC', '[CLS]', self.text_tokenizer.vocab['[CLS]']),
            CommandToken('MASK', '[MASK]', self.text_tokenizer.vocab['[MASK]']),
            CommandToken('unk', '[UNK]', self.text_tokenizer.vocab['[UNK]']),
            CommandToken('sep', '[SEP]', self.text_tokenizer.vocab['[SEP]']),
        ]
        self.command_name_map = {tok.name: tok for tok in self._command_tokens}
        self.command_token_map = {tok.token: tok for tok in self._command_tokens}
        self.command_id_map = {tok.Id: tok for tok in self._command_tokens}
        # set type tokens
        self.type_tokens = [
            TypeToken('str0', '<str0>', 0),
            TypeToken('str1', '<str1>', 1),
        ]
        self.type_name_map = {tok.name: tok for tok in self.type_tokens}
        self.type_token_map = {tok.token: tok for tok in self.type_tokens}
        self.type_id_map = {tok.Id: tok for tok in self.type_tokens}
        # parse tokens and vocabs from tokenizer; unlike the base Tokenizer,
        # command tokens reuse their wordpiece ids, so no id shifting occurs
        self._tokens = list(self.text_tokenizer.vocab.keys())
        self._vocab = {k: v for k, v in self.text_tokenizer.vocab.items()}
        self._text_tokens = list(self._tokens)
        self._text_token_vocab = {k: v for k, v in self.text_tokenizer.vocab.items()}
        self._command_token_tokens = list(self.command_token_map.keys())
        self._command_token_vocab = {t: Id for Id, t in self.command_id_map.items()}
        self._token_types = list(self.type_token_map.keys())
        self._token_type_vocab = {t: Id for Id, t in self.type_id_map.items()}

    def EncodeAsIds(self, text, process_fn=None):
        """convert text to wordpiece Ids"""
        processed_text = text
        if process_fn is not None:
            processed_text = process_fn(processed_text)
        tokens = self.text_tokenizer.tokenize(processed_text)
        Ids = self.text_tokenizer.convert_tokens_to_ids(tokens)
        return Tokenization(Ids, processed_text, text)

    def EncodeAsTokens(self, text, process_fn=None):
        """encode text as wordpiece tokens"""
        processed_text = text
        if process_fn is not None:
            processed_text = process_fn(processed_text)
        tokens = self.text_tokenizer.tokenize(processed_text)
        return Tokenization(tokens, processed_text, text, asIds=False)

    def IdToToken(self, Id, type_token=False):
        """convert Id to wordpiece token"""
        if isinstance(Id, (TypeToken, CommandToken)):
            return Id.token
        if type_token:
            return self.type_id_map[Id].token
        return self.text_tokenizer.ids_to_tokens[Id]

    def TokenToId(self, token, type_token=False):
        """convert wordpiece token to Id"""
        if isinstance(token, (TypeToken, CommandToken)):
            return token.Id
        if type_token:
            return self.type_token_map[token].Id
        return self.text_tokenizer.vocab[token]

    def DecodeIds(self, Ids, type_token=False):
        """converts ids to wordpiece tokens and joins them as a text string"""
        if type_token:
            return ' '.join(Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token for Id in Ids)
        if isinstance(Ids, Tokenization):
            Ids = Ids.tokenization
        # BUGFIX: a manual id->token loop previously built `Tokens` here, only
        # for its result to be immediately overwritten by the call below; the
        # dead loop has been removed (observable behavior unchanged).
        Tokens = self.text_tokenizer.convert_ids_to_tokens(Ids)
        return ' '.join(Tokens)

    def DecodeTokens(self, Tokens, type_token=False):
        """converts wordpiece tokens to a text string"""
        if type_token:
            return ' '.join(t.token if isinstance(t, TypeToken) else t for t in Tokens)
        if isinstance(Tokens, Tokenization):
            Tokens = Tokens.tokenization
        return ' '.join(Tokens)
class GPT2BPETokenizer(Tokenizer):
    """Wraps the pretrained GPT-2 byte-pair-encoding tokenizer in the shared
    Tokenizer interface. Command tokens reuse underlying BPE ids, so no id
    shifting is performed (unlike the base Tokenizer)."""
    def __init__(self, cache_dir=None, **kwargs):
        self.text_tokenizer = GPT2Tokenizer.from_pretrained('gpt2',
                                                            cache_dir=cache_dir)
        # disable max len warnings by increasing max len
        self.text_tokenizer.max_len = int(1e12)
        self.num_command_tokens = 2
        self.num_tokens = len(self.text_tokenizer.encoder)
        # 'pad' and 'eos' both map to <|endoftext|>, so only one id is
        # consumed by command tokens.
        self.num_text_tokens = self.num_tokens-1
        self.num_type_tokens = 2
        self._command_tokens = [
            CommandToken('pad', '<|endoftext|>', self.text_tokenizer.encoder['<|endoftext|>']),
            CommandToken('eos', '<|endoftext|>', self.text_tokenizer.encoder['<|endoftext|>']),
        ]
        self.command_name_map = {tok.name: tok for tok in self._command_tokens}
        self.command_token_map = {tok.token: tok for tok in self._command_tokens}
        # Both command tokens share one id, so this map collapses to a single
        # entry (the 'eos' token wins).
        self.command_id_map = {tok.Id: tok for tok in self._command_tokens}
        self.type_tokens = [
            TypeToken('str0', '<str0>', 0),
            TypeToken('str1', '<str1>', 1),
        ]
        self.type_name_map = {tok.name: tok for tok in self.type_tokens}
        self.type_token_map = {tok.token: tok for tok in self.type_tokens}
        self.type_id_map = {tok.Id: tok for tok in self.type_tokens}
        # Expose the BPE vocab directly (ids are not shifted).
        self._tokens = list(self.text_tokenizer.encoder.keys())
        self._vocab = {k:v for k,v in self.text_tokenizer.encoder.items()}
        self._text_tokens = list(self._tokens)
        self._text_token_vocab = {k:v for k,v in self.text_tokenizer.encoder.items()}
        self._command_token_tokens = list(self.command_token_map.keys())
        self._command_token_vocab = {t:Id for Id,t in self.command_id_map.items()}
        self._token_types = list(self.type_token_map.keys())
        self._token_type_vocab = {t:Id for Id, t in self.type_id_map.items()}
    def EncodeAsIds(self, text, process_fn=None):
        # Encode text into BPE ids via the wrapped tokenizer.
        processed_text = text
        if process_fn is not None:
            processed_text = process_fn(processed_text)
        Ids = self.text_tokenizer.encode(processed_text)
        tokenization = Tokenization(Ids, processed_text, text)
        tokenization.set_command_tokens(self._command_tokens)
        return tokenization
    def EncodeAsTokens(self, text, process_fn=None):
        # Encode text into BPE token strings (not ids), mirroring the
        # tokenizer's own pre-tokenize -> byte-encode -> BPE pipeline.
        processed_text = text
        if process_fn is not None:
            processed_text = process_fn(processed_text)
        tokens = []
        for token in re.findall(self.text_tokenizer.pat, processed_text):
            # NOTE(review): `bye_encoder` looks like a typo for GPT2Tokenizer's
            # `byte_encoder` attribute — this line would raise AttributeError
            # as written; confirm against tokenization_gpt2.
            token = ''.join(self.text_tokenizer.bye_encoder[b] for b in token.encode('utf-8'))
            tokens.extend(bpe_token for bpe_token in self.text_tokenizer.bpe(token).split(' '))
        tokenization=Tokenization(tokens, processed_text, text, asIds=False)
        tokenization.set_command_tokens(self._command_tokens)
        return tokenization
    def IdToToken(self, Id, type_token=False):
        # Map an id (or token object) back to its token string.
        if isinstance(Id, (TypeToken, CommandToken)):
            return Id.token
        if type_token:
            return self.type_id_map[Id].token
        return self.text_tokenizer.decoder[Id]
    def TokenToId(self, token, type_token=False):
        # Map a token string (or token object) to its id.
        if isinstance(token, (TypeToken, CommandToken)):
            return token.Id
        if type_token:
            return self.type_token_map[token].Id
        return self.text_tokenizer.encoder[token]
    def DecodeIds(self, Ids, type_token=False):
        # Decode a sequence of ids back into text.
        if type_token:
            return ' '.join(Id.token if isinstance(Id, TypeToken) else self.type_id_map[Id].token for Id in Ids)
        if isinstance(Ids, Tokenization):
            Ids = Ids.tokenization
        return self.text_tokenizer.decode(Ids)
    def DecodeTokens(self, Tokens, type_token=False):
        # Decode a sequence of BPE token strings back into text.
        if type_token:
            return ' '.join(t.token if isinstance(t, TypeToken) else t for t in Tokens)
        if isinstance(Tokens, Tokenization):
            Tokens = Tokens.tokenization
        return self.text_tokenizer.decode([self.TokenToId(tok) for tok in Tokens])
The provided code snippet includes necessary dependencies for implementing the `make_tokenizer` function. Write a Python function `def make_tokenizer(tokenizer_type, corpus, model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_coverage=1.0, command_tokens=None, type_tokens=None, **kwargs)` to solve the following problem:
Helper function to instantiate a tokenizer given common combinations of options.
Here is the function:
def make_tokenizer(tokenizer_type, corpus, model_path=None, vocab_size=None, model_type='bpe', pad_token=0, character_coverage=1.0, command_tokens=None, type_tokens=None, **kwargs):
    """
    Helper function to instantiate a tokenizer given common combinations of options.

    `tokenizer_type` may be a tokenizer class or its name as a string. The
    pretrained Bert/GPT2 tokenizers ignore the corpus-training arguments; any
    other class is constructed from `corpus` and wrapped in `Tokenizer` with
    the given command/type tokens.
    """
    tokenizer_class = tokenizer_type
    if isinstance(tokenizer_class, str):
        # NOTE(review): eval() resolves the class from a caller-supplied
        # string — acceptable for trusted config, unsafe if the name can come
        # from untrusted input.
        tokenizer_class = eval(tokenizer_class)
    if tokenizer_class is BertWordPieceTokenizer:
        return BertWordPieceTokenizer(model_type, **kwargs)
    elif tokenizer_class is GPT2BPETokenizer:
        return GPT2BPETokenizer(**kwargs)
    # Generic path: train/load a text tokenizer from the corpus, then wrap it.
    text_tokenizer = tokenizer_class(corpus=corpus, vocab_size=vocab_size, model_path=model_path, model_type=model_type,
                                     pad_token=pad_token, character_coverage=character_coverage)
    return Tokenizer(text_tokenizer, command_tokens, type_tokens)
18,389 | from collections import namedtuple
import random
import os
import csv
import torch
import nltk
from nltk import tokenize as nltk_tokenize
import sentencepiece as spm
from .wordpiece import BertTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
from .tokenization_gpt2 import GPT2Tokenizer
import regex as re
class CommandToken(object):
    """A special command token: human-readable name, literal token string, and id."""

    def __init__(self, name, token, Id):
        self.name, self.token, self.Id = name, token, Id

    def __str__(self):
        # Render through the shared COMMAND_TUPLE namedtuple for a uniform repr.
        return str(COMMAND_TUPLE(self.name, self.token, self.Id))
def prep_command_tokens(tokenlist, token_format=token_format):
    # Build CommandToken objects from (name, Id) pairs; each literal token
    # string is produced by formatting the name with the module-level
    # `token_format` template (referenced here as the default argument).
    return [CommandToken(tok[0], token_format.format(tok[0]), tok[1]) for tok in tokenlist]
18,390 | from collections import namedtuple
import random
import os
import csv
import torch
import nltk
from nltk import tokenize as nltk_tokenize
import sentencepiece as spm
from .wordpiece import BertTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
from .tokenization_gpt2 import GPT2Tokenizer
import regex as re
class TypeToken(object):
def __init__(self, name, token, Id):
def __str__(self):
def prep_type_tokens(tokenlist, token_format=token_format):
return [TypeToken(tok[0], token_format.format(tok[0]), tok[1]) for tok in tokenlist] | null |
18,391 | from collections import namedtuple
import random
import os
import csv
import torch
import nltk
from nltk import tokenize as nltk_tokenize
import sentencepiece as spm
from .wordpiece import BertTokenizer, PRETRAINED_VOCAB_ARCHIVE_MAP
from .tokenization_gpt2 import GPT2Tokenizer
import regex as re
MAX_SENTENCEPIECE_SENTENCES = 100000000
The provided code snippet includes necessary dependencies for implementing the `get_corpus_freq` function. Write a Python function `def get_corpus_freq(dataset, filepath, filetype='tsv')` to solve the following problem:
Take corpus, split it into sentences, and extract word frequencies. Write frequencies to `filepath` as a tsv. Only write the first MAX_SENTENCEPIECE_SENTENCES most common words to the file.
Here is the function:
def get_corpus_freq(dataset, filepath, filetype='tsv'):
    """
    Split `dataset` into sentences and count word frequencies.

    Writes at most MAX_SENTENCEPIECE_SENTENCES of the most frequent words
    (one ``word<delim>count`` row each) to `filepath`, tab-delimited when
    `filetype` is 'tsv', comma-delimited otherwise.

    Args:
        dataset: iterable of strings, or dicts with a 'text' field.
        filepath: destination path for the frequency table.
        filetype: 'tsv' for tab-delimited output, anything else for csv.

    Returns:
        (total_sentence_count, maxlen) where maxlen is the length of the
        longest raw *line* that produced at least one sentence.
    """
    from collections import Counter
    from itertools import islice

    nltk.download('punkt', download_dir="./nltk")
    delimiter = '\t' if filetype == 'tsv' else ','
    print("compute corpus frequency\n", flush=True)
    total_sentence_count = 0
    maxlen = 0
    freqs = Counter()
    for entry in dataset:
        if isinstance(entry, dict):
            entry = entry['text']
        for line in entry.strip().split('\n'):
            sentences = nltk_tokenize.sent_tokenize(line)
            total_sentence_count += len(sentences)
            if sentences:
                # NOTE(review): tracks the longest raw *line*, not sentence, as the
                # original did (hoisted out of the per-sentence loop, guarded so
                # sentence-less lines are still excluded) -- confirm intent.
                maxlen = max(len(line), maxlen)
            for sentence in sentences:
                freqs.update(sentence.split())
    print("length of freqs before truncating " + str(len(freqs)), flush=True)
    print("file path for freq " + str(filepath), flush=True)
    # Keep only the most frequent words; sorted() is stable, so ties resolve by
    # first appearance, matching the original dict-insertion behavior.
    ranked = sorted(freqs.items(), key=lambda x: x[1], reverse=True)
    freqs_sorted = dict(islice(ranked, MAX_SENTENCEPIECE_SENTENCES))
    # (typo "trancating" fixed in the log message below)
    print("length of freqs after truncating " + str(len(freqs_sorted)), flush=True)
    with open(filepath, 'w') as f:
        writer = csv.writer(f, delimiter=delimiter)
        for k, v in freqs_sorted.items():
            writer.writerow([str(k), str(v)])
    return total_sentence_count, maxlen
18,392 | import os
import mmap
import pickle as pkl
import time
from itertools import accumulate
import torch
from torch.multiprocessing import Lock
def get_lazy_path(path):
    """Return the `<path-without-extension>.lazy` directory path for `path`."""
    base, _ext = os.path.splitext(path)
    return base + '.lazy'
The provided code snippet includes necessary dependencies for implementing the `exists_lazy` function. Write a Python function `def exists_lazy(path, data_type='data')` to solve the following problem:
Check if we've already made a lazy version of this file for the `data_type` field.
Here is the function:
def exists_lazy(path, data_type='data'):
    """Return True iff both the lazy data file and its `.len.pkl` length file
    already exist for the `data_type` field of `path`."""
    lazy_dir = get_lazy_path(path)
    if not os.path.exists(lazy_dir):
        return False
    contents = set(os.listdir(lazy_dir))
    return data_type in contents and (data_type + '.len.pkl') in contents
18,393 | import os
import mmap
import pickle as pkl
import time
from itertools import accumulate
import torch
from torch.multiprocessing import Lock
def get_lazy_path(path):
    """Directory where lazy files for `path` live: the path's stem plus a `.lazy` suffix."""
    return '{}.lazy'.format(os.path.splitext(path)[0])
The provided code snippet includes necessary dependencies for implementing the `make_lazy` function. Write a Python function `def make_lazy(path, strs, data_type='data')` to solve the following problem:
Make lazy version of `data_type` field of the file. Byte offsets corresponding to data indices are stored in a `.len.pkl` data file.
Here is the function:
def make_lazy(path, strs, data_type='data'):
    """
    Write a lazy version of the `data_type` field of the file at `path`.

    The concatenated utf-8 bytes of every string in `strs` are written to
    `<stem>.lazy/<data_type>`, and the per-string byte lengths are pickled to
    `<stem>.lazy/<data_type>.len.pkl` (byte offsets are recovered downstream
    by accumulating these lengths).

    Under torch.distributed, only rank 0 writes; other ranks poll until the
    length file appears.
    """
    lazypath = get_lazy_path(path)
    if not os.path.exists(lazypath):
        os.makedirs(lazypath)
    datapath = os.path.join(lazypath, data_type)
    lenpath = os.path.join(lazypath, data_type + '.len.pkl')
    if not torch.distributed.is_initialized() or torch.distributed.get_rank() == 0:
        str_lens = []
        with open(datapath, 'wb') as f:
            for s in strs:
                if isinstance(s, dict):
                    s = s['text']
                encoded = s.encode('utf-8')
                f.write(encoded)
                str_lens.append(len(encoded))
        # Use a context manager so the pickle file handle is closed promptly
        # (the original leaked it via pkl.dump(..., open(...))).
        with open(lenpath, 'wb') as lf:
            pkl.dump(str_lens, lf)
    else:
        # Non-writer ranks busy-wait until rank 0 finishes writing the lengths.
        while not os.path.exists(lenpath):
            time.sleep(1)
18,394 | import os
import mmap
import pickle as pkl
import time
from itertools import accumulate
import torch
from torch.multiprocessing import Lock
The provided code snippet includes necessary dependencies for implementing the `split_strings` function. Write a Python function `def split_strings(strings, start, chr_lens)` to solve the following problem:
Split strings based on string lengths and given start.
Here is the function:
def split_strings(strings, start, chr_lens):
    """Slice `strings` into pieces using absolute end offsets `chr_lens`,
    rebased so that `strings[0]` corresponds to absolute position `start`."""
    pieces = []
    begin = start
    for end in chr_lens:
        pieces.append(strings[begin - start:end - start])
        begin = end
    return pieces
18,395 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
    """Load a vocabulary file (one token per line, utf-8) into an
    OrderedDict mapping token -> line index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        for index, raw_line in enumerate(reader):
            vocab[raw_line.strip()] = index
    return vocab
18,396 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
    """Strip `text` and split it on runs of whitespace; empty input yields []."""
    stripped = text.strip()
    if not stripped:
        return []
    return stripped.split()
18,397 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
18,398 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
18,399 | from __future__ import absolute_import, division, print_function, unicode_literals
import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
18,400 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .file_utils import cached_path
def lru_cache():
    """No-op stand-in for functools.lru_cache: returns an identity decorator."""
    def _identity(func):
        return func
    return _identity
18,401 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `bytes_to_unicode` function. Write a Python function `def bytes_to_unicode()` to solve the following problem:
Returns a list of utf-8 bytes and a corresponding list of unicode strings. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. And avoids mapping to whitespace/control characters the bpe code barfs on.
Here is the function:
def bytes_to_unicode():
    """
    Build the byte -> unicode-character lookup used by byte-level BPE.

    Reversible BPE codes operate on unicode strings, so every byte needs a
    printable one-character representation: printable/latin bytes map to
    themselves, while the remaining bytes are shifted into the 256+ codepoint
    range so nothing maps to whitespace/control characters the BPE code barfs
    on. Returns a dict of int byte -> single-character string.
    """
    _chr = unichr if sys.version_info[0] == 2 else chr
    # Bytes that are safe to represent as their own codepoint.
    printable = list(range(ord("!"), ord("~") + 1)) \
        + list(range(ord("¡"), ord("¬") + 1)) \
        + list(range(ord("®"), ord("ÿ") + 1))
    codepoints = {b: b for b in printable}
    shift = 0
    for b in range(2 ** 8):
        if b not in codepoints:
            codepoints[b] = 2 ** 8 + shift
            shift += 1
    return {b: _chr(c) for b, c in codepoints.items()}
18,402 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import json
import logging
import os
import regex as re
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
    """
    Return the set of symbol pairs in a word.

    Args:
        word: tuple of symbols (symbols being variable-length strings).

    Returns:
        Set of (prev_symbol, next_symbol) tuples, one per adjacent pair.
        Words with fewer than two symbols yield an empty set (the original
        raised IndexError on an empty word).
    """
    return set(zip(word, word[1:]))
18,403 | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from .loss_scaler import DynamicLossScaler, LossScaler
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
FLOAT_TYPES = (torch.FloatTensor, torch.cuda.FloatTensor)
def conversion_helper(val, conversion):
    """Apply `conversion` to `val`, recursing through nested tuple/list
    structure (tuples stay tuples, lists stay lists)."""
    if isinstance(val, (tuple, list)):
        converted = [conversion_helper(item, conversion) for item in val]
        return tuple(converted) if isinstance(val, tuple) else converted
    return conversion(val)
The provided code snippet includes necessary dependencies for implementing the `fp32_to_fp16` function. Write a Python function `def fp32_to_fp16(val)` to solve the following problem:
Convert fp32 `val` to fp16
Here is the function:
def fp32_to_fp16(val):
    """Recursively convert any fp32 tensors/parameters in `val` to fp16."""
    def _to_half(item):
        # Unwrap Parameter/Variable to inspect the underlying tensor type.
        candidate = item.data if isinstance(item, (Parameter, Variable)) else item
        if isinstance(candidate, FLOAT_TYPES):
            item = item.half()
        return item
    return conversion_helper(val, _to_half)
18,404 | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from .loss_scaler import DynamicLossScaler, LossScaler
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
HALF_TYPES = (torch.HalfTensor, torch.cuda.HalfTensor)
def conversion_helper(val, conversion):
    """Apply `conversion` to `val`; nested tuples/lists are converted
    element-wise with container type preserved."""
    if not isinstance(val, (tuple, list)):
        return conversion(val)
    mapped = [conversion_helper(element, conversion) for element in val]
    if isinstance(val, tuple):
        return tuple(mapped)
    return mapped
The provided code snippet includes necessary dependencies for implementing the `fp16_to_fp32` function. Write a Python function `def fp16_to_fp32(val)` to solve the following problem:
Convert fp16 `val` to fp32
Here is the function:
def fp16_to_fp32(val):
    """Recursively convert any fp16 tensors/parameters in `val` back to fp32."""
    def _to_float(item):
        # Unwrap Parameter/Variable to inspect the underlying tensor type.
        candidate = item.data if isinstance(item, (Parameter, Variable)) else item
        if isinstance(candidate, HALF_TYPES):
            item = item.float()
        return item
    return conversion_helper(val, _to_float)
18,405 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
class tofp16(nn.Module):
    """
    Utility module that casts its input tensor to fp16; equivalent to::

        def forward(self, input):
            return input.half()
    """
    def __init__(self):
        # No parameters or state: this module exists only so the cast can be
        # composed inside nn.Sequential (see network_to_half).
        super(tofp16, self).__init__()
    def forward(self, input):
        # Cast the incoming tensor to half precision.
        return input.half()
def BN_convert_float(module):
    """
    Recursively restore affine BatchNorm layers to fp32 inside an otherwise
    half-precision model. Utility for network_to_half(); retained for legacy
    purposes.
    """
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True:
        module.float()
    for submodule in module.children():
        BN_convert_float(submodule)
    return module
The provided code snippet includes necessary dependencies for implementing the `network_to_half` function. Write a Python function `def network_to_half(network)` to solve the following problem:
Convert model to half precision in a batchnorm-safe way. Retained for legacy purposes. It is recommended to use FP16Model.
Here is the function:
def network_to_half(network):
    """
    Convert `network` to half precision in a batchnorm-safe way (affine
    BatchNorm layers stay fp32), with an fp16 input-cast module prepended.
    Retained for legacy purposes; FP16Model is the recommended replacement.
    """
    half_body = BN_convert_float(network.half())
    return nn.Sequential(tofp16(), half_body)
18,406 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
def convert_module(module, dtype):
    """Convert `module`'s immediate (non-recursive) floating-point parameters,
    their gradients, and buffers to `dtype`."""
    for param in module.parameters(recurse=False):
        if param is None:
            continue
        if param.data.dtype.is_floating_point:
            param.data = param.data.to(dtype=dtype)
        if param._grad is not None and param._grad.data.dtype.is_floating_point:
            param._grad.data = param._grad.data.to(dtype=dtype)
    for buf in module.buffers(recurse=False):
        if buf is not None and buf.data.dtype.is_floating_point:
            buf.data = buf.data.to(dtype=dtype)
The provided code snippet includes necessary dependencies for implementing the `convert_network` function. Write a Python function `def convert_network(network, dtype)` to solve the following problem:
Converts a network's parameters and buffers to dtype.
Here is the function:
def convert_network(network, dtype):
    """Convert every module's parameters and buffers in `network` to `dtype`,
    leaving affine BatchNorm layers untouched."""
    for module in network.modules():
        is_affine_bn = isinstance(module, torch.nn.modules.batchnorm._BatchNorm) \
            and module.affine is True
        if is_affine_bn:
            # Affine batchnorm stays in its current precision.
            continue
        convert_module(module, dtype)
    return network
18,407 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
def backwards_debug_hook(grad):
    """Gradient hook that should never fire: master_params must not receive
    gradients directly (they are copied in via model_grads_to_master_grads).

    Raises:
        RuntimeError: always. (Message typo "recieved" -> "received" fixed.)
    """
    raise RuntimeError("master_params received a gradient in the backward pass!")
18,408 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
The provided code snippet includes necessary dependencies for implementing the `prep_param_lists` function. Write a Python function `def prep_param_lists(model, flat_master=False)` to solve the following problem:
Creates a list of FP32 master parameters for a given model, as in `Training Neural Networks with Mixed Precision: Real Examples`_. Args: model (torch.nn.Module): Existing Pytorch model flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization. Returns: A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element. Example:: model_params, master_params = prep_param_lists(model) .. warning:: Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`. .. _`Training Neural Networks with Mixed Precision: Real Examples`: http://on-demand.gputechconf.com/gtc/2018/video/S81012/
Here is the function:
def prep_param_lists(model, flat_master=False):
    """
    Creates a list of FP32 master parameters for a given model, as in
    `Training Neural Networks with Mixed Precision: Real Examples`_.

    Args:
        model (torch.nn.Module): Existing Pytorch model
        flat_master (bool, optional, default=False): Flatten the master
            parameters into a single tensor, as a performance optimization.

    Returns:
        A tuple (``model_params``, ``master_params``). ``model_params`` is a
        list of the model's parameters for later use with
        :func:`model_grads_to_master_grads` and
        :func:`master_params_to_model_params`. ``master_params`` is a list of
        FP32 master parameters. If ``flat_master=True``, ``master_params``
        will be a list with one element.

    Example::

        model_params, master_params = prep_param_lists(model)

    .. warning::
        Currently, if ``flat_master=True``, all the model's parameters must be
        the same type. If the model has parameters of different types, use
        ``flat_master=False``, or use :class:`FP16_Optimizer`.

    .. _`Training Neural Networks with Mixed Precision: Real Examples`:
        http://on-demand.gputechconf.com/gtc/2018/video/S81012/
    """
    model_params = [param for param in model.parameters() if param.requires_grad]

    if flat_master:
        # Give the user some more useful error messages
        try:
            # flatten_dense_tensors returns a contiguous flat array.
            # http://pytorch.org/docs/master/_modules/torch/_utils.html
            master_params = _flatten_dense_tensors([param.data for param in model_params]).float()
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
            # not intercepted; message typo "F16_Optimizer" also fixed.
            print("Error in prep_param_lists: model may contain a mixture of parameters "
                  "of different types. Use flat_master=False, or use FP16_Optimizer.")
            raise
        master_params = torch.nn.Parameter(master_params)
        master_params.requires_grad = True
        # master_params.register_hook(backwards_debug_hook)
        if master_params.grad is None:
            master_params.grad = master_params.new(*master_params.size())
        return model_params, [master_params]
    else:
        master_params = [param.clone().float().detach() for param in model_params]
        for param in master_params:
            param.requires_grad = True
        return model_params, master_params
18,409 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
The provided code snippet includes necessary dependencies for implementing the `model_grads_to_master_grads` function. Write a Python function `def model_grads_to_master_grads(model_params, master_params, flat_master=False)` to solve the following problem:
Copy model gradients to master gradients. Args: model_params: List of model parameters created by :func:`prep_param_lists`. master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`.
Here is the function:
def model_grads_to_master_grads(model_params, master_params, flat_master=False):
    """
    Copy model gradients into master gradients.

    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by
            :func:`prep_param_lists`. Pass ``flat_master=True`` here iff it was
            created with ``flat_master=True``.
    """
    if flat_master:
        # One flattened copy into the single master gradient tensor.
        # (The flattening may incur one more deep copy than is necessary.)
        flat = _flatten_dense_tensors([p.grad.data for p in model_params])
        master_params[0].grad.data.copy_(flat)
        return
    for model_p, master_p in zip(model_params, master_params):
        if model_p.grad is None:
            master_p.grad = None
            continue
        if master_p.grad is None:
            master_p.grad = Variable(master_p.data.new(*master_p.data.size()))
        master_p.grad.data.copy_(model_p.grad.data)
18,410 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
The provided code snippet includes necessary dependencies for implementing the `master_params_to_model_params` function. Write a Python function `def master_params_to_model_params(model_params, master_params, flat_master=False)` to solve the following problem:
Copy master parameters to model parameters. Args: model_params: List of model parameters created by :func:`prep_param_lists`. master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`.
Here is the function:
def master_params_to_model_params(model_params, master_params, flat_master=False):
    """
    Copy master parameter values back into the model parameters.

    Args:
        model_params: List of model parameters created by :func:`prep_param_lists`.
        master_params: List of FP32 master parameters created by
            :func:`prep_param_lists`. Pass ``flat_master=True`` here iff it was
            created with ``flat_master=True``.
    """
    if flat_master:
        chunks = _unflatten_dense_tensors(master_params[0].data, model_params)
        for model_p, master_chunk in zip(model_params, chunks):
            model_p.data.copy_(master_chunk)
    else:
        for model_p, master_p in zip(model_params, master_params):
            model_p.data.copy_(master_p.data)
18,411 | import torch
import torch.nn as nn
from torch.autograd import Variable
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from sofa.utils import mpu
def to_python_float(t):
    """Return a Python scalar from `t`: `.item()` when available (tensors),
    otherwise the first element (legacy 1-element containers)."""
    return t.item() if hasattr(t, 'item') else t[0]
18,412 | import torch
from sofa.utils import mpu
def to_python_float(t):
    """Coerce a zero-dim tensor (via .item()) or a 1-element sequence
    (first element) to a Python scalar."""
    if hasattr(t, 'item'):
        return t.item()
    return t[0]
18,413 | import importlib
import os
from packaging import version
def _report_compat_error():
if "SOFA_BACKEND" not in os.environ:
os.environ["SOFA_BACKEND"] = "sofa"
sofa_backend = os.environ["SOFA_BACKEND"]
if sofa_backend not in ["huggingface", "easytexminer", "easynlp", "sofa"]:
raise RuntimeError(f"Sofa backend {sofa_backend} not supported.")
The provided code snippet includes necessary dependencies for implementing the `inject_pipeline` function. Write a Python function `def inject_pipeline(name, pipeline_clz, automodel_clz)` to solve the following problem:
Inject custom pipeline into transformers pipeline :param name: The pipeline name :param pipeline_clz: The pipeline clz, should be the sub class of InferenceBase :param automodel_clz: The AutoModel class to get the proper model from. :return: None
Here is the function:
def inject_pipeline(name, pipeline_clz, automodel_clz):
    """
    Inject a custom pipeline into the transformers pipeline registry.

    :param name: The pipeline name
    :param pipeline_clz: The pipeline clz, should be the sub class of InferenceBase
    :param automodel_clz: The AutoModel class to get the proper model from.
    :return: None
    """
    _report_compat_error()
    if os.environ["SOFA_BACKEND"] != "huggingface":
        # Only the huggingface backend exposes a pipeline registry to patch.
        return
    import transformers.pipelines as pipelines
    pipelines.SUPPORTED_TASKS[name] = {
        "impl": pipeline_clz,
        "tf": (),
        "pt": (automodel_clz,),
        "default": {},
    }
18,414 | import importlib
import os
from packaging import version
def _report_compat_error():
if "SOFA_BACKEND" not in os.environ:
os.environ["SOFA_BACKEND"] = "sofa"
sofa_backend = os.environ["SOFA_BACKEND"]
if sofa_backend not in ["huggingface", "easytexminer", "easynlp", "sofa"]:
raise RuntimeError(f"Sofa backend {sofa_backend} not supported.")
def _huggingface(name, full_name, config, tokenizer, tokenizer_fast, slow_to_fast_converter="BertTokenizer", **kwargs):
"""
Register a model to hf.
:param name: The model name.
:param full_name: The full name of the model
:param config: The config class of the model.
:param tokenizer: The tokenizer class of the model.
:param tokenizer_fast: The tokenizer fast class of the model.
:param slow_to_fast_converter: The slow_to_fast_converter.
:param kwargs: The specific task class of the model.
Supported:
backbone: The backbone model class
sequence_classification: The sequence classfication model class
token_classification: The token classification model class
question_answering: The question-answering model class
multiple_choice: The multiple choice(e.g. SWAG task) model class
pre_train: The pretrain model class
mlm: The Masked language model class
nsp: The nsp model class
module: The module package
:return: None
"""
import transformers
from transformers.models.auto import configuration_auto
from transformers.models.auto import auto_factory
from transformers.models.auto import modeling_auto
from transformers.models.auto import tokenization_auto
from transformers import \
AutoModelForSequenceClassification, \
AutoModelForTokenClassification, \
AutoModelForPreTraining, \
AutoModelForQuestionAnswering, \
AutoModelForMaskedLM, \
AutoModelForSeq2SeqLM, \
AutoModelForMultipleChoice, \
AutoModelForNextSentencePrediction, \
AutoModel
task_type_mapping = {
"sequence_classification": AutoModelForSequenceClassification,
"token_classification": AutoModelForTokenClassification,
"question_answering": AutoModelForQuestionAnswering,
"multiple_choice": AutoModelForMultipleChoice,
"pre_train": AutoModelForPreTraining,
"mlm": AutoModelForMaskedLM,
"s2slm": AutoModelForSeq2SeqLM,
"nsp": AutoModelForNextSentencePrediction,
"backbone": AutoModel,
}
if version.parse(transformers.__version__) < version.parse(supported_min_version):
print(f"Warning: Your transformers version is {transformers.__version__}, lower than we asked, "
f"the initialization of the framework may possibly be failed, please upgrade your version to "
f"at least {supported_min_version} or contact the maintainer of this framework.")
elif version.parse(transformers.__version__) > version.parse(supported_max_version):
print(f"Warning: Your transformers version is {transformers.__version__}, greater than we tested yet, "
f"if anything goes wrong, please contact the maintainer of this framework.")
config_name = config.__name__.split(".")[-1]
tokenizer_name = tokenizer.__name__.split(".")[-1]
tokenizer_fast_name = tokenizer_fast.__name__.split(".")[-1]
configuration_auto.CONFIG_MAPPING_NAMES[name] = config_name
configuration_auto.CONFIG_MAPPING_NAMES.move_to_end(name, last=False)
configuration_auto.MODEL_NAMES_MAPPING[name] = full_name
configuration_auto.MODEL_NAMES_MAPPING.move_to_end(name, last=False)
configuration_auto.CONFIG_MAPPING._mapping[name] = config_name
configuration_auto.CONFIG_MAPPING._mapping.move_to_end(name, last=False)
configuration_auto.CONFIG_MAPPING._modules[name] = kwargs["module"]
if auto_factory._LazyAutoMapping._load_attr_from_module != _load_attr_from_module_with_extra_modules:
auto_factory._LazyAutoMapping._load_attr_from_module_local = \
auto_factory._LazyAutoMapping._load_attr_from_module
auto_factory._LazyAutoMapping._load_attr_from_module = _load_attr_from_module_with_extra_modules
tokenization_auto.TOKENIZER_MAPPING_NAMES[name] = (tokenizer_name, tokenizer_fast_name)
tokenization_auto.TOKENIZER_MAPPING_NAMES.move_to_end(name, last=False)
from transformers.models.auto.auto_factory import _LazyAutoMapping
def _register(maper: _LazyAutoMapping, config_mapping, model_mapping):
maper._config_mapping = config_mapping
maper._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
maper._model_mapping = model_mapping
if version.parse(transformers.__version__) < version.parse("4.12.0"):
_register(tokenization_auto.TOKENIZER_MAPPING, configuration_auto.CONFIG_MAPPING_NAMES,
tokenization_auto.TOKENIZER_MAPPING_NAMES)
else:
tokenization_auto.TOKENIZER_MAPPING.register(config, (tokenizer, tokenizer_fast))
transformers.SLOW_TO_FAST_CONVERTERS[tokenizer_name] = transformers.SLOW_TO_FAST_CONVERTERS[slow_to_fast_converter]
def auto_inject_task_class(task, modeling_auto_name):
if task not in kwargs:
return
task_class = kwargs[task]
class_name = task_class.__name__.split(".")[-1]
modeling_auto_name[name] = class_name
modeling_auto_name.move_to_end(name, last=False)
if version.parse(transformers.__version__) < version.parse("4.12.0"):
_register(task_type_mapping[task]._model_mapping, configuration_auto.CONFIG_MAPPING_NAMES,
modeling_auto_name)
else:
task_type_mapping[task]._model_mapping.register(config, task_class)
auto_inject_task_class("backbone",
modeling_auto.MODEL_MAPPING_NAMES)
auto_inject_task_class("sequence_classification",
modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES)
auto_inject_task_class("token_classification",
modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES)
auto_inject_task_class("question_answering",
modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES)
auto_inject_task_class("multiple_choice",
modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES)
auto_inject_task_class("pre_train",
modeling_auto.MODEL_FOR_PRETRAINING_MAPPING_NAMES)
auto_inject_task_class("mlm",
modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES)
auto_inject_task_class("s2slm",
modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES)
auto_inject_task_class("nsp",
modeling_auto.MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES)
global _registered_modules
_registered_modules.add(name)
def _easyx(name, full_name, config, tokenizer, tokenizer_fast,
slow_to_fast_converter="BertTokenizer", **kwargs):
"""
Register a model to easyx.
:param name: The model name.
:param full_name: The full name of the model
:param config: The config class of the model.
:param tokenizer: The tokenizer class of the model.
:param tokenizer_fast: The tokenizer fast class of the model.
:param slow_to_fast_converter: The slow_to_fast_converter.
:param kwargs: The specific task class of the model.
Supported:
backbone: The backbone model class
sequence_classification: The sequence classfication model class
token_classification: The token classification model class
question_answering: The question-answering model class
multiple_choice: The multiple choice(e.g. SWAG task) model class
pre_train: The pretrain model class
mlm: The Masked language model class
nsp: The nsp model class
module: The module package
:return: None
"""
sofa_backend = os.environ["SOFA_BACKEND"]
if sofa_backend == "easytexminer":
import easytexminer.model_zoo as modelzoo
import easytexminer.model_zoo.tokenization_utils_fast
from easytexminer.model_zoo.models import auto
from easytexminer.model_zoo.models.auto import configuration_auto
from easytexminer.model_zoo.models.auto import tokenization_auto
else:
import easynlp.modelzoo as modelzoo
import easynlp.modelzoo.tokenization_utils_fast
from easynlp.modelzoo.models import auto
from easynlp.modelzoo.models.auto import configuration_auto
from easynlp.modelzoo.models.auto import tokenization_auto
tokenizer_name = tokenizer.__name__.split(".")[-1]
configuration_auto.CONFIG_MAPPING[name] = config
configuration_auto.CONFIG_MAPPING.move_to_end(name, last=False)
configuration_auto.MODEL_NAMES_MAPPING[name] = full_name
configuration_auto.MODEL_NAMES_MAPPING.move_to_end(name, last=False)
tokenization_auto.TOKENIZER_MAPPING[config] = (tokenizer, tokenizer_fast)
modelzoo.SLOW_TO_FAST_CONVERTERS[tokenizer_name] = modelzoo.SLOW_TO_FAST_CONVERTERS[slow_to_fast_converter]
auto.MODEL_MAPPING[config] = kwargs["backbone"]
if "sequence_classification" in kwargs:
auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING[config] = kwargs["sequence_classification"]
if "token_classification" in kwargs:
auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING[config] = kwargs["token_classification"]
if "question_answering" in kwargs:
auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING[config] = kwargs["question_answering"]
if "multiple_choice" in kwargs:
auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING[config] = kwargs["multiple_choice"]
if "pre_train" in kwargs:
auto.MODEL_FOR_PRETRAINING_MAPPING[config] = kwargs["pre_train"]
if "mlm" in kwargs:
auto.MODEL_FOR_MASKED_LM_MAPPING[config] = kwargs["mlm"]
if "s2slm" in kwargs:
auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING[config] = kwargs["s2slm"]
if "nsp" in kwargs:
auto.MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING[config] = kwargs["nsp"]
The provided code snippet includes necessary dependencies for implementing the `inject_model_backend` function. Write a Python function `def inject_model_backend(name, full_name, config, tokenizer, tokenizer_fast, **kwargs)` to solve the following problem:
Inject some model package into the selected backend framework. :param name: The model name. :param full_name: The full name of the model :param config: The config class of the model. :param tokenizer: The tokenizer class of the model. :param tokenizer_fast: The tokenizer fast class of the model. :param kwargs: The specific task class of the model. Supported: backbone: The backbone model class sequence_classification: The sequence classfication model class token_classification: The token classification model class question_answering: The question-answering model class multiple_choice: The multiple choice(e.g. SWAG task) model class pre_train: The pretrain model class mlm: The Masked language model class nsp: The nsp model class module: The module package :return: None
Here is the function:
def inject_model_backend(name, full_name, config, tokenizer, tokenizer_fast, **kwargs):
"""
Inject some model package into the selected backend framework.
:param name: The model name.
:param full_name: The full name of the model
:param config: The config class of the model.
:param tokenizer: The tokenizer class of the model.
:param tokenizer_fast: The tokenizer fast class of the model.
:param kwargs: The specific task class of the model.
Supported:
backbone: The backbone model class
sequence_classification: The sequence classfication model class
token_classification: The token classification model class
question_answering: The question-answering model class
multiple_choice: The multiple choice(e.g. SWAG task) model class
pre_train: The pretrain model class
mlm: The Masked language model class
nsp: The nsp model class
module: The module package
:return: None
"""
_report_compat_error()
sofa_backend = os.environ["SOFA_BACKEND"]
if sofa_backend == "huggingface":
_huggingface(name, full_name, config, tokenizer, tokenizer_fast, **kwargs)
elif sofa_backend in ["easytexminer", "easynlp"]:
_easyx(name, full_name, config, tokenizer, tokenizer_fast, **kwargs) | Inject some model package into the selected backend framework. :param name: The model name. :param full_name: The full name of the model :param config: The config class of the model. :param tokenizer: The tokenizer class of the model. :param tokenizer_fast: The tokenizer fast class of the model. :param kwargs: The specific task class of the model. Supported: backbone: The backbone model class sequence_classification: The sequence classfication model class token_classification: The token classification model class question_answering: The question-answering model class multiple_choice: The multiple choice(e.g. SWAG task) model class pre_train: The pretrain model class mlm: The Masked language model class nsp: The nsp model class module: The module package :return: None |
18,415 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
def is_fairscale_available():
return importlib.util.find_spec("fairscale") is not None | null |
18,416 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
def is_neptune_available():
return importlib.util.find_spec("neptune") is not None | null |
18,417 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
def is_optuna_available():
return importlib.util.find_spec("optuna") is not None
def is_ray_tune_available():
if not is_ray_available():
return False
return importlib.util.find_spec("ray.tune") is not None
def is_sigopt_available():
return importlib.util.find_spec("sigopt") is not None
def hp_params(trial):
if is_optuna_available():
import optuna
if isinstance(trial, optuna.Trial):
return trial.params
if is_ray_tune_available():
if isinstance(trial, dict):
return trial
if is_sigopt_available():
if isinstance(trial, dict):
return trial
raise RuntimeError(f"Unknown type for trial {trial.__class__}") | null |
18,418 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
def is_optuna_available():
return importlib.util.find_spec("optuna") is not None
def is_ray_tune_available():
if not is_ray_available():
return False
return importlib.util.find_spec("ray.tune") is not None
def is_sigopt_available():
return importlib.util.find_spec("sigopt") is not None
def default_hp_search_backend():
if is_optuna_available():
return "optuna"
elif is_ray_tune_available():
return "ray"
elif is_sigopt_available():
return "sigopt" | null |
18,419 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
PREFIX_CHECKPOINT_DIR = "checkpoint"
class BestRun(NamedTuple):
"""
The best run found by an hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
def run_hp_search_optuna(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
import optuna
def _objective(trial, checkpoint_dir=None):
checkpoint = None
if checkpoint_dir:
for subdir in os.listdir(checkpoint_dir):
if subdir.startswith(PREFIX_CHECKPOINT_DIR):
checkpoint = os.path.join(checkpoint_dir, subdir)
trainer.objective = None
trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
# If there hasn't been any evaluation during the training loop.
if getattr(trainer, "objective", None) is None:
metrics = trainer.evaluate()
trainer.objective = trainer.compute_objective(metrics)
return trainer.objective
timeout = kwargs.pop("timeout", None)
n_jobs = kwargs.pop("n_jobs", 1)
study = optuna.create_study(direction=direction, **kwargs)
study.optimize(_objective, n_trials=n_trials, timeout=timeout, n_jobs=n_jobs)
best_trial = study.best_trial
return BestRun(str(best_trial.number), best_trial.value, best_trial.params) | null |
18,420 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
logger = logging.get_logger(__name__)
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
class TensorBoardCallback(TrainerCallback):
"""
A [`TrainerCallback`] that sends the logs to [TensorBoard](https://www.tensorflow.org/tensorboard).
Args:
tb_writer (`SummaryWriter`, *optional*):
The writer to use. Will instantiate one if not set.
"""
def __init__(self, tb_writer=None):
has_tensorboard = is_tensorboard_available()
if not has_tensorboard:
raise RuntimeError(
"TensorBoardCallback requires tensorboard to be installed. Either update your PyTorch version or install tensorboardX."
)
if has_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter # noqa: F401
self._SummaryWriter = SummaryWriter
except ImportError:
try:
from tensorboardX import SummaryWriter
self._SummaryWriter = SummaryWriter
except ImportError:
self._SummaryWriter = None
else:
self._SummaryWriter = None
self.tb_writer = tb_writer
def _init_summary_writer(self, args, log_dir=None):
log_dir = log_dir or args.logging_dir
if self._SummaryWriter is not None:
self.tb_writer = self._SummaryWriter(log_dir=log_dir)
def on_train_begin(self, args, state, control, **kwargs):
if not state.is_world_process_zero:
return
log_dir = None
if state.is_hyper_param_search:
trial_name = state.trial_name
if trial_name is not None:
log_dir = os.path.join(args.logging_dir, trial_name)
if self.tb_writer is None:
self._init_summary_writer(args, log_dir)
if self.tb_writer is not None:
self.tb_writer.add_text("args", args.to_json_string())
if "model" in kwargs:
model = kwargs["model"]
if hasattr(model, "config") and model.config is not None:
model_config_json = model.config.to_json_string()
self.tb_writer.add_text("model_config", model_config_json)
# Version of TensorBoard coming from tensorboardX does not have this method.
if hasattr(self.tb_writer, "add_hparams"):
self.tb_writer.add_hparams(args.to_sanitized_dict(), metric_dict={})
def on_log(self, args, state, control, logs=None, **kwargs):
if not state.is_world_process_zero:
return
if self.tb_writer is None:
self._init_summary_writer(args)
if self.tb_writer is not None:
logs = rewrite_logs(logs)
for k, v in logs.items():
if isinstance(v, (int, float)):
self.tb_writer.add_scalar(k, v, state.global_step)
else:
logger.warning(
"Trainer is attempting to log a value of "
f'"{v}" of type {type(v)} for key "{k}" as a scalar. '
"This invocation of Tensorboard's writer.add_scalar() "
"is incorrect so we dropped this attribute."
)
self.tb_writer.flush()
def on_train_end(self, args, state, control, **kwargs):
if self.tb_writer:
self.tb_writer.close()
self.tb_writer = None
def is_datasets_available():
return _datasets_available
class ProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation.
"""
def __init__(self):
self.training_bar = None
self.prediction_bar = None
def on_train_begin(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar = tqdm(total=state.max_steps)
self.current_step = 0
def on_step_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.update(state.global_step - self.current_step)
self.current_step = state.global_step
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if state.is_local_process_zero and isinstance(eval_dataloader.dataset, collections.abc.Sized):
if self.prediction_bar is None:
self.prediction_bar = tqdm(total=len(eval_dataloader), leave=self.training_bar is None)
self.prediction_bar.update(1)
def on_evaluate(self, args, state, control, **kwargs):
if state.is_local_process_zero:
if self.prediction_bar is not None:
self.prediction_bar.close()
self.prediction_bar = None
def on_log(self, args, state, control, logs=None, **kwargs):
if state.is_local_process_zero and self.training_bar is not None:
_ = logs.pop("total_flos", None)
self.training_bar.write(str(logs))
def on_train_end(self, args, state, control, **kwargs):
if state.is_local_process_zero:
self.training_bar.close()
self.training_bar = None
PREFIX_CHECKPOINT_DIR = "checkpoint"
class IntervalStrategy(ExplicitEnum):
NO = "no"
STEPS = "steps"
EPOCH = "epoch"
class BestRun(NamedTuple):
"""
The best run found by an hyperparameter search (see [`~Trainer.hyperparameter_search`]).
Parameters:
run_id (`str`):
The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
with run-{run_id}).
objective (`float`):
The objective that was obtained for this run.
hyperparameters (`Dict[str, Any]`):
The hyperparameters picked to get this run.
"""
run_id: str
objective: float
hyperparameters: Dict[str, Any]
class TrainerMemoryTracker:
"""
A helper class that tracks cpu and gpu memory.
This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
Example :
```python
self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
self._memory_tracker.start()
code ...
metrics = {"train_runtime": 10.5}
self._memory_tracker.stop_and_update_metrics(metrics)
```
At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
"""
# map trainer methods to metrics prefix
stages = {
"__init__": "init",
"train": "train",
"evaluate": "eval",
"predict": "test",
}
def __init__(self, skip_memory_metrics=False):
self.skip_memory_metrics = skip_memory_metrics
if not is_psutil_available():
# soft dependency on psutil
self.skip_memory_metrics = True
if self.skip_memory_metrics:
return
import psutil # noqa
if is_torch_cuda_available():
import torch
self.torch = torch
self.gpu = {}
else:
self.torch = None
self.process = psutil.Process()
self.cur_stage = None
self.cpu = {}
self.init_reported = False
def derive_stage(self):
"""derives the stage/caller name automatically"""
caller = inspect.currentframe().f_back.f_back.f_code.co_name
if caller in self.stages:
return self.stages[caller]
else:
raise ValueError(
f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
)
def cpu_mem_used(self):
"""get resident set size memory for the current process"""
return self.process.memory_info().rss
def peak_monitor_func(self):
self.cpu_mem_used_peak = -1
while True:
self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
# time.sleep(0.001) # 1msec
if not self.peak_monitoring:
break
def start(self):
"""start tracking for the caller's stage"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
self.cur_stage = stage
gc.collect()
if self.torch is not None:
self.torch.cuda.reset_peak_memory_stats()
self.torch.cuda.empty_cache()
# gpu
if self.torch is not None:
self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
# cpu
self.cpu_mem_used_at_start = self.cpu_mem_used()
self.peak_monitoring = True
peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
peak_monitor_thread.daemon = True
peak_monitor_thread.start()
def stop(self, stage):
"""stop tracking for the passed stage"""
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# this sends a signal to peak_monitor_func to complete its loop
self.peak_monitoring = False
# first ensure all objects get collected and their memory is freed
gc.collect()
if self.torch is not None:
self.torch.cuda.empty_cache()
# concepts:
# - alloc_delta: the difference of allocated memory between the end and the start
# - peaked_delta: the difference between the peak memory and the current memory
# in order to know how much memory the measured code consumed one needs to sum these two
# gpu
if self.torch is not None:
self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
self.gpu[self.cur_stage] = dict(
begin=self.gpu_mem_used_at_start,
end=self.gpu_mem_used_now,
alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
)
# cpu
self.cpu_mem_used_now = self.cpu_mem_used()
self.cpu[self.cur_stage] = dict(
begin=self.cpu_mem_used_at_start,
end=self.cpu_mem_used_now,
alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
)
# reset - cycle finished
self.cur_stage = None
def update_metrics(self, stage, metrics):
"""updates the metrics"""
if self.skip_memory_metrics:
return
# deal with nested calls of eval during train - simply ignore those
if self.cur_stage is not None and self.cur_stage != stage:
return
# since we don't have a way to return init metrics, we push them into the first of train/val/predict
stages = [stage]
if not self.init_reported:
stages.insert(0, "init")
self.init_reported = True
for stage in stages:
for t in ["alloc", "peaked"]:
if stage in self.cpu and t in self.cpu[stage]:
metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
# if we need additional debug info, enable the following
# for t in ["begin", "end"]:
# if stage in self.cpu and t in self.cpu[stage]:
# metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
# if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
# metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
# since memory can be allocated before init, and it might be difficult to track overall
# memory usage, in particular for GPU, let's report memory usage at the point init was called
if stages[0] == "init":
metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
if self.torch is not None:
metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
# if we also wanted to report any additional memory allocations in between init and
# whatever the next stage was we could also report this:
# if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
# metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
# if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
# metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]
def stop_and_update_metrics(self, metrics=None):
"""combine stop and metrics update in one call for simpler code"""
if self.skip_memory_metrics:
return
stage = self.derive_stage()
self.stop(stage)
# init doesn't have metrics to update so we just save that data for later stages to retrieve
if metrics is not None:
self.update_metrics(stage, metrics)
class NotebookProgressCallback(TrainerCallback):
"""
A [`TrainerCallback`] that displays the progress of training or evaluation, optimized for
Jupyter Notebooks or Google colab.
"""
def __init__(self):
self.training_tracker = None
self.prediction_bar = None
self._force_next_update = False
def on_train_begin(self, args, state, control, **kwargs):
self.first_column = "Epoch" if args.evaluation_strategy == IntervalStrategy.EPOCH else "Step"
self.training_loss = 0
self.last_log = 0
column_names = [self.first_column] + ["Training Loss"]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("Validation Loss")
self.training_tracker = NotebookTrainingTracker(state.max_steps, column_names)
def on_step_end(self, args, state, control, **kwargs):
epoch = int(state.epoch) if int(state.epoch) == state.epoch else f"{state.epoch:.2f}"
self.training_tracker.update(
state.global_step + 1,
comment=f"Epoch {epoch}/{state.num_train_epochs}",
force_update=self._force_next_update,
)
self._force_next_update = False
def on_prediction_step(self, args, state, control, eval_dataloader=None, **kwargs):
if not isinstance(eval_dataloader.dataset, collections.abc.Sized):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
self.prediction_bar = self.training_tracker.add_child(len(eval_dataloader))
else:
self.prediction_bar = NotebookProgressBar(len(eval_dataloader))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def on_log(self, args, state, control, logs=None, **kwargs):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
values = {"Training Loss": logs["loss"]}
# First column is necessarily Step sine we're not in epoch eval strategy
values["Step"] = state.global_step
self.training_tracker.write_line(values)
def on_evaluate(self, args, state, control, metrics=None, **kwargs):
if self.training_tracker is not None:
values = {"Training Loss": "No log", "Validation Loss": "No log"}
for log in reversed(state.log_history):
if "loss" in log:
values["Training Loss"] = log["loss"]
break
if self.first_column == "Epoch":
values["Epoch"] = int(state.epoch)
else:
values["Step"] = state.global_step
metric_key_prefix = "eval"
for k in metrics:
if k.endswith("_loss"):
metric_key_prefix = re.sub(r"\_loss$", "", k)
_ = metrics.pop("total_flos", None)
_ = metrics.pop("epoch", None)
_ = metrics.pop(f"{metric_key_prefix}_runtime", None)
_ = metrics.pop(f"{metric_key_prefix}_samples_per_second", None)
_ = metrics.pop(f"{metric_key_prefix}_steps_per_second", None)
for k, v in metrics.items():
if k == f"{metric_key_prefix}_loss":
values["Validation Loss"] = v
else:
splits = k.split("_")
name = " ".join([part.capitalize() for part in splits[1:]])
values[name] = v
self.training_tracker.write_line(values)
self.training_tracker.remove_child()
self.prediction_bar = None
# Evaluation takes a long time so we should force the next update.
self._force_next_update = True
def on_train_end(self, args, state, control, **kwargs):
self.training_tracker.update(
state.global_step, comment=f"Epoch {int(state.epoch)}/{state.num_train_epochs}", force_update=True
)
self.training_tracker = None
def run_hp_search_ray(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
import ray
def _objective(trial, local_trainer, checkpoint_dir=None):
try:
from .utils.notebook import NotebookProgressCallback
if local_trainer.pop_callback(NotebookProgressCallback):
local_trainer.add_callback(ProgressCallback)
except ModuleNotFoundError:
pass
checkpoint = None
if checkpoint_dir:
for subdir in os.listdir(checkpoint_dir):
if subdir.startswith(PREFIX_CHECKPOINT_DIR):
checkpoint = os.path.join(checkpoint_dir, subdir)
local_trainer.objective = None
local_trainer.train(resume_from_checkpoint=checkpoint, trial=trial)
# If there hasn't been any evaluation during the training loop.
if getattr(local_trainer, "objective", None) is None:
metrics = local_trainer.evaluate()
local_trainer.objective = local_trainer.compute_objective(metrics)
local_trainer._tune_save_checkpoint()
ray.tune.report(objective=local_trainer.objective, **metrics, done=True)
if not trainer._memory_tracker.skip_memory_metrics:
from .trainer_utils import TrainerMemoryTracker
logger.warning(
"Memory tracking for your Trainer is currently "
"enabled. Automatically disabling the memory tracker "
"since the memory tracker is not serializable."
)
trainer._memory_tracker = TrainerMemoryTracker(skip_memory_metrics=True)
# The model and TensorBoard writer do not pickle so we have to remove them (if they exists)
# while doing the ray hp search.
_tb_writer = trainer.pop_callback(TensorBoardCallback)
trainer.model = None
# Setup default `resources_per_trial`.
if "resources_per_trial" not in kwargs:
# Default to 1 CPU and 1 GPU (if applicable) per trial.
kwargs["resources_per_trial"] = {"cpu": 1}
if trainer.args.n_gpu > 0:
kwargs["resources_per_trial"]["gpu"] = 1
resource_msg = "1 CPU" + (" and 1 GPU" if trainer.args.n_gpu > 0 else "")
logger.info(
"No `resources_per_trial` arg was passed into "
"`hyperparameter_search`. Setting it to a default value "
f"of {resource_msg} for each trial."
)
# Make sure each trainer only uses GPUs that were allocated per trial.
gpus_per_trial = kwargs["resources_per_trial"].get("gpu", 0)
trainer.args._n_gpu = gpus_per_trial
# Setup default `progress_reporter`.
if "progress_reporter" not in kwargs:
from ray.tune import CLIReporter
kwargs["progress_reporter"] = CLIReporter(metric_columns=["objective"])
if "keep_checkpoints_num" in kwargs and kwargs["keep_checkpoints_num"] > 0:
# `keep_checkpoints_num=0` would disabled checkpointing
trainer.use_tune_checkpoints = True
if kwargs["keep_checkpoints_num"] > 1:
logger.warning(
f"Currently keeping {kwargs['keep_checkpoints_num']} checkpoints for each trial. "
"Checkpoints are usually huge, "
"consider setting `keep_checkpoints_num=1`."
)
if "scheduler" in kwargs:
from ray.tune.schedulers import ASHAScheduler, HyperBandForBOHB, MedianStoppingRule, PopulationBasedTraining
# Check if checkpointing is enabled for PopulationBasedTraining
if isinstance(kwargs["scheduler"], PopulationBasedTraining):
if not trainer.use_tune_checkpoints:
logger.warning(
"You are using PopulationBasedTraining but you haven't enabled checkpointing. "
"This means your trials will train from scratch everytime they are exploiting "
"new configurations. Consider enabling checkpointing by passing "
"`keep_checkpoints_num=1` as an additional argument to `Trainer.hyperparameter_search`."
)
# Check for `do_eval` and `eval_during_training` for schedulers that require intermediate reporting.
if isinstance(
kwargs["scheduler"], (ASHAScheduler, MedianStoppingRule, HyperBandForBOHB, PopulationBasedTraining)
) and (not trainer.args.do_eval or trainer.args.evaluation_strategy == IntervalStrategy.NO):
raise RuntimeError(
"You are using {cls} as a scheduler but you haven't enabled evaluation during training. "
"This means your trials will not report intermediate results to Ray Tune, and "
"can thus not be stopped early or used to exploit other trials parameters. "
"If this is what you want, do not use {cls}. If you would like to use {cls}, "
"make sure you pass `do_eval=True` and `evaluation_strategy='steps'` in the "
"Trainer `args`.".format(cls=type(kwargs["scheduler"]).__name__)
)
trainable = ray.tune.with_parameters(_objective, local_trainer=trainer)
@functools.wraps(trainable)
def dynamic_modules_import_trainable(*args, **kwargs):
    """
    Wrapper around `tune.with_parameters` to ensure datasets_modules are loaded on each Actor.
    Without this, an ImportError will be thrown. See https://github.com/huggingface/transformers/issues/11565.
    Assumes that `_objective`, defined above, is a function.
    """
    if is_datasets_available():
        import datasets.load
        # Re-create the dynamically generated `datasets_modules` package inside this Ray
        # Actor's interpreter so that pickled references to dataset code can be resolved.
        dynamic_modules_path = os.path.join(datasets.load.init_dynamic_modules(), "__init__.py")
        # load dynamic_modules from path
        spec = importlib.util.spec_from_file_location("datasets_modules", dynamic_modules_path)
        datasets_modules = importlib.util.module_from_spec(spec)
        # Register under its canonical name *before* executing, so imports inside the
        # module (and unpickling by name) succeed.
        sys.modules[spec.name] = datasets_modules
        spec.loader.exec_module(datasets_modules)
    return trainable(*args, **kwargs)
# special attr set by tune.with_parameters
if hasattr(trainable, "__mixins__"):
dynamic_modules_import_trainable.__mixins__ = trainable.__mixins__
analysis = ray.tune.run(
dynamic_modules_import_trainable,
config=trainer.hp_space(None),
num_samples=n_trials,
**kwargs,
)
best_trial = analysis.get_best_trial(metric="objective", mode=direction[:3])
best_run = BestRun(best_trial.trial_id, best_trial.last_result["objective"], best_trial.config)
if _tb_writer is not None:
trainer.add_callback(_tb_writer)
return best_run | null |
18,421 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
logger = logging.get_logger(__name__)
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
class BestRun(NamedTuple):
    """
    The best run found by an hyperparameter search (see [`~Trainer.hyperparameter_search`]).
    Parameters:
        run_id (`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (`float`):
            The objective that was obtained for this run.
        hyperparameters (`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """
    run_id: str  # trial identifier of the winning run
    objective: float  # value of the optimized metric for that run
    hyperparameters: Dict[str, Any]  # hyperparameter assignment that produced it
def run_hp_search_sigopt(trainer, n_trials: int, direction: str, **kwargs) -> BestRun:
    """
    Drive a SigOpt hyperparameter search over `trainer`.

    Creates a SigOpt experiment whose parameter space comes from `trainer.hp_space`, trains once per
    suggestion until the observation budget (`n_trials`) is exhausted, reports `trainer.objective`
    back after each run, and returns the best assignment found.
    """
    from sigopt import Connection

    conn = Connection()
    # Optional HTTP(S) proxies for environments without direct internet access.
    proxies = kwargs.pop("proxies", None)
    if proxies is not None:
        conn.set_proxies(proxies)

    experiment = conn.experiments().create(
        name="huggingface-tune",
        parameters=trainer.hp_space(None),
        metrics=[dict(name="objective", objective=direction, strategy="optimize")],
        parallel_bandwidth=1,
        observation_budget=n_trials,
        project="huggingface",
    )
    logger.info(f"created experiment: https://app.sigopt.com/experiment/{experiment.id}")

    while experiment.progress.observation_count < experiment.observation_budget:
        suggestion = conn.experiments(experiment.id).suggestions().create()
        trainer.objective = None
        trainer.train(resume_from_checkpoint=None, trial=suggestion)
        if getattr(trainer, "objective", None) is None:
            # No evaluation happened during the training loop: evaluate once now.
            eval_metrics = trainer.evaluate()
            trainer.objective = trainer.compute_objective(eval_metrics)

        reported_values = [dict(name="objective", value=trainer.objective)]
        obs = conn.experiments(experiment.id).observations().create(suggestion=suggestion.id, values=reported_values)
        logger.info(f"[suggestion_id, observation_id]: [{suggestion.id}, {obs.id}]")
        # Refresh to pick up the updated observation count.
        experiment = conn.experiments(experiment.id).fetch()

    best = list(conn.experiments(experiment.id).best_assignments().fetch().iterate_pages())[0]
    return BestRun(best.id, best.value, best.assignments)
18,422 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
def is_wandb_available():
    """
    Return `True` when the `wandb` package is importable and not disabled via environment variable.

    Any truthy value of the deprecated `WANDB_DISABLED` environment variable disables wandb and
    triggers a deprecation warning pointing users at `--report_to`.
    """
    # any value of WANDB_DISABLED disables wandb
    if os.getenv("WANDB_DISABLED", "").upper() in ENV_VARS_TRUE_VALUES:
        # Fixed: the message previously said `WAND_DISABLED` (missing "B"), which is not
        # the variable actually checked above.
        logger.warning(
            "Using the `WANDB_DISABLED` environment variable is deprecated and will be removed in v5. Use the "
            "--report_to flag to control the integrations used for logging result (for instance --report_to none)."
        )
        return False
    return importlib.util.find_spec("wandb") is not None
def is_comet_available():
    # `_has_comet` is a module-level flag computed at import time elsewhere in this file —
    # presumably True only when the comet_ml package imported successfully; TODO confirm.
    return _has_comet
def is_tensorboard_available():
    """Return `True` when either `tensorboard` or `tensorboardX` is importable."""
    return any(importlib.util.find_spec(pkg) is not None for pkg in ("tensorboard", "tensorboardX"))
def is_azureml_available():
    """Return `True` when the AzureML SDK module `azureml.core.run` is importable."""
    # Probe parent packages first and short-circuit in order: `find_spec` on a dotted
    # name needs its ancestors to exist.
    for module_name in ("azureml", "azureml.core", "azureml.core.run"):
        if importlib.util.find_spec(module_name) is None:
            return False
    return True
def is_mlflow_available():
    """Return `True` when the `mlflow` package is importable."""
    spec = importlib.util.find_spec("mlflow")
    return spec is not None
def is_codecarbon_available():
    """Return `True` when the `codecarbon` package is importable."""
    spec = importlib.util.find_spec("codecarbon")
    return spec is not None
def get_available_reporting_integrations():
    """Return the names of all reporting integrations whose packages are currently importable."""
    # (predicate, integration-name) pairs; order matches the historical output order.
    checks = (
        (is_azureml_available, "azure_ml"),
        (is_comet_available, "comet_ml"),
        (is_mlflow_available, "mlflow"),
        (is_tensorboard_available, "tensorboard"),
        (is_wandb_available, "wandb"),
        (is_codecarbon_available, "codecarbon"),
    )
    return [name for available, name in checks if available()]
18,423 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
def rewrite_logs(d):
    """
    Re-key a metrics dict into TensorBoard/W&B-style sections.

    Keys starting with ``eval_`` / ``test_`` become ``eval/...`` / ``test/...``;
    everything else is prefixed with ``train/``. Values are passed through untouched.
    """
    rewritten = {}
    sections = (("eval_", "eval/"), ("test_", "test/"))
    for key, value in d.items():
        for prefix, section in sections:
            if key.startswith(prefix):
                rewritten[section + key[len(prefix):]] = value
                break
        else:
            # No recognized prefix: treat as a training metric.
            rewritten["train/" + key] = value
    return rewritten
18,424 | import functools
import importlib.util
import numbers
import os
import sys
import tempfile
from pathlib import Path
from .file_utils import is_datasets_available
from .utils import logging
from .file_utils import ENV_VARS_TRUE_VALUES, is_torch_tpu_available
from .trainer_callback import ProgressCallback, TrainerCallback
from .trainer_utils import PREFIX_CHECKPOINT_DIR, BestRun, IntervalStrategy
# Maps a `--report_to` integration name to the `TrainerCallback` subclass implementing it.
# Consumed by `get_reporting_integration_callbacks` to resolve user-requested integrations.
INTEGRATION_TO_CALLBACK = {
    "azure_ml": AzureMLCallback,
    "comet_ml": CometCallback,
    "mlflow": MLflowCallback,
    "neptune": NeptuneCallback,
    "tensorboard": TensorBoardCallback,
    "wandb": WandbCallback,
    "codecarbon": CodeCarbonCallback,
}
def get_reporting_integration_callbacks(report_to):
    """
    Resolve each integration name in `report_to` to its callback class.

    Raises:
        ValueError: if any name is not a key of `INTEGRATION_TO_CALLBACK`.
    """
    # Validate everything first so the error mentions the offending name before any
    # callbacks are resolved; `report_to` is iterated twice, matching the original.
    for integration in report_to:
        if integration not in INTEGRATION_TO_CALLBACK:
            raise ValueError(
                f"{integration} is not supported, only {', '.join(INTEGRATION_TO_CALLBACK.keys())} are supported."
            )
    return [INTEGRATION_TO_CALLBACK[integration] for integration in report_to]
18,425 | import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from .file_utils import add_start_docstrings
class MaxLengthCriteria(StoppingCriteria):
    """
    Stops generation once the full generated sequence reaches `max_length` tokens.
    For decoder-only transformers this count includes the initial prompt tokens.
    Args:
        max_length (`int`):
            The maximum length that the output sequence can have in number of tokens.
    """

    def __init__(self, max_length: int):
        self.max_length = max_length

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # `scores` and extra kwargs are accepted for interface compatibility but unused.
        current_length = input_ids.shape[-1]
        return current_length >= self.max_length
class StoppingCriteriaList(list):
    """A list of stopping criteria; generation should stop when any criterion fires."""

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        # True as soon as any contained criterion says "stop"; extra kwargs are ignored.
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """The `max_length` of the first length-based criterion in the list, or `None`.

        Must be a property: `validate_stopping_criteria` reads it as an attribute
        (`stopping_criteria.max_length`); as a plain method the bound-method object
        would always compare `is not None`, breaking that check.
        """
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """
    Return a deep copy of `stopping_criteria` guaranteed to enforce `max_length`.

    If the list already carries a max-length criterion with a *different* limit, warn and keep it;
    if it carries none, append a `MaxLengthCriteria(max_length)`. The input list is never mutated.
    """
    existing_max_length = stopping_criteria.max_length
    validated = deepcopy(stopping_criteria)
    if existing_max_length is None:
        validated.append(MaxLengthCriteria(max_length=max_length))
    elif existing_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    return validated
18,426 | import collections
from .file_utils import ExplicitEnum, is_torch_available
from .utils import logging
def get_abs_min_max(var, ctx):
    """Format the min/max magnitude of `var` in scientific notation, followed by `ctx`."""
    magnitudes = var.abs()
    smallest, largest = magnitudes.min(), magnitudes.max()
    return f"{smallest:8.2e} {largest:8.2e} {ctx}"
18,427 | import collections
from .file_utils import ExplicitEnum, is_torch_available
from .utils import logging
The provided code snippet includes the necessary dependencies for implementing the `detect_overflow` function. Write a Python function `def detect_overflow(var, ctx)` that solves the following problem:

Report whether the tensor contains any `nan` or `inf` entries. This is useful for detecting overflows/underflows, and is best called right after a function that did some math modifying the tensor in question. The function also contains a few other helper features that you can enable and tweak directly if you want to track various other things.

Args: `var` — the tensor variable to check; `ctx` — the message to print as context. Returns `True` if `inf` or `nan` was detected, `False` otherwise.

Here is the function:
def detect_overflow(var, ctx):
    """
    Report whether the tensor contains any `nan` or `inf` entries.
    This is useful for detecting overflows/underflows and best to call right after the function that did some math that
    modified the tensor in question.
    This function contains a few other helper features that you can enable and tweak directly if you want to track
    various other things.
    Args:
        var: the tensor variable to check
        ctx: the message to print as a context
    Return:
        `True` if `inf` or `nan` was detected, `False` otherwise
    """
    found = False
    if torch.isnan(var).any().item():
        found = True
        print(f"{ctx} has nans")
    if torch.isinf(var).any().item():
        found = True
        print(f"{ctx} has infs")

    # The debug helpers below are disabled by default; flip an `if 0` guard on while
    # hunting a numerical issue.
    if 0:  # and found: -- count elements above a few magnitude thresholds
        for threshold in (100, 1000, 10000):
            big = var[torch.ge(var.abs(), threshold)]
            if big.numel() > 0:
                print(f"{ctx}: n{threshold}={big.numel()}")
    if 0:  # quick range summary
        print(f"min={var.min():9.2e} max={var.max():9.2e}")
    if 0:  # fuller stats summary
        print(f"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})")

    return found
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.