id | prompt | docstring |
|---|---|---|
185,097 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets | null |
185,098 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
) | null |
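A smoke test with synthetic numbers (hypothetical file and sizes): one 2.0 s utterance at 16 kHz against a 50 Hz label stream, so 100 frame-level labels align exactly and nothing is logged.

import tempfile

# Hypothetical: 100 labels at 50 Hz <-> 32000 samples at 16 kHz (both 2.0 s).
with tempfile.NamedTemporaryFile("w", suffix=".km", delete=False) as f:
    f.write(" ".join(["7"] * 100) + "\n")
    label_path = f.name
verify_label_lengths(
    audio_sizes=[32000], audio_rate=16000,
    label_path=label_path, label_rate=50,
    inds=[0], tot=1,
)  # |2.0 - 2.0| <= tol, so no warning is emitted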
185,099 | import itertools
import logging
import os
import sys
from typing import Any, List, Optional, Union
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils
from fairseq.data.fairseq_dataset import FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `logmelfilterbank` function. Write a Python function `def logmelfilterbank( audio, sampling_rate, fft_size=1024, hop_size=256, win_length=None, window="hann", num_mels=80, fmin=80, fmax=7600, eps=1e-10, )` to solve the following problem:
Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will be the same as fft_size. window (str): Window function type. num_mels (int): Number of mel basis. fmin (int): Minimum frequency in mel basis calculation. fmax (int): Maximum frequency in mel basis calculation. eps (float): Epsilon value to avoid inf in log calculation. Returns: ndarray: Log Mel filterbank feature (#frames, num_mels).
Here is the function:
def logmelfilterbank(
audio,
sampling_rate,
fft_size=1024,
hop_size=256,
win_length=None,
window="hann",
num_mels=80,
fmin=80,
fmax=7600,
eps=1e-10,
):
"""Compute log-Mel filterbank feature.
(https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py)
Args:
audio (ndarray): Audio signal (T,).
sampling_rate (int): Sampling rate.
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length. If set to None, it will be the same as fft_size.
window (str): Window function type.
num_mels (int): Number of mel basis.
fmin (int): Minimum frequency in mel basis calculation.
fmax (int): Maximum frequency in mel basis calculation.
eps (float): Epsilon value to avoid inf in log calculation.
Returns:
ndarray: Log Mel filterbank feature (#frames, num_mels).
"""
# get amplitude spectrogram
x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
win_length=win_length, window=window, pad_mode="reflect")
spc = np.abs(x_stft).T # (#frames, #bins)
# get mel basis
fmin = 0 if fmin is None else fmin
fmax = sampling_rate / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax)
return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T))) | Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will be the same as fft_size. window (str): Window function type. num_mels (int): Number of mel basis. fmin (int): Minimum frequency in mel basis calculation. fmax (int): Maximum frequency in mel basis calculation. eps (float): Epsilon value to avoid inf in log calculation. Returns: ndarray: Log Mel filterbank feature (#frames, num_mels). |
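A quick sanity check, assuming a recent librosa; the tone and shapes below are illustrative only.

# Hypothetical input: one second of a 440 Hz tone at 16 kHz.
sr = 16000
tone = np.sin(2 * np.pi * 440 * np.arange(sr) / sr).astype(np.float32)
feats = logmelfilterbank(tone, sampling_rate=sr, fmax=sr // 2)
print(feats.shape)  # about (1 + sr // 256, 80) = (63, 80) frames x mel bins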
185,100 | import math
import numpy as np
import torch
from fairseq.data import FairseqDataset, data_utils
def collate(
samples,
pad_idx,
eos_idx,
vocab,
left_pad_source=False,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
):
assert input_feeding
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
eos_idx=None, # use eos_idx of each sample instead of vocab.eos()
left_pad=left_pad,
move_eos_to_beginning=move_eos_to_beginning,
pad_to_length=pad_to_length,
)
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor([s["source"].numel() for s in samples])
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
ntokens = sum(len(s["target"]) for s in samples)
if input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
prev_output_tokens = prev_output_tokens.index_select(0, sort_order)
else:
ntokens = sum(len(s["source"]) for s in samples)
batch = {
"id": id,
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
"nsentences": samples[0]["source"].size(0),
"sort_order": sort_order,
"task_name": 'text_pretrain',
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens
return batch | null |
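A hedged example batch, assuming fairseq is installed for `data_utils.collate_tokens`; the indices below are made up (`pad_idx=1`, `eos_idx=2`), and `vocab` is unused by this collater beyond its signature, so `None` is passed.

samples = [
    {"id": 0, "source": torch.LongTensor([4, 5, 6, 2]), "target": torch.LongTensor([7, 8, 2])},
    {"id": 1, "source": torch.LongTensor([9, 2]), "target": torch.LongTensor([10, 11, 12, 2])},
]
batch = collate(samples, pad_idx=1, eos_idx=2, vocab=None)
print(batch["net_input"]["src_tokens"].shape)          # (2, 4), sorted by source length
print(batch["net_input"]["prev_output_tokens"][:, 0])  # each row starts with its eos (2)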
185,101 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `_collate_frames` function. Write a Python function `def _collate_frames( frames: List[torch.Tensor], is_audio_input: bool = False )` to solve the following problem:
Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
Here is the function:
def _collate_frames(
frames: List[torch.Tensor], is_audio_input: bool = False
):
"""
Convert a list of 2D frames into a padded 3D tensor
Args:
frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is
length of i-th frame and f_dim is static dimension of features
Returns:
3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i]
"""
max_len = max(frame.size(0) for frame in frames)
if is_audio_input:
out = frames[0].new_zeros((len(frames), max_len))
else:
out = frames[0].new_zeros((len(frames), max_len, frames[0].size(1)))
for i, v in enumerate(frames):
out[i, : v.size(0)] = v
return out | Convert a list of 2D frames into a padded 3D tensor Args: frames (list): list of 2D frames of size L[i]*f_dim. Where L[i] is length of i-th frame and f_dim is static dimension of features Returns: 3D tensor of size len(frames)*len_max*f_dim where len_max is max of L[i] |
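A minimal sketch with made-up ragged inputs:

# Two feature matrices of 3 and 5 frames pad up to the longer one.
a, b = torch.randn(3, 80), torch.randn(5, 80)
print(_collate_frames([a, b]).shape)  # torch.Size([2, 5, 80])
# Raw waveforms take the 2D (batch, time) path instead.
waves = [torch.randn(16000), torch.randn(8000)]
print(_collate_frames(waves, is_audio_input=True).shape)  # torch.Size([2, 16000])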
185,102 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
logger = logging.getLogger(__name__)
def load_audio(manifest_path, max_keep, min_keep):
n_long, n_short = 0, 0
names, inds, sizes, spk_embeds = [], [], [], []
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 3, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
names.append(items[0])
spk_embeds.append(items[2])
inds.append(ind)
sizes.append(sz)
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes, spk_embeds | null |
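The parser implies a manifest layout of a root line followed by tab-separated `path<TAB>num_samples<TAB>spk_embed_path` rows; a hypothetical example:

import tempfile

manifest = "/data/wavs\nutt1.wav\t32000\tutt1.spk.npy\nutt2.wav\t400\tutt2.spk.npy\n"
with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as f:
    f.write(manifest)
    path = f.name
root, names, inds, tot, sizes, spk_embeds = load_audio(path, max_keep=None, min_keep=1000)
print(root, names, sizes)  # "/data/wavs" ["utt1.wav"] [32000]; utt2 dropped as too short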
185,103 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
def load_label(label_path, inds, tot):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels | null |
185,104 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
def load_label_offset(label_path, inds, tot):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets | null |
185,105 | import itertools
import logging
import os
from typing import Any, List, Optional
import numpy as np
import torch
import torch.nn.functional as F
import librosa
from fairseq.data.audio.speech_to_text_dataset import get_features_or_waveform
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
The provided code snippet includes necessary dependencies for implementing the `logmelfilterbank` function. Write a Python function `def logmelfilterbank( audio, sampling_rate, fft_size=1024, hop_size=256, win_length=None, window="hann", num_mels=80, fmin=80, fmax=7600, eps=1e-10, )` to solve the following problem:
Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will be the same as fft_size. window (str): Window function type. num_mels (int): Number of mel basis. fmin (int): Minimum frequency in mel basis calculation. fmax (int): Maximum frequency in mel basis calculation. eps (float): Epsilon value to avoid inf in log calculation. Returns: ndarray: Log Mel filterbank feature (#frames, num_mels).
Here is the function:
def logmelfilterbank(
audio,
sampling_rate,
fft_size=1024,
hop_size=256,
win_length=None,
window="hann",
num_mels=80,
fmin=80,
fmax=7600,
eps=1e-10,
):
"""Compute log-Mel filterbank feature.
(https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py)
Args:
audio (ndarray): Audio signal (T,).
sampling_rate (int): Sampling rate.
fft_size (int): FFT size.
hop_size (int): Hop size.
win_length (int): Window length. If set to None, it will be the same as fft_size.
window (str): Window function type.
num_mels (int): Number of mel basis.
fmin (int): Minimum frequency in mel basis calculation.
fmax (int): Maximum frequency in mel basis calculation.
eps (float): Epsilon value to avoid inf in log calculation.
Returns:
ndarray: Log Mel filterbank feature (#frames, num_mels).
"""
# get amplitude spectrogram
x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
win_length=win_length, window=window, pad_mode="reflect")
spc = np.abs(x_stft).T # (#frames, #bins)
# get mel basis
fmin = 0 if fmin is None else fmin
fmax = sampling_rate / 2 if fmax is None else fmax
mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size, n_mels=num_mels, fmin=fmin, fmax=fmax)
return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T))) | Compute log-Mel filterbank feature. (https://github.com/kan-bayashi/ParallelWaveGAN/blob/master/parallel_wavegan/bin/preprocess.py) Args: audio (ndarray): Audio signal (T,). sampling_rate (int): Sampling rate. fft_size (int): FFT size. hop_size (int): Hop size. win_length (int): Window length. If set to None, it will be the same as fft_size. window (str): Window function type. num_mels (int): Number of mel basis. fmin (int): Minimum frequency in mel basis calculation. fmax (int): Maximum frequency in mel basis calculation. eps (float): Epsilon value to avoid inf in log calculation. Returns: ndarray: Log Mel filterbank feature (#frames, num_mels). |
185,106 | import math
from argparse import Namespace
from dataclasses import dataclass, field
from omegaconf import II
from typing import Optional
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import post_process
from fairseq.tasks import FairseqTask
from fairseq.logging.meters import safe_round
import logging
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss | null |
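A minimal call with random inputs (shapes are illustrative only):

# Batch of 2, sequence length 3, vocabulary of 5; index 0 treated as padding.
lprobs = F.log_softmax(torch.randn(2, 3, 5), dim=-1)
target = torch.randint(0, 5, (2, 3))
loss, nll_loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=0)
print(loss.item(), nll_loss.item())  # loss blends the nll term with a uniform prior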
185,107 | from typing import Dict, List
import numpy as np
import torch
import torch.nn as nn
import contextlib
from fairseq import utils
from fairseq.models import (
FairseqEncoder,
)
from fairseq.modules import (
FairseqDropout,
LayerNorm,
TransformerEncoderLayer,
)
from torch import Tensor
from .transformer_layer import TransformerSentenceEncoderLayer
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m | null |
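A two-line check of the initializer (hypothetical sizes):

proj = Linear(256, 512)
print(proj.weight.shape, float(proj.bias.abs().sum()))  # torch.Size([512, 256]) 0.0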
185,108 | from fairseq.models import (
register_model_architecture,
)
from fairseq.models.transformer_lm import base_lm_architecture
def transformer_lm_t5(args):
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 1280)
args.decoder_ffn_embed_dim = getattr(args, "decoder_ffn_embed_dim", 6144)
args.decoder_layers = getattr(args, "decoder_layers", 20)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.activation_fn = getattr(args, "activation_fn", "gelu")
base_lm_architecture(args) | null |
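Because the preset only fills missing attributes, it can be probed with an empty `Namespace`, assuming fairseq is installed so the imported `base_lm_architecture` resolves:

from argparse import Namespace

args = Namespace()
transformer_lm_t5(args)
print(args.decoder_layers, args.decoder_embed_dim)  # 20 1280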
185,109 | import logging
from ast import literal_eval
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from .modules.text_encoder_prenet import TextEncoderPrenet
from .modules.text_decoder_prenet import TextDecoderPrenet
from .modules.text_decoder_postnet import TextDecoderPostnet
from .modules.speech_encoder_prenet import SpeechEncoderPrenet
from .modules.speech_encoder_postnet import SpeechEncoderPostnet
from .modules.speech_decoder_prenet import SpeechDecoderPrenet
from .modules.speech_decoder_postnet import SpeechDecoderPostnet
from .modules.speaker_decoder_postnet import SpeakerDecoderPostnet
from .modules.encoder import TransformerEncoder
from .modules.decoder import TransformerDecoder
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.models.transformer import Embedding
from fairseq.modules import (
GumbelVectorQuantizer,
)
from torch import Tensor
def base_architecture(args):
# Transformer
args.bert_init = getattr(args, "bert_init", False)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.max_text_positions = getattr(args, "max_text_positions", DEFAULT_MAX_TEXT_POSITIONS)
args.max_speech_positions = getattr(args, "max_speech_positions", DEFAULT_MAX_SPEECH_POSITIONS)
# Espnet related, including prenet, postnet
args.eprenet_conv_layers = getattr(args, "eprenet_conv_layers", 0)
args.eprenet_conv_filts = getattr(args, "eprenet_conv_filts", 0)
args.eprenet_conv_chans = getattr(args, "eprenet_conv_chans", 0)
args.use_batch_norm = getattr(args, "use_batch_norm", True)
args.eprenet_dropout_rate = getattr(args, "eprenet_dropout_rate", 0.0)
args.enc_use_scaled_pos_enc = getattr(args, "enc_use_scaled_pos_enc", True)
args.dec_use_scaled_pos_enc = getattr(args, "dec_use_scaled_pos_enc", True)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_chans = getattr(args, "postnet_chans", 256)
args.postnet_filts = getattr(args, "postnet_filts", 5)
args.postnet_dropout_rate = getattr(args, "postnet_dropout_rate", 0.5)
args.dprenet_dropout_rate = getattr(args, "dprenet_dropout_rate", 0.5)
args.dprenet_layers = getattr(args, "dprenet_layers", 2)
args.dprenet_units = getattr(args, "dprenet_units", 256)
args.initial_encoder_alpha = getattr(args, "initial_encoder_alpha", 1.0)
args.initial_decoder_alpha = getattr(args, "initial_decoder_alpha", 1.0)
args.spk_embed_integration_type = getattr(args, "spk_embed_integration_type", "pre")
args.spk_embed_dim = getattr(args, "spk_embed_dim", 512)
args.encoder_reduction_factor = getattr(args, "encoder_reduction_factor", 1)
args.reduction_factor = getattr(args, "reduction_factor", 2)
args.transformer_enc_positional_dropout_rate = getattr(args, "transformer_enc_positional_dropout_rate", 0.1)
args.transformer_dec_positional_dropout_rate = getattr(args, "transformer_dec_positional_dropout_rate", 0.1)
args.layer_norm_eps = getattr(args, "layer_norm_eps", 1e-5)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
# Convolutional subsampler
args.encoder_speech_prenet = getattr(args, "encoder_speech_prenet", "conv")
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.share_input_output_embed = getattr(args, "share_input_output_embed", False)
args.share_ctc_embed = getattr(args, "share_ctc_embed", False)
args.freeze_encoder_updates = getattr(args, "freeze_encoder_updates", 0)
args.freeze_decoder_updates = getattr(args, "freeze_decoder_updates", 0)
args.no_freeze_encoder_layer = getattr(args, "no_freeze_encoder_layer", None)
## sid
args.sid_embed_dim = getattr(args, "sid_embed_dim", 128)
args.sid_pooling_layer = getattr(args, "sid_pooling_layer", "decoder")
args.softmax_scale = getattr(args, "softmax_scale", 1)
args.softmax_margin = getattr(args, "softmax_margin", 0)
args.softmax_easy_margin = getattr(args, "softmax_easy_margin", False)
args.modules_filter = getattr(args, "modules_filter", None)
## Hubert
args.conv_pos = getattr(args, "conv_pos", 128)
args.conv_pos_groups = getattr(args, "conv_pos_groups", 16)
args.target_glu = getattr(args, "target_glu", False)
args.logit_temp = getattr(args, "logit_temp", 0.1)
args.final_dim = getattr(args, "final_dim", 256)
args.untie_final_proj = getattr(args, "untie_final_proj", True)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.1)
args.use_sent_enc_layer = getattr(args, "use_sent_enc_layer", True)
# hubert feature extractor
args.extractor_mode = getattr(args, "extractor_mode", "default")
args.conv_feature_layers = getattr(args, "conv_feature_layers", "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2")
args.conv_bias = getattr(args, "conv_bias", False)
# mask
args.hubert_mask_length = getattr(args, "hubert_mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.0)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_min_space = getattr(args, "mask_min_space", 1)
# channel mask
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.0)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False)
args.mask_channel_min_space = getattr(args, "mask_channel_min_space", 1)
# loss computation
args.skip_masked = getattr(args, "skip_masked", False)
args.skip_nomask = getattr(args, "skip_nomask", False)
# conv Pos
args.use_conv_pos = getattr(args, "use_conv_pos", False)
args.use_sinc_pos = getattr(args, "use_sinc_pos", False)
# codebook
args.use_codebook = getattr(args, "use_codebook", False)
args.latent_vars = getattr(args, "latent_vars", 100)
args.latent_groups = getattr(args, "latent_groups", 2)
args.latent_dim = getattr(args, "latent_dim", 0)
args.latent_temp = getattr(args, "latent_temp", (2, 0.5, 0.999995))
args.quantizer_depth = getattr(args, "quantizer_depth", 1)
args.quantizer_factor = getattr(args, "quantizer_factor", 3)
args.codebook_prob = getattr(args, "codebook_prob", 0.5)
# Relative pos embed
args.relative_position_embedding = getattr(args, "relative_position_embedding", False)
args.num_buckets = getattr(args, "num_buckets", 320)
args.max_distance = getattr(args, "max_distance", 1280)
args.encoder_max_relative_position = getattr(args, "encoder_max_relative_position", 160)
args.decoder_max_relative_position = getattr(args, "decoder_max_relative_position", 160)
def t5_transformer_base(args):
args.use_conv_pos = getattr(args, "use_conv_pos", True)
args.use_sinc_pos = getattr(args, "use_sinc_pos", True)
args.layernorm_embedding = getattr(args, "layernorm_embedding", False)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.layer_norm_first = getattr(args, "layer_norm_first", False)
args.relative_position_embedding = getattr(args, "relative_position_embedding", True)
args.dropout = getattr(args, "dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.05)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.05)
args.mask_prob = getattr(args, "mask_prob", 0.80)
base_architecture(args) | null |
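The same probing trick works here (and for the other architecture presets in this dump); the two `DEFAULT_MAX_*` constants live elsewhere in the real module, so placeholder values are stubbed below.

from argparse import Namespace

DEFAULT_MAX_TEXT_POSITIONS = 450     # placeholder, not taken from this snippet
DEFAULT_MAX_SPEECH_POSITIONS = 4000  # placeholder, not taken from this snippet

args = Namespace()
t5_transformer_base(args)
print(args.encoder_embed_dim, args.mask_prob)  # 768 0.8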
185,110 | import logging
from ast import literal_eval
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from .modules.text_encoder_prenet import TextEncoderPrenet
from .modules.text_decoder_prenet import TextDecoderPrenet
from .modules.text_decoder_postnet import TextDecoderPostnet
from .modules.speech_encoder_prenet import SpeechEncoderPrenet
from .modules.speech_encoder_postnet import SpeechEncoderPostnet
from .modules.speech_decoder_prenet import SpeechDecoderPrenet
from .modules.speech_decoder_postnet import SpeechDecoderPostnet
from .modules.speaker_decoder_postnet import SpeakerDecoderPostnet
from .modules.encoder import TransformerEncoder
from .modules.decoder import TransformerDecoder
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.models.transformer import Embedding
from fairseq.modules import (
GumbelVectorQuantizer,
)
from torch import Tensor
def base_architecture(args):
# Transformer
args.bert_init = getattr(args, "bert_init", False)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.max_text_positions = getattr(args, "max_text_positions", DEFAULT_MAX_TEXT_POSITIONS)
args.max_speech_positions = getattr(args, "max_speech_positions", DEFAULT_MAX_SPEECH_POSITIONS)
# Espnet related, including prenet, postnet
args.eprenet_conv_layers = getattr(args, "eprenet_conv_layers", 0)
args.eprenet_conv_filts = getattr(args, "eprenet_conv_filts", 0)
args.eprenet_conv_chans = getattr(args, "eprenet_conv_chans", 0)
args.use_batch_norm = getattr(args, "use_batch_norm", True)
args.eprenet_dropout_rate = getattr(args, "eprenet_dropout_rate", 0.0)
args.enc_use_scaled_pos_enc = getattr(args, "enc_use_scaled_pos_enc", True)
args.dec_use_scaled_pos_enc = getattr(args, "dec_use_scaled_pos_enc", True)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_chans = getattr(args, "postnet_chans", 256)
args.postnet_filts = getattr(args, "postnet_filts", 5)
args.postnet_dropout_rate = getattr(args, "postnet_dropout_rate", 0.5)
args.dprenet_dropout_rate = getattr(args, "dprenet_dropout_rate", 0.5)
args.dprenet_layers = getattr(args, "dprenet_layers", 2)
args.dprenet_units = getattr(args, "dprenet_units", 256)
args.initial_encoder_alpha = getattr(args, "initial_encoder_alpha", 1.0)
args.initial_decoder_alpha = getattr(args, "initial_decoder_alpha", 1.0)
args.spk_embed_integration_type = getattr(args, "spk_embed_integration_type", "pre")
args.spk_embed_dim = getattr(args, "spk_embed_dim", 512)
args.encoder_reduction_factor = getattr(args, "encoder_reduction_factor", 1)
args.reduction_factor = getattr(args, "reduction_factor", 2)
args.transformer_enc_positional_dropout_rate = getattr(args, "transformer_enc_positional_dropout_rate", 0.1)
args.transformer_dec_positional_dropout_rate = getattr(args, "transformer_dec_positional_dropout_rate", 0.1)
args.layer_norm_eps = getattr(args, "layer_norm_eps", 1e-5)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
# Convolutional subsampler
args.encoder_speech_prenet = getattr(args, "encoder_speech_prenet", "conv")
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.share_input_output_embed = getattr(args, "share_input_output_embed", False)
args.share_ctc_embed = getattr(args, "share_ctc_embed", False)
args.freeze_encoder_updates = getattr(args, "freeze_encoder_updates", 0)
args.freeze_decoder_updates = getattr(args, "freeze_decoder_updates", 0)
args.no_freeze_encoder_layer = getattr(args, "no_freeze_encoder_layer", None)
## sid
args.sid_embed_dim = getattr(args, "sid_embed_dim", 128)
args.sid_pooling_layer = getattr(args, "sid_pooling_layer", "decoder")
args.softmax_scale = getattr(args, "softmax_scale", 1)
args.softmax_margin = getattr(args, "softmax_margin", 0)
args.softmax_easy_margin = getattr(args, "softmax_easy_margin", False)
args.modules_filter = getattr(args, "modules_filter", None)
## Hubert
args.conv_pos = getattr(args, "conv_pos", 128)
args.conv_pos_groups = getattr(args, "conv_pos_groups", 16)
args.target_glu = getattr(args, "target_glu", False)
args.logit_temp = getattr(args, "logit_temp", 0.1)
args.final_dim = getattr(args, "final_dim", 256)
args.untie_final_proj = getattr(args, "untie_final_proj", True)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.1)
args.use_sent_enc_layer = getattr(args, "use_sent_enc_layer", True)
# hubert feature extractor
args.extractor_mode = getattr(args, "extractor_mode", "default")
args.conv_feature_layers = getattr(args, "conv_feature_layers", "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2")
args.conv_bias = getattr(args, "conv_bias", False)
# mask
args.hubert_mask_length = getattr(args, "hubert_mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.0)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_min_space = getattr(args, "mask_min_space", 1)
# channel mask
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.0)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False)
args.mask_channel_min_space = getattr(args, "mask_channel_min_space", 1)
# loss computation
args.skip_masked = getattr(args, "skip_masked", False)
args.skip_nomask = getattr(args, "skip_nomask", False)
# conv Pos
args.use_conv_pos = getattr(args, "use_conv_pos", False)
args.use_sinc_pos = getattr(args, "use_sinc_pos", False)
# codebook
args.use_codebook = getattr(args, "use_codebook", False)
args.latent_vars = getattr(args, "latent_vars", 100)
args.latent_groups = getattr(args, "latent_groups", 2)
args.latent_dim = getattr(args, "latent_dim", 0)
args.latent_temp = getattr(args, "latent_temp", (2, 0.5, 0.999995))
args.quantizer_depth = getattr(args, "quantizer_depth", 1)
args.quantizer_factor = getattr(args, "quantizer_factor", 3)
args.codebook_prob = getattr(args, "codebook_prob", 0.5)
# Relative pos embed
args.relative_position_embedding = getattr(args, "relative_position_embedding", False)
args.num_buckets = getattr(args, "num_buckets", 320)
args.max_distance = getattr(args, "max_distance", 1280)
args.encoder_max_relative_position = getattr(args, "encoder_max_relative_position", 160)
args.decoder_max_relative_position = getattr(args, "decoder_max_relative_position", 160)
def t5_transformer_large(args):
args.use_conv_pos = getattr(args, "use_conv_pos", True)
args.use_sinc_pos = getattr(args, "use_sinc_pos", True)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", True)
args.layer_norm_first = getattr(args, "layer_norm_first", True)
args.relative_position_embedding = getattr(args, "relative_position_embedding", True)
args.dropout = getattr(args, "dropout", 0.0)
args.activation_dropout = getattr(args, "activation_dropout", 0.0)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 1024)
args.encoder_layers = getattr(args, "encoder_layers", 24)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 4096)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 16)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 16)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 1.0)
args.extractor_mode = getattr(args, "extractor_mode", "layer_norm")
args.final_dim = getattr(args, "final_dim", 768)
args.mask_prob = getattr(args, "mask_prob", 0.80)
base_architecture(args) | null |
185,111 | import logging
from ast import literal_eval
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.models import (
FairseqEncoderDecoderModel,
FairseqIncrementalDecoder,
register_model,
register_model_architecture,
)
from .modules.text_encoder_prenet import TextEncoderPrenet
from .modules.text_decoder_prenet import TextDecoderPrenet
from .modules.text_decoder_postnet import TextDecoderPostnet
from .modules.speech_encoder_prenet import SpeechEncoderPrenet
from .modules.speech_encoder_postnet import SpeechEncoderPostnet
from .modules.speech_decoder_prenet import SpeechDecoderPrenet
from .modules.speech_decoder_postnet import SpeechDecoderPostnet
from .modules.speaker_decoder_postnet import SpeakerDecoderPostnet
from .modules.encoder import TransformerEncoder
from .modules.decoder import TransformerDecoder
from fairseq.modules.transformer_sentence_encoder import init_bert_params
from fairseq.models.transformer import Embedding
from fairseq.modules import (
GumbelVectorQuantizer,
)
from torch import Tensor
def base_architecture(args):
# Transformer
args.bert_init = getattr(args, "bert_init", False)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 768)
args.encoder_ffn_embed_dim = getattr(args, "encoder_ffn_embed_dim", 768 * 4)
args.encoder_layers = getattr(args, "encoder_layers", 12)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 12)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", args.encoder_embed_dim)
args.decoder_ffn_embed_dim = getattr(
args, "decoder_ffn_embed_dim", args.encoder_ffn_embed_dim
)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 12)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.dropout = getattr(args, "dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", args.dropout)
args.activation_dropout = getattr(args, "activation_dropout", args.dropout)
args.activation_fn = getattr(args, "activation_fn", "gelu")
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.0)
args.decoder_output_dim = getattr(
args, "decoder_output_dim", args.decoder_embed_dim
)
args.decoder_input_dim = getattr(args, "decoder_input_dim", args.decoder_embed_dim)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0)
args.max_text_positions = getattr(args, "max_text_positions", DEFAULT_MAX_TEXT_POSITIONS)
args.max_speech_positions = getattr(args, "max_speech_positions", DEFAULT_MAX_SPEECH_POSITIONS)
# Espnet related, including prenet, postnet
args.eprenet_conv_layers = getattr(args, "eprenet_conv_layers", 0)
args.eprenet_conv_filts = getattr(args, "eprenet_conv_filts", 0)
args.eprenet_conv_chans = getattr(args, "eprenet_conv_chans", 0)
args.use_batch_norm = getattr(args, "use_batch_norm", True)
args.eprenet_dropout_rate = getattr(args, "eprenet_dropout_rate", 0.0)
args.enc_use_scaled_pos_enc = getattr(args, "enc_use_scaled_pos_enc", True)
args.dec_use_scaled_pos_enc = getattr(args, "dec_use_scaled_pos_enc", True)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_chans = getattr(args, "postnet_chans", 256)
args.postnet_filts = getattr(args, "postnet_filts", 5)
args.postnet_dropout_rate = getattr(args, "postnet_dropout_rate", 0.5)
args.dprenet_dropout_rate = getattr(args, "dprenet_dropout_rate", 0.5)
args.dprenet_layers = getattr(args, "dprenet_layers", 2)
args.dprenet_units = getattr(args, "dprenet_units", 256)
args.initial_encoder_alpha = getattr(args, "initial_encoder_alpha", 1.0)
args.initial_decoder_alpha = getattr(args, "initial_decoder_alpha", 1.0)
args.spk_embed_integration_type = getattr(args, "spk_embed_integration_type", "pre")
args.spk_embed_dim = getattr(args, "spk_embed_dim", 512)
args.encoder_reduction_factor = getattr(args, "encoder_reduction_factor", 1)
args.reduction_factor = getattr(args, "reduction_factor", 2)
args.transformer_enc_positional_dropout_rate = getattr(args, "transformer_enc_positional_dropout_rate", 0.1)
args.transformer_dec_positional_dropout_rate = getattr(args, "transformer_dec_positional_dropout_rate", 0.1)
args.layer_norm_eps = getattr(args, "layer_norm_eps", 1e-5)
args.no_scale_embedding = getattr(args, "no_scale_embedding", True)
# Convolutional subsampler
args.encoder_speech_prenet = getattr(args, "encoder_speech_prenet", "conv")
args.conv_kernel_sizes = getattr(args, "conv_kernel_sizes", "5,5")
args.conv_channels = getattr(args, "conv_channels", 1024)
args.quant_noise_pq = getattr(args, "quant_noise_pq", 0)
args.adaptive_softmax_cutoff = getattr(args, "adaptive_softmax_cutoff", None)
args.adaptive_softmax_dropout = getattr(args, "adaptive_softmax_dropout", 0)
args.no_token_positional_embeddings = getattr(
args, "no_token_positional_embeddings", False
)
args.adaptive_input = getattr(args, "adaptive_input", False)
args.decoder_learned_pos = getattr(args, "decoder_learned_pos", False)
args.share_input_output_embed = getattr(args, "share_input_output_embed", False)
args.share_ctc_embed = getattr(args, "share_ctc_embed", False)
args.freeze_encoder_updates = getattr(args, "freeze_encoder_updates", 0)
args.freeze_decoder_updates = getattr(args, "freeze_decoder_updates", 0)
args.no_freeze_encoder_layer = getattr(args, "no_freeze_encoder_layer", None)
## sid
args.sid_embed_dim = getattr(args, "sid_embed_dim", 128)
args.sid_pooling_layer = getattr(args, "sid_pooling_layer", "decoder")
args.softmax_scale = getattr(args, "softmax_scale", 1)
args.softmax_margin = getattr(args, "softmax_margin", 0)
args.softmax_easy_margin = getattr(args, "softmax_easy_margin", False)
args.modules_filter = getattr(args, "modules_filter", None)
## Hubert
args.conv_pos = getattr(args, "conv_pos", 128)
args.conv_pos_groups = getattr(args, "conv_pos_groups", 16)
args.target_glu = getattr(args, "target_glu", False)
args.logit_temp = getattr(args, "logit_temp", 0.1)
args.final_dim = getattr(args, "final_dim", 256)
args.untie_final_proj = getattr(args, "untie_final_proj", True)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.1)
args.use_sent_enc_layer = getattr(args, "use_sent_enc_layer", True)
# hubert feature extractor
args.extractor_mode = getattr(args, "extractor_mode", "default")
args.conv_feature_layers = getattr(args, "conv_feature_layers", "[(512,10,5)] + [(512,3,2)] * 4 + [(512,2,2)] * 2")
args.conv_bias = getattr(args, "conv_bias", False)
# mask
args.hubert_mask_length = getattr(args, "hubert_mask_length", 10)
args.mask_prob = getattr(args, "mask_prob", 0.0)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_other = getattr(args, "mask_other", 0)
args.no_mask_overlap = getattr(args, "no_mask_overlap", False)
args.mask_min_space = getattr(args, "mask_min_space", 1)
# channel mask
args.mask_channel_length = getattr(args, "mask_channel_length", 10)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.0)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.mask_channel_other = getattr(args, "mask_channel_other", 0)
args.no_mask_channel_overlap = getattr(args, "no_mask_channel_overlap", False)
args.mask_channel_min_space = getattr(args, "mask_channel_min_space", 1)
# loss computation
args.skip_masked = getattr(args, "skip_masked", False)
args.skip_nomask = getattr(args, "skip_nomask", False)
# conv Pos
args.use_conv_pos = getattr(args, "use_conv_pos", False)
args.use_sinc_pos = getattr(args, "use_sinc_pos", False)
# codebook
args.use_codebook = getattr(args, "use_codebook", False)
args.latent_vars = getattr(args, "latent_vars", 100)
args.latent_groups = getattr(args, "latent_groups", 2)
args.latent_dim = getattr(args, "latent_dim", 0)
args.latent_temp = getattr(args, "latent_temp", (2, 0.5, 0.999995))
args.quantizer_depth = getattr(args, "quantizer_depth", 1)
args.quantizer_factor = getattr(args, "quantizer_factor", 3)
args.codebook_prob = getattr(args, "codebook_prob", 0.5)
# Relative pos embed
args.relative_position_embedding = getattr(args, "relative_position_embedding", False)
args.num_buckets = getattr(args, "num_buckets", 320)
args.max_distance = getattr(args, "max_distance", 1280)
args.encoder_max_relative_position = getattr(args, "encoder_max_relative_position", 160)
args.decoder_max_relative_position = getattr(args, "decoder_max_relative_position", 160)
def t5_transformer_base_asr(args):
args.use_conv_pos = getattr(args, "use_conv_pos", True)
args.use_sinc_pos = getattr(args, "use_sinc_pos", True)
args.encoder_normalize_before = getattr(args, "encoder_normalize_before", False)
args.decoder_normalize_before = getattr(args, "decoder_normalize_before", False)
args.layer_norm_first = getattr(args, "layer_norm_first", False)
args.relative_position_embedding = getattr(args, "relative_position_embedding", True)
args.dropout = getattr(args, "dropout", 0.1)
args.activation_dropout = getattr(args, "activation_dropout", 0.1)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.feature_grad_mult = getattr(args, "feature_grad_mult", 0.0)
args.encoder_layerdrop = getattr(args, "encoder_layerdrop", 0.1)
args.decoder_layerdrop = getattr(args, "decoder_layerdrop", 0.1)
args.mask_prob = getattr(args, "mask_prob", 0.75)
args.mask_selection = getattr(args, "mask_selection", "static")
args.mask_channel_length = getattr(args, "mask_channel_length", 64)
args.mask_channel_prob = getattr(args, "mask_channel_prob", 0.5)
args.mask_channel_selection = getattr(args, "mask_channel_selection", "static")
args.max_text_positions = getattr(args, "max_text_positions", 600)
base_architecture(args) | null |
185,112 | import ast
import logging
import os
import os.path as op
import sys
from argparse import Namespace
import numpy as np
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from omegaconf import DictConfig
from typing import Optional

logger = logging.getLogger(__name__)
def _plot_and_save(array, figname, figsize=(6, 4), dpi=150):
import matplotlib.pyplot as plt
shape = array.shape
if len(shape) == 1:
# for eos probability
plt.figure(figsize=figsize, dpi=dpi)
plt.plot(array)
plt.xlabel("Frame")
plt.ylabel("Probability")
plt.ylim([0, 1])
elif len(shape) == 2:
# for tacotron 2 attention weights, whose shape is (out_length, in_length)
plt.figure(figsize=figsize, dpi=dpi)
plt.imshow(array, aspect="auto")
elif len(shape) == 4:
# for transformer attention weights,
        # whose shape is (#layers, #heads, out_length, in_length)
plt.figure(figsize=(figsize[0] * shape[0], figsize[1] * shape[1]), dpi=dpi)
for idx1, xs in enumerate(array):
for idx2, x in enumerate(xs, 1):
plt.subplot(shape[0], shape[1], idx1 * shape[1] + idx2)
plt.imshow(x, aspect="auto")
plt.xlabel("Input")
plt.ylabel("Output")
else:
        raise NotImplementedError("Only 1D, 2D, and 4D arrays are supported.")
plt.tight_layout()
if not op.exists(op.dirname(figname)):
# NOTE: exist_ok = True is needed for parallel process decoding
os.makedirs(op.dirname(figname), exist_ok=True)
plt.savefig(figname)
plt.close()
def _calculate_focus_rate(att_ws):
    if att_ws is None:
        # fastspeech case -> None
        return 1.0
    elif len(att_ws.shape) == 2:
        # tacotron 2 case -> (L, T)
        return float(att_ws.max(dim=-1)[0].mean())
    elif len(att_ws.shape) == 4:
        # transformer case -> (#layers, #heads, L, T)
        return float(att_ws.max(dim=-1)[0].mean(dim=-1).max())
    else:
        raise ValueError("att_ws should be a 2- or 4-dimensional tensor.")
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
log_file: Optional[str] = None,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = "tqdm",
wandb_project: Optional[str] = None,
wandb_run_name: Optional[str] = None,
azureml_logging: Optional[bool] = False,
):
if log_format is None:
log_format = default_log_format
if log_file is not None:
handler = logging.FileHandler(filename=log_file)
logger.addHandler(handler)
if log_format == "tqdm" and not sys.stderr.isatty():
log_format = "simple"
if log_format == "json":
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "none":
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == "simple":
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "tqdm":
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError("Unknown log format: {}".format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
if wandb_project:
bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
if azureml_logging:
bar = AzureMLProgressBarWrapper(bar)
return bar
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("speecht5.generate_speech")
utils.import_user_module(cfg.common)
assert cfg.dataset.batch_size == 1, "only support batch size 1"
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
if not use_cuda:
logger.info("generate speech on cpu")
# build task
task = tasks.setup_task(cfg.task)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
logger.info(saved_cfg)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
# optimize ensemble for generation
for model in models:
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
# load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=None,
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
    progress = progress_bar(  # the module-level copy defined above
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
for i, sample in enumerate(progress):
if "net_input" not in sample:
continue
sample = utils.move_to_cuda(sample) if use_cuda else sample
outs, _, attn = task.generate_speech(
models,
sample["net_input"],
)
        focus_rate = _calculate_focus_rate(attn)
outs = outs.cpu().numpy()
audio_name = op.basename(sample['name'][0])
np.save(op.join(cfg.common_eval.results_path, audio_name.replace(".wav", "-feats.npy")), outs)
logging.info(
"{} (size: {}->{} ({}), focus rate: {:.3f})".format(
sample['name'][0],
sample['src_lengths'][0].item(),
outs.shape[0],
sample['dec_target_lengths'][0].item(),
focus_rate
)
)
if i < 6 and attn is not None:
import shutil
demo_dir = op.join(op.dirname(cfg.common_eval.results_path), "demo")
audio_dir = op.join(demo_dir, "audio")
os.makedirs(audio_dir, exist_ok=True)
shutil.copy(op.join(task.dataset(cfg.dataset.gen_subset).audio_root, sample['tgt_name'][0] if "tgt_name" in sample else sample['name'][0]), op.join(audio_dir, audio_name))
att_dir = op.join(demo_dir, "att_ws")
_plot_and_save(attn.cpu().numpy(), op.join(att_dir, f"{audio_name}_att_ws.png"))
spec_dir = op.join(demo_dir, "spec")
_plot_and_save(outs.T, op.join(spec_dir, f"{audio_name}_gen.png"))
_plot_and_save(sample["target"][0].cpu().numpy().T, op.join(spec_dir, f"{audio_name}_ori.png")) | null |
185,113 | import ast
import logging
import os
import os.path as op
import sys
from argparse import Namespace
import numpy as np
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from omegaconf import DictConfig
def main(cfg: DictConfig):
    if isinstance(cfg, Namespace):
        cfg = convert_namespace_to_omegaconf(cfg)
    _main(cfg, sys.stdout)  # _main as defined in the preceding snippet
def cli_main():
parser = options.get_generation_parser()
args = options.parse_args_and_arch(parser)
main(args) | null |
185,137 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
The provided code snippet includes necessary dependencies for implementing the `bool_flag` function. Write a Python function `def bool_flag(s)` to solve the following problem:
Parse boolean arguments from the command line.
Here is the function:
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
FALSY_STRINGS = {"off", "false", "0"}
TRUTHY_STRINGS = {"on", "true", "1"}
if s.lower() in FALSY_STRINGS:
return False
elif s.lower() in TRUTHY_STRINGS:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag") | Parse boolean arguments from the command line. |
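Typical argparse wiring (hypothetical flag name):

parser = argparse.ArgumentParser()
parser.add_argument("--amp", type=bool_flag, default=True)
print(parser.parse_args(["--amp", "off"]).amp)  # False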
185,138 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def _get_rank_env():
if "RANK" in os.environ:
return int(os.environ["RANK"])
else:
return int(os.environ['OMPI_COMM_WORLD_RANK'])
def _get_local_rank_env():
if "LOCAL_RANK" in os.environ:
return int(os.environ["LOCAL_RANK"])
else:
return int(os.environ['OMPI_COMM_WORLD_LOCAL_RANK'])
def _get_world_size_env():
if "WORLD_SIZE" in os.environ:
return int(os.environ["WORLD_SIZE"])
else:
return int(os.environ['OMPI_COMM_WORLD_SIZE'])
def init_distributed_mode(args):
if args.dist_on_itp:
args.rank = _get_rank_env()
args.world_size = _get_world_size_env() # int(os.environ['OMPI_COMM_WORLD_SIZE'])
args.gpu = _get_local_rank_env()
args.dist_url = "tcp://%s:%s" % (os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])
os.environ['LOCAL_RANK'] = str(args.gpu)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
# ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"]
elif 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}, gpu {}'.format(
args.rank, args.dist_url, args.gpu), flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank,
timeout=datetime.timedelta(0, 7200)
)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0) | null |
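A hedged sketch of the environment init_distributed_mode consumes under a plain torchrun launch (values illustrative): it reads RANK/WORLD_SIZE/LOCAL_RANK, falls back to SLURM_PROCID, and otherwise disables distributed mode.
import os
from argparse import Namespace

# torchrun normally exports these per worker; set here only for illustration
os.environ.update({"RANK": "0", "WORLD_SIZE": "1", "LOCAL_RANK": "0"})
args = Namespace(dist_on_itp=False, dist_url="env://", distributed=False)
# init_distributed_mode(args) would then fill args.rank/world_size/gpu
# and call init_process_group with the nccl backend (requires a GPU)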
185,139 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def get_grad_norm_(parameters, norm_type: float = 2.0) -> torch.Tensor:
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = [p for p in parameters if p.grad is not None]
norm_type = float(norm_type)
if len(parameters) == 0:
return torch.tensor(0.)
device = parameters[0].grad.device
if norm_type == inf:
total_norm = max(p.grad.detach().abs().max().to(device) for p in parameters)
else:
total_norm = torch.norm(torch.stack([torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]), norm_type)
return total_norm | null |
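A short usage sketch for get_grad_norm_, e.g. to log the global gradient norm after backward (model and loss are placeholders):
import torch
import torch.nn as nn

model = nn.Linear(4, 2)
loss = model(torch.randn(8, 4)).pow(2).mean()
loss.backward()
grad_norm = get_grad_norm_(model.parameters())  # global L2 norm over all grads
print(f"grad norm: {grad_norm:.4f}")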
185,140 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0,
start_warmup_value=0, warmup_steps=-1, sched_type="cos"):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_steps > 0:
warmup_iters = warmup_steps
print("Set warmup steps = %d" % warmup_iters)
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
if sched_type == "cos":
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = np.array([
final_value + 0.5 * (base_value - final_value) * (1 + math.cos(math.pi * i / (len(iters)))) for i in iters])
elif sched_type == "linear":
schedule = np.linspace(base_value, final_value, epochs * niter_per_ep - warmup_iters)
else:
raise NotImplementedError()
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule | null |
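The returned schedule holds one value per optimizer step, so a typical training loop indexes it by the global iteration; a minimal sketch with illustrative hyperparameters:
schedule = cosine_scheduler(
    base_value=1e-3, final_value=1e-5,
    epochs=10, niter_per_ep=100, warmup_epochs=1,
)
assert len(schedule) == 10 * 100
for epoch in range(10):
    for it in range(100):
        lr = schedule[epoch * 100 + it]  # linear warmup for the first 100 steps, then cosine decay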
185,141 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def save_model(args, epoch, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
checkpoint_paths = [output_dir / ('checkpoint-%s.pth' % epoch)]
for checkpoint_path in checkpoint_paths:
to_save = {
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'epoch': epoch,
'scaler': loss_scaler.state_dict(),
'args': args,
}
if model_ema is not None:
to_save['model_ema'] = get_state_dict(model_ema)
save_on_master(to_save, checkpoint_path)
else:
client_state = {'epoch': epoch, "args": args}
if model_ema is not None:
client_state['model_ema'] = get_state_dict(model_ema)
model.save_checkpoint(save_dir=args.output_dir, tag="checkpoint-%s" % epoch, client_state=client_state) | null |
185,142 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def _load_checkpoint_for_ema(model_ema, checkpoint):
"""
Workaround for ModelEma._load_checkpoint to accept an already-loaded object
"""
mem_file = io.BytesIO()
torch.save(checkpoint, mem_file)
mem_file.seek(0)
model_ema._load_checkpoint(mem_file)
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
def auto_load_model(args, model, model_without_ddp, optimizer, loss_scaler, model_ema=None):
output_dir = Path(args.output_dir)
if loss_scaler is not None:
# torch.amp
if args.auto_resume and len(args.resume) == 0:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*.pth'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d.pth' % latest_ckpt)
print("Auto resume checkpoint: %s" % args.resume)
if args.resume:
if args.resume.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
args.resume, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
print("Resume checkpoint %s" % args.resume)
if 'optimizer' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
args.start_epoch = checkpoint['epoch'] + 1
if hasattr(args, 'model_ema') and args.model_ema:
_load_checkpoint_for_ema(model_ema, checkpoint['model_ema'])
if 'scaler' in checkpoint:
loss_scaler.load_state_dict(checkpoint['scaler'])
print("With optim & sched!")
else:
# deepspeed, only support '--auto_resume'.
if args.auto_resume:
import glob
all_checkpoints = glob.glob(os.path.join(output_dir, 'checkpoint-*'))
latest_ckpt = -1
for ckpt in all_checkpoints:
t = ckpt.split('-')[-1].split('.')[0]
if t.isdigit():
latest_ckpt = max(int(t), latest_ckpt)
if latest_ckpt >= 0:
args.resume = os.path.join(output_dir, 'checkpoint-%d' % latest_ckpt)
print("Auto resume checkpoint: %d" % latest_ckpt)
_, client_states = model.load_checkpoint(args.output_dir, tag='checkpoint-%d' % latest_ckpt)
args.start_epoch = client_states['epoch'] + 1
if model_ema is not None:
if args.model_ema:
_load_checkpoint_for_ema(model_ema, client_states['model_ema']) | null |
185,143 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def load_state_dict(model, state_dict, prefix='', ignore_missing="relative_position_index"):
missing_keys = []
unexpected_keys = []
error_msgs = []
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
load(model, prefix=prefix)
warn_missing_keys = []
ignore_missing_keys = []
for key in missing_keys:
keep_flag = True
for ignore_key in ignore_missing.split('|'):
if ignore_key in key:
keep_flag = False
break
if keep_flag:
warn_missing_keys.append(key)
else:
ignore_missing_keys.append(key)
missing_keys = warn_missing_keys
if len(missing_keys) > 0:
print("Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
print("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(ignore_missing_keys) > 0:
print("Ignored weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, ignore_missing_keys))
if len(error_msgs) > 0:
print('\n'.join(error_msgs))
def load_model_and_may_interpolate(ckpt_path, model, model_key, model_prefix):
if ckpt_path.startswith('https'):
checkpoint = torch.hub.load_state_dict_from_url(
ckpt_path, map_location='cpu', check_hash=True)
else:
checkpoint = torch.load(ckpt_path, map_location='cpu')
print("Load ckpt from %s" % ckpt_path)
checkpoint_model = None
for model_key in model_key.split('|'):
if model_key in checkpoint:
checkpoint_model = checkpoint[model_key]
print("Load state_dict by model_key = %s" % model_key)
break
if checkpoint_model is None:
checkpoint_model = checkpoint
state_dict = model.state_dict()
for k in ['head.weight', 'head.bias']:
if k in checkpoint_model and checkpoint_model[k].shape != state_dict[k].shape:
print(f"Removing key {k} from pretrained checkpoint")
del checkpoint_model[k]
# interpolate position embedding
for pos_embed_key in ("vision_pos_embed", "pos_embed", "beit3.encoder.embed_positions.A.weight"):
if pos_embed_key in checkpoint_model:
pos_embed_checkpoint = checkpoint_model[pos_embed_key]
embedding_size = pos_embed_checkpoint.shape[-1]
if pos_embed_key == "beit3.encoder.embed_positions.A.weight":
# being consistent with Fairseq, which starts from 2 for position embedding
torchscale_model = True
num_patches = model.beit3.vision_embed.num_patches
num_extra_tokens = model.beit3.vision_embed.num_position_embeddings() + 2 - num_patches
else:
torchscale_model = False
num_patches = model.patch_embed.num_patches
num_extra_tokens = getattr(model, pos_embed_key).shape[-2] - num_patches
# height (== width) for the checkpoint position embedding
orig_size = int((pos_embed_checkpoint.shape[-2] - num_extra_tokens) ** 0.5)
# height (== width) for the new position embedding
new_size = int(num_patches ** 0.5)
# class_token and dist_token are kept unchanged
if orig_size != new_size:
print("Position interpolate from %dx%d to %dx%d" % (orig_size, orig_size, new_size, new_size))
if torchscale_model:
extra_tokens = pos_embed_checkpoint[:num_extra_tokens].unsqueeze(0)
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[num_extra_tokens:]
else:
extra_tokens = pos_embed_checkpoint[:, :num_extra_tokens]
# only the position tokens are interpolated
pos_tokens = pos_embed_checkpoint[:, num_extra_tokens:]
pos_tokens = pos_tokens.reshape(-1, orig_size, orig_size, embedding_size).permute(0, 3, 1, 2)
pos_tokens = torch.nn.functional.interpolate(
pos_tokens, size=(new_size, new_size), mode='bicubic', align_corners=False)
pos_tokens = pos_tokens.permute(0, 2, 3, 1).flatten(1, 2)
new_pos_embed = torch.cat((extra_tokens, pos_tokens), dim=1)
if torchscale_model:
new_pos_embed = new_pos_embed.squeeze(0)
checkpoint_model[pos_embed_key] = new_pos_embed
load_state_dict(model, checkpoint_model, prefix=model_prefix) | null |
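A worked example of the grid arithmetic in load_model_and_may_interpolate: with a 224px/patch16 checkpoint and a 384px/patch16 target model, the position grid is resized from 14x14 to 24x24 (the numbers follow directly from the code above):
num_extra_tokens = 1                      # e.g. one [CLS] position token
orig_num_patches = (224 // 16) ** 2       # 196 patches in the checkpoint
new_num_patches = (384 // 16) ** 2        # 576 patches in the target model
orig_size = int(orig_num_patches ** 0.5)  # 14
new_size = int(new_num_patches ** 0.5)    # 24
# pos_tokens are reshaped to (1, 14, 14, C), bicubically interpolated to
# (1, 24, 24, C), flattened back, and re-concatenated with the extra
# (non-grid) position tokens.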
185,144 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def create_ds_config(args):
args.deepspeed_config = os.path.join(args.output_dir, "deepspeed_config.json")
with open(args.deepspeed_config, mode="w") as writer:
ds_config = {
"train_batch_size": args.batch_size * args.update_freq * get_world_size(),
"train_micro_batch_size_per_gpu": args.batch_size,
"steps_per_print": 1000,
"optimizer": {
"type": "Adam",
"adam_w_mode": True,
"params": {
"lr": args.lr,
"weight_decay": args.weight_decay,
"bias_correction": True,
"betas": [
args.opt_betas[0],
args.opt_betas[1]
],
"eps": args.opt_eps
}
},
"fp16": {
"enabled": True,
"loss_scale": 0,
"initial_scale_power": getattr(args, "initial_scale_power", 12),
"loss_scale_window": 1000,
"hysteresis": 2,
"min_loss_scale": 1
},
"amp": {
"enabled": False,
"opt_level": "O2"
}
}
if args.clip_grad is not None:
ds_config.update({'gradient_clipping': args.clip_grad})
if args.zero_stage == 1:
ds_config.update({"zero_optimization": {"stage": args.zero_stage, "reduce_bucket_size": 5e8}})
elif args.zero_stage > 1:
raise NotImplementedError()
writer.write(json.dumps(ds_config, indent=2)) | null |
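As a sanity check on the generated config, train_batch_size is batch_size * update_freq * world_size; a single-process sketch with illustrative args fields:
from argparse import Namespace

args = Namespace(
    output_dir=".", batch_size=32, update_freq=2, lr=1e-3,
    weight_decay=0.05, opt_betas=(0.9, 0.999), opt_eps=1e-8,
    clip_grad=None, zero_stage=0,
)
# on one process: train_batch_size = 32 * 2 * 1 = 64
# create_ds_config(args) would write deepspeed_config.json under output_dir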
185,145 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
class GatherLayer(torch.autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
Unlike torch.distributed.all_gather, this implementation does not detach the gathered tensors from the autograd graph.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def gather_features(
image_features,
text_features,
):
gathered_image_features = GatherLayer.apply(image_features)
gathered_text_features = GatherLayer.apply(text_features)
all_image_features = torch.cat(gathered_image_features)
all_text_features = torch.cat(gathered_text_features)
return all_image_features, all_text_features | null |
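A brief sketch of why GatherLayer matters for the contrastive loss: dist.all_gather returns tensors detached from the autograd graph, while GatherLayer.apply keeps the local shard differentiable. A hedged helper (assumes an initialized process group for the distributed branch; GatherLayer as defined above):
import torch
import torch.distributed as dist

def differentiable_gather(feats: torch.Tensor) -> torch.Tensor:
    if dist.is_available() and dist.is_initialized():
        # the local shard stays in the autograd graph, unlike dist.all_gather
        return torch.cat(GatherLayer.apply(feats))
    return feats  # single-process fallback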
185,146 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def write_result_to_jsonl(test_stats, result_file):
with open(result_file, mode="w", encoding="utf-8") as writer:
writer.write(json.dumps(test_stats, indent=None)) | null |
185,147 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def read_result_from_jsonl(result_file):
with open(result_file, mode="r", encoding="utf-8") as reader:
return json.load(reader) | null |
185,148 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def dump_predictions(args, result, file_suffix):
global_rank = get_rank()
jsons = None
if global_rank >= 0:
output_file = os.path.join(args.task_cache_path, f"submit_{global_rank}_{file_suffix}.json")
with open(output_file, "w") as fp:
json.dump(result, fp, indent=2)
torch.distributed.barrier()
if global_rank == 0:
world_size = get_world_size()
jsons = []
for i in range(world_size):
each_file = os.path.join(args.task_cache_path, f"submit_{i}_{file_suffix}.json")
with open(each_file, "r") as fp:
jsons += json.load(fp)
new_jsons = []
res_dict = dict()
if args.task in ["coco_captioning", "nocaps"]:
qid_key = "image_id"
else:
# for VQAv2
qid_key = "question_id"
for item in jsons:
if item[qid_key] in res_dict:
continue
new_jsons.append(item)
res_dict[item[qid_key]] = item
jsons = new_jsons
torch.distributed.barrier()
os.remove(output_file)
else:
jsons = result
result_file = os.path.join(args.output_dir, f"submit_{file_suffix}.json")
if jsons is not None:
with open(result_file, "w") as fp:
json.dump(jsons, fp, indent=2)
print("Infer %d examples into %s" % (len(jsons), result_file))
return result_file | null |
185,149 | import datetime
import io
import os
import math
import time
import json
import argparse
import numpy as np
from pathlib import Path
from collections import defaultdict, deque
from timm.utils import get_state_dict
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from torch._six import inf
from torchmetrics import Metric
from tensorboardX import SummaryWriter
def coco_caption_eval(gt_dir, results_file, split):
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
from torchvision.datasets.utils import download_url
urls = {'coco_captioning_val': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_val_gt.json',
'coco_captioning_test': 'https://storage.googleapis.com/sfr-vision-language-research/datasets/coco_karpathy_test_gt.json',
'nocaps_val': 'https://conversationhub.blob.core.windows.net/beit-share-public/beit3/nocaps/nocaps_val_gt.json?sv=2021-10-04&st=2023-06-08T11%3A16%3A02Z&se=2033-06-09T11%3A16%3A00Z&sr=c&sp=r&sig=N4pfCVmSeq4L4tS8QbrFVsX6f6q844eft8xSuXdxU48%3D'}
filenames = {'coco_captioning_val':'coco_karpathy_val_gt.json',
'coco_captioning_test':'coco_karpathy_test_gt.json',
'nocaps_val':'nocaps_val_gt.json'}
download_url(urls[split], gt_dir)
annotation_file = os.path.join(gt_dir, filenames[split])
# create coco object and coco_result object
coco = COCO(annotation_file)
coco_result = coco.loadRes(results_file)
# create coco_eval object by taking coco and coco_result
coco_eval = COCOEvalCap(coco, coco_result)
# evaluate results
# SPICE will take a few minutes the first time, but speeds up due to caching
coco_eval.evaluate()
res_dict = dict()
for metric, score in coco_eval.eval.items():
res_dict[metric] = score
return res_dict | null |
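A hedged call sketch for coco_caption_eval (paths illustrative; the split key must match the urls dict above, and the ground-truth json is downloaded into gt_dir if missing):
metrics = coco_caption_eval(
    gt_dir="./gt_annotations",
    results_file="submit_val.json",
    split="coco_captioning_val",
)
print(metrics)  # e.g. {"Bleu_4": ..., "CIDEr": ..., "SPICE": ...}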
185,150 | from torch import optim as optim
from timm.optim.lookahead import Lookahead
import json
def get_num_layer_for_vit(var_name, num_max_layer):
if "embed" in var_name:
return 0
elif var_name in (
"cls_token", "mask_token", "pos_embed", "language_pos_embed",
"word_embeddings.weight", "vision_cls_token", "vision_pos_embed"
):
return 0
elif var_name.startswith("patch_embed"):
return 0
elif var_name.startswith("rel_pos_bias"):
return num_max_layer - 1
elif "layers." in var_name:
layer_id = int(var_name.split('layers.')[1].split('.')[0])
return layer_id + 1
else:
return num_max_layer - 1 | null |
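get_num_layer_for_vit typically drives layer-wise learning-rate decay: each depth bucket gets decay ** (num_layers - layer_id) as its LR scale, so embeddings train slowest and the last blocks fastest. A sketch with an illustrative decay value:
num_layers = 12 + 2  # 12 transformer blocks plus embedding and head buckets
layer_decay = 0.85
scales = [layer_decay ** (num_layers - i) for i in range(num_layers)]
# "patch_embed.proj.weight" -> bucket 0 -> smallest scale;
# "layers.11.mlp.fc1.weight" -> bucket 12 -> close to the full learning rate
assert get_num_layer_for_vit("layers.3.attn.qkv.weight", num_layers) == 4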
185,153 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForImageClassification(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForImageClassification, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.fc_norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.fc_norm.apply(self._init_weights)
self.head.apply(self._init_weights)
init_scale = 0.001
if isinstance(self.head, nn.Linear):
self.head.weight.data.mul_(init_scale)
self.head.bias.data.mul_(init_scale)
def forward(self, image, **kwargs):
x = self.beit3(textual_tokens=None, visual_tokens=image)["encoder_out"]
t = x[:, 1:, :]
cls_x = self.fc_norm(t.mean(1))
return self.head(cls_x)
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_224_imageclassification(pretrained=False, **kwargs):
args = _get_base_config(**kwargs)
args.normalize_output = False
model = BEiT3ForImageClassification(args, num_classes=1000, **kwargs)
return model | null |
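A hedged inference sketch for the base classification model (requires the torchscale/timm dependencies imported above; the input shape follows img_size=224, patch_size=16):
import torch

model = beit3_base_patch16_224_imageclassification()
model.eval()
with torch.no_grad():
    logits = model(image=torch.randn(1, 3, 224, 224))  # shape (1, 1000)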
185,154 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForImageClassification(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForImageClassification, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.fc_norm = norm_layer(embed_dim)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.fc_norm.apply(self._init_weights)
self.head.apply(self._init_weights)
init_scale = 0.001
if isinstance(self.head, nn.Linear):
self.head.weight.data.mul_(init_scale)
self.head.bias.data.mul_(init_scale)
def forward(self, image, **kwargs):
x = self.beit3(textual_tokens=None, visual_tokens=image)["encoder_out"]
t = x[:, 1:, :]
cls_x = self.fc_norm(t.mean(1))
return self.head(cls_x)
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=1024, encoder_attention_heads=16,
encoder_ffn_embed_dim=int(1024 * mlp_ratio), encoder_layers=24,
checkpoint_activations=checkpoint_activations,
)
def beit3_large_patch16_224_imageclassification(pretrained=False, **kwargs):
args = _get_large_config(**kwargs)
args.normalize_output = False
model = BEiT3ForImageClassification(args, num_classes=1000, **kwargs)
return model | null |
185,155 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
class BEiT3ForVisualReasoning(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
def forward(self, image_a, image_b, text_description, padding_mask, **kwargs):
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
def beit3_base_patch16_224_nlvr2(pretrained=False, **kwargs):
args = _get_base_config(**kwargs)
model = BEiT3ForVisualReasoning(args, num_classes=2, **kwargs)
return model | null |
185,156 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForVisualReasoning(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForVisualReasoning, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.head = TwoLayerMLP(
in_features=embed_dim * 4,
hidden_features=embed_dim * 2,
out_features=num_classes,
norm_layer=norm_layer,
)
init_scale = 0.001
self.head.apply(self._init_weights)
if isinstance(self.head.dense1, nn.Linear):
self.head.dense1.weight.data.mul_(init_scale)
self.head.dense1.bias.data.mul_(init_scale)
if isinstance(self.head.dense2, nn.Linear):
self.head.dense2.weight.data.mul_(init_scale)
self.head.dense2.bias.data.mul_(init_scale)
def forward(self, image_a, image_b, text_description, padding_mask, **kwargs):
bsz, _ = text_description.size()
vision_input = torch.cat((image_a, image_b), dim=0)
language_input = torch.cat((text_description, text_description), dim=0)
padding_mask = torch.cat((padding_mask, padding_mask), dim=0)
outputs = self.beit3(
textual_tokens=language_input,
visual_tokens=vision_input,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
multiway_split_position = outputs["multiway_split_position"]
vision_cls = x[:, 0, :]
language_cls = x[:, multiway_split_position, :]
cls_rep = torch.cat((vision_cls, language_cls), dim=-1)
a, b = torch.split(cls_rep, split_size_or_sections=[bsz, bsz], dim=0)
cls_rep = torch.cat((a, b), dim=-1)
return self.head(cls_rep)
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=1024, encoder_attention_heads=16,
encoder_ffn_embed_dim=int(1024 * mlp_ratio), encoder_layers=24,
checkpoint_activations=checkpoint_activations,
)
def beit3_large_patch16_224_nlvr2(pretrained=False, **kwargs):
args = _get_large_config(**kwargs)
model = BEiT3ForVisualReasoning(args, num_classes=2, **kwargs)
return model | null |
185,157 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForVisualQuestionAnswering, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.pooler = Pooler(
input_features=embed_dim,
output_features=embed_dim,
norm_layer=norm_layer,
)
self.pooler.apply(self._init_weights)
self.head = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 2),
norm_layer(embed_dim * 2),
nn.GELU(),
nn.Linear(embed_dim * 2, num_classes),
)
self.head.apply(self._init_weights)
def forward(self, image, question, padding_mask, **kwargs):
outputs = self.beit3(
textual_tokens=question,
visual_tokens=image,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
cls_rep = self.pooler(x)
return self.head(cls_rep)
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_384_vqav2(pretrained=False, **kwargs):
args = _get_base_config(img_size=384, **kwargs)
args.normalize_output = False
model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs)
return model | null |
185,158 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForVisualQuestionAnswering, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.pooler = Pooler(
input_features=embed_dim,
output_features=embed_dim,
norm_layer=norm_layer,
)
self.pooler.apply(self._init_weights)
self.head = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 2),
norm_layer(embed_dim * 2),
nn.GELU(),
nn.Linear(embed_dim * 2, num_classes),
)
self.head.apply(self._init_weights)
def forward(self, image, question, padding_mask, **kwargs):
outputs = self.beit3(
textual_tokens=question,
visual_tokens=image,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
cls_rep = self.pooler(x)
return self.head(cls_rep)
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_480_vqav2(pretrained=False, **kwargs):
args = _get_base_config(img_size=480, **kwargs)
args.normalize_output = False
model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs)
return model | null |
185,159 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForVisualQuestionAnswering, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.pooler = Pooler(
input_features=embed_dim,
output_features=embed_dim,
norm_layer=norm_layer,
)
self.pooler.apply(self._init_weights)
self.head = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 2),
norm_layer(embed_dim * 2),
nn.GELU(),
nn.Linear(embed_dim * 2, num_classes),
)
self.head.apply(self._init_weights)
def forward(self, image, question, padding_mask, **kwargs):
outputs = self.beit3(
textual_tokens=question,
visual_tokens=image,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
cls_rep = self.pooler(x)
return self.head(cls_rep)
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=1024, encoder_attention_heads=16,
encoder_ffn_embed_dim=int(1024 * mlp_ratio), encoder_layers=24,
checkpoint_activations=checkpoint_activations,
)
def beit3_large_patch16_384_vqav2(pretrained=False, **kwargs):
args = _get_large_config(img_size=384, **kwargs)
args.normalize_output = False
model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs)
return model | null |
185,160 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
def forward(self, image, question, padding_mask, **kwargs):
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
def beit3_large_patch16_480_vqav2(pretrained=False, **kwargs):
args = _get_large_config(img_size=480, **kwargs)
args.normalize_output = False
model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs)
return model | null |
185,161 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForVisualQuestionAnswering(BEiT3Wrapper):
def __init__(
self,
args,
num_classes,
norm_layer=nn.LayerNorm,
**kwargs
):
super(BEiT3ForVisualQuestionAnswering, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.pooler = Pooler(
input_features=embed_dim,
output_features=embed_dim,
norm_layer=norm_layer,
)
self.pooler.apply(self._init_weights)
self.head = nn.Sequential(
nn.Linear(embed_dim, embed_dim * 2),
norm_layer(embed_dim * 2),
nn.GELU(),
nn.Linear(embed_dim * 2, num_classes),
)
self.head.apply(self._init_weights)
def forward(self, image, question, padding_mask, **kwargs):
outputs = self.beit3(
textual_tokens=question,
visual_tokens=image,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
cls_rep = self.pooler(x)
return self.head(cls_rep)
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=1024, encoder_attention_heads=16,
encoder_ffn_embed_dim=int(1024 * mlp_ratio), encoder_layers=24,
checkpoint_activations=checkpoint_activations,
)
def beit3_large_patch16_768_vqav2(pretrained=False, **kwargs):
args = _get_large_config(img_size=768, **kwargs)
args.normalize_output = False
model = BEiT3ForVisualQuestionAnswering(args, num_classes=3129, **kwargs)
return model | null |
185,162 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForCaptioning(BEiT3Wrapper):
def __init__(
self,
args,
**kwargs
):
super(BEiT3ForCaptioning, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.mlm_head = nn.Linear(embed_dim, args.vocab_size)
self.mlm_head.apply(self._init_weights)
def forward(self, image, text_ids, padding_mask, language_masked_pos, text_len=None, incremental_state=None, **kwargs):
text_len = text_len if text_len is not None else text_ids.size(1)
image_len = self.beit3.vision_embed.num_position_embeddings()
max_len = text_len + image_len
uni_mask = torch.zeros((max_len, max_len), dtype=torch.long, device=text_ids.device)
i_start, i_end = 0, image_len
t_start, t_end = image_len, max_len
# triangle mask for caption to caption
uni_mask[t_start:t_end, t_start:t_end] = torch.tril(torch.ones(text_len, text_len, dtype=torch.long, device=text_ids.device))
# full attention for caption to image
uni_mask[t_start:t_end, i_start:i_end] = 1
# full attention for image to image
uni_mask[i_start:i_end, i_start:i_end] = 1
uni_mask = 1 - uni_mask  # invert: 1 now marks positions that are masked out
if incremental_state is not None:
for idx in range(self.get_num_layers()):
if idx not in incremental_state:
incremental_state[idx] = {}
# for incremental decoding
positions = None
if image is None:
uni_mask = uni_mask[-2:]
padding_mask = None
# the start position equals text_len: fairseq position numbering begins at 2, plus the current decoding offset
positions = torch.arange(text_len, text_ids.size(1) + text_len, device=text_ids.device).long().unsqueeze(0)
outputs = self.beit3(
textual_tokens=text_ids,
visual_tokens=image,
text_padding_position=padding_mask,
attn_mask=uni_mask,
incremental_state=incremental_state,
positions=positions,
)
if image is not None:
text_feats = outputs["encoder_out"][:, image_len:]
else:
text_feats = outputs["encoder_out"]
if language_masked_pos is not None:
text_feats = text_feats[language_masked_pos.bool()]
return self.mlm_head(text_feats), incremental_state
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_224_captioning(pretrained=False, **kwargs):
args = _get_base_config(**kwargs)
model = BEiT3ForCaptioning(args, **kwargs)
return model | null |
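A tiny worked example of the attention mask built in forward, using image_len=2 and text_len=3 for readability; after the 1 - uni_mask inversion, 1 marks a blocked position, so image tokens never attend to text while caption tokens attend causally:
import torch

image_len, text_len = 2, 3
max_len = image_len + text_len
m = torch.zeros((max_len, max_len), dtype=torch.long)
m[image_len:, image_len:] = torch.tril(torch.ones(text_len, text_len, dtype=torch.long))
m[image_len:, :image_len] = 1  # caption attends to all image tokens
m[:image_len, :image_len] = 1  # image attends to image only
m = 1 - m                      # 1 = masked out
# row 2 (first caption token) is [0, 0, 0, 1, 1]: it sees both image tokens and itself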
185,163 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForCaptioning(BEiT3Wrapper):
def __init__(
self,
args,
**kwargs
):
super(BEiT3ForCaptioning, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.mlm_head = nn.Linear(embed_dim, args.vocab_size)
self.mlm_head.apply(self._init_weights)
def forward(self, image, text_ids, padding_mask, language_masked_pos, text_len=None, incremental_state=None, **kwargs):
text_len = text_len if text_len is not None else text_ids.size(1)
image_len = self.beit3.vision_embed.num_position_embeddings()
max_len = text_len + image_len
uni_mask = torch.zeros((max_len, max_len), dtype=torch.long, device=text_ids.device)
i_start, i_end = 0, image_len
t_start, t_end = image_len, max_len
# triangle mask for caption to caption
uni_mask[t_start:t_end, t_start:t_end] = torch.tril(torch.ones(text_len, text_len, dtype=torch.long, device=text_ids.device))
# full attention for caption to image
uni_mask[t_start:t_end, i_start:i_end] = 1
# full attention for image to image
uni_mask[i_start:i_end, i_start:i_end] = 1
uni_mask = 1 - uni_mask  # invert: 1 now marks positions that are masked out
if incremental_state is not None:
for idx in range(self.get_num_layers()):
if idx not in incremental_state:
incremental_state[idx] = {}
# for incremental decoding
positions = None
if image is None:
uni_mask = uni_mask[-2:]
padding_mask = None
# the start position equals text_len: fairseq position numbering begins at 2, plus the current decoding offset
positions = torch.arange(text_len, text_ids.size(1) + text_len, device=text_ids.device).long().unsqueeze(0)
outputs = self.beit3(
textual_tokens=text_ids,
visual_tokens=image,
text_padding_position=padding_mask,
attn_mask=uni_mask,
incremental_state=incremental_state,
positions=positions,
)
if image is not None:
text_feats = outputs["encoder_out"][:, image_len:]
else:
text_feats = outputs["encoder_out"]
if language_masked_pos is not None:
text_feats = text_feats[language_masked_pos.bool()]
return self.mlm_head(text_feats), incremental_state
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_480_captioning(pretrained=False, **kwargs):
args = _get_base_config(img_size=480, **kwargs)
model = BEiT3ForCaptioning(args, **kwargs)
return model | null |
185,164 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
class BEiT3ForCaptioning(BEiT3Wrapper):
def __init__(
self,
args,
**kwargs
):
def forward(self, image, text_ids, padding_mask, language_masked_pos, text_len=None, incremental_state=None, **kwargs):
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
def beit3_large_patch16_480_captioning(pretrained=False, **kwargs):
args = _get_large_config(img_size=480, **kwargs)
model = BEiT3ForCaptioning(args, **kwargs)
return model | null |
185,165 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForRetrieval(BEiT3Wrapper):
def __init__(
self,
args,
**kwargs
):
super(BEiT3ForRetrieval, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.language_head = nn.Linear(embed_dim, embed_dim, bias=False)
self.vision_head = nn.Linear(embed_dim, embed_dim, bias=False)
self.language_head.apply(self._init_weights)
self.vision_head.apply(self._init_weights)
self.criterion = utils.ClipLoss(
rank=utils.get_rank(),
world_size=utils.get_world_size(),
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def forward(self, image=None, text_description=None, padding_mask=None, only_infer=False, **kwargs):
if image is not None:
outputs = self.beit3(
textual_tokens=None,
visual_tokens=image,
text_padding_position=None,
)
x = outputs["encoder_out"]
vision_cls = self.vision_head(x[:, 0, :])
vision_cls = F.normalize(vision_cls, dim=-1)
else:
vision_cls = None
if text_description is not None:
outputs = self.beit3(
textual_tokens=text_description,
visual_tokens=None,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
language_cls = self.language_head(x[:, 0, :])
language_cls = F.normalize(language_cls, dim=-1)
else:
language_cls = None
if only_infer:
return vision_cls, language_cls
else:
loss, logits_per_image, logits_per_text = self.criterion(
vision_cls, language_cls, self.logit_scale.exp())
return loss, vision_cls, language_cls
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_224_retrieval(pretrained=False, **kwargs):
args = _get_base_config(**kwargs)
model = BEiT3ForRetrieval(args, **kwargs)
return model | null |
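A hedged inference sketch for the retrieval head with only_infer=True (token ids and shapes illustrative; both embeddings come back L2-normalized, so a dot product is the cosine similarity):
import torch

model = beit3_base_patch16_224_retrieval()
model.eval()
with torch.no_grad():
    img_emb, _ = model(image=torch.randn(1, 3, 224, 224), only_infer=True)
    _, txt_emb = model(
        text_description=torch.randint(0, 64010, (1, 16)),
        padding_mask=torch.zeros(1, 16, dtype=torch.long),
        only_infer=True,
    )
similarity = img_emb @ txt_emb.t()  # cosine similarity of image/text embeddings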
185,166 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForRetrieval(BEiT3Wrapper):
def __init__(
self,
args,
**kwargs
):
super(BEiT3ForRetrieval, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.language_head = nn.Linear(embed_dim, embed_dim, bias=False)
self.vision_head = nn.Linear(embed_dim, embed_dim, bias=False)
self.language_head.apply(self._init_weights)
self.vision_head.apply(self._init_weights)
self.criterion = utils.ClipLoss(
rank=utils.get_rank(),
world_size=utils.get_world_size(),
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def forward(self, image=None, text_description=None, padding_mask=None, only_infer=False, **kwargs):
if image is not None:
outputs = self.beit3(
textual_tokens=None,
visual_tokens=image,
text_padding_position=None,
)
x = outputs["encoder_out"]
vision_cls = self.vision_head(x[:, 0, :])
vision_cls = F.normalize(vision_cls, dim=-1)
else:
vision_cls = None
if text_description is not None:
outputs = self.beit3(
textual_tokens=text_description,
visual_tokens=None,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
language_cls = self.language_head(x[:, 0, :])
language_cls = F.normalize(language_cls, dim=-1)
else:
language_cls = None
if only_infer:
return vision_cls, language_cls
else:
loss, logits_per_image, logits_per_text = self.criterion(
vision_cls, language_cls, self.logit_scale.exp())
return loss, vision_cls, language_cls
def _get_base_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=768, encoder_attention_heads=12,
encoder_ffn_embed_dim=int(768 * mlp_ratio), encoder_layers=12,
checkpoint_activations=checkpoint_activations,
)
def beit3_base_patch16_384_retrieval(pretrained=False, **kwargs):
args = _get_base_config(img_size=384, **kwargs)
model = BEiT3ForRetrieval(args, **kwargs)
return model | null |
185,167 | import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.models.registry import register_model
import numpy as np
import utils
from modeling_utils import BEiT3Wrapper, _get_base_config, _get_large_config
from torchscale.architecture.config import EncoderConfig  # needed by the inlined _get_*_config below
class BEiT3ForRetrieval(BEiT3Wrapper):
def __init__(
self,
args,
**kwargs
):
super(BEiT3ForRetrieval, self).__init__(args=args)
embed_dim = args.encoder_embed_dim
self.language_head = nn.Linear(embed_dim, embed_dim, bias=False)
self.vision_head = nn.Linear(embed_dim, embed_dim, bias=False)
self.language_head.apply(self._init_weights)
self.vision_head.apply(self._init_weights)
self.criterion = utils.ClipLoss(
rank=utils.get_rank(),
world_size=utils.get_world_size(),
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def forward(self, image=None, text_description=None, padding_mask=None, only_infer=False, **kwargs):
if image is not None:
outputs = self.beit3(
textual_tokens=None,
visual_tokens=image,
text_padding_position=None,
)
x = outputs["encoder_out"]
vision_cls = self.vision_head(x[:, 0, :])
vision_cls = F.normalize(vision_cls, dim=-1)
else:
vision_cls = None
if text_description is not None:
outputs = self.beit3(
textual_tokens=text_description,
visual_tokens=None,
text_padding_position=padding_mask,
)
x = outputs["encoder_out"]
language_cls = self.language_head(x[:, 0, :])
language_cls = F.normalize(language_cls, dim=-1)
else:
language_cls = None
if only_infer:
return vision_cls, language_cls
else:
loss, logits_per_image, logits_per_text = self.criterion(
vision_cls, language_cls, self.logit_scale.exp())
return loss, vision_cls, language_cls
def _get_large_config(
img_size=224, patch_size=16, drop_path_rate=0,
checkpoint_activations=None, mlp_ratio=4, vocab_size=64010, **kwargs
):
return EncoderConfig(
img_size=img_size, patch_size=patch_size, vocab_size=vocab_size, multiway=True,
layernorm_embedding=False, normalize_output=True, no_output_layer=True,
drop_path_rate=drop_path_rate, encoder_embed_dim=1024, encoder_attention_heads=16,
encoder_ffn_embed_dim=int(1024 * mlp_ratio), encoder_layers=24,
checkpoint_activations=checkpoint_activations,
)
def beit3_large_patch16_384_retrieval(pretrained=False, **kwargs):
args = _get_large_config(img_size=384, **kwargs)
model = BEiT3ForRetrieval(args, **kwargs)
return model | null |
185,168 | import re
contractions = {
"aint": "ain't",
"arent": "aren't",
"cant": "can't",
"couldve": "could've",
"couldnt": "couldn't",
"couldn'tve": "couldn't've",
"couldnt've": "couldn't've",
"didnt": "didn't",
"doesnt": "doesn't",
"dont": "don't",
"hadnt": "hadn't",
"hadnt've": "hadn't've",
"hadn'tve": "hadn't've",
"hasnt": "hasn't",
"havent": "haven't",
"hed": "he'd",
"hed've": "he'd've",
"he'dve": "he'd've",
"hes": "he's",
"howd": "how'd",
"howll": "how'll",
"hows": "how's",
"Id've": "I'd've",
"I'dve": "I'd've",
"Im": "I'm",
"Ive": "I've",
"isnt": "isn't",
"itd": "it'd",
"itd've": "it'd've",
"it'dve": "it'd've",
"itll": "it'll",
"let's": "let's",
"maam": "ma'am",
"mightnt": "mightn't",
"mightnt've": "mightn't've",
"mightn'tve": "mightn't've",
"mightve": "might've",
"mustnt": "mustn't",
"mustve": "must've",
"neednt": "needn't",
"notve": "not've",
"oclock": "o'clock",
"oughtnt": "oughtn't",
"ow's'at": "'ow's'at",
"'ows'at": "'ow's'at",
"'ow'sat": "'ow's'at",
"shant": "shan't",
"shed've": "she'd've",
"she'dve": "she'd've",
"she's": "she's",
"shouldve": "should've",
"shouldnt": "shouldn't",
"shouldnt've": "shouldn't've",
"shouldn'tve": "shouldn't've",
"somebody'd": "somebodyd",
"somebodyd've": "somebody'd've",
"somebody'dve": "somebody'd've",
"somebodyll": "somebody'll",
"somebodys": "somebody's",
"someoned": "someone'd",
"someoned've": "someone'd've",
"someone'dve": "someone'd've",
"someonell": "someone'll",
"someones": "someone's",
"somethingd": "something'd",
"somethingd've": "something'd've",
"something'dve": "something'd've",
"somethingll": "something'll",
"thats": "that's",
"thered": "there'd",
"thered've": "there'd've",
"there'dve": "there'd've",
"therere": "there're",
"theres": "there's",
"theyd": "they'd",
"theyd've": "they'd've",
"they'dve": "they'd've",
"theyll": "they'll",
"theyre": "they're",
"theyve": "they've",
"twas": "'twas",
"wasnt": "wasn't",
"wed've": "we'd've",
"we'dve": "we'd've",
"weve": "we've",
"werent": "weren't",
"whatll": "what'll",
"whatre": "what're",
"whats": "what's",
"whatve": "what've",
"whens": "when's",
"whered": "where'd",
"wheres": "where's",
"whereve": "where've",
"whod": "who'd",
"whod've": "who'd've",
"who'dve": "who'd've",
"wholl": "who'll",
"whos": "who's",
"whove": "who've",
"whyll": "why'll",
"whyre": "why're",
"whys": "why's",
"wont": "won't",
"wouldve": "would've",
"wouldnt": "wouldn't",
"wouldnt've": "wouldn't've",
"wouldn'tve": "wouldn't've",
"yall": "y'all",
"yall'll": "y'all'll",
"y'allll": "y'all'll",
"yall'd've": "y'all'd've",
"y'alld've": "y'all'd've",
"y'all'dve": "y'all'd've",
"youd": "you'd",
"youd've": "you'd've",
"you'dve": "you'd've",
"youll": "you'll",
"youre": "you're",
"youve": "you've",
}
manual_map = {
"none": "0",
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9",
"ten": "10",
}
articles = ["a", "an", "the"]
period_strip = re.compile(r"(?!<=\d)(\.)(?!\d)")
comma_strip = re.compile(r"(\d)(,)(\d)")
punct = [
";",
r"/",
"[",
"]",
'"',
"{",
"}",
"(",
")",
"=",
"+",
"\\",
"_",
"-",
">",
"<",
"@",
"`",
",",
"?",
"!",
]
def normalize_word(token):
_token = token
for p in punct:
if (p + " " in token or " " + p in token) or (
re.search(comma_strip, token) != None
):
_token = _token.replace(p, "")
else:
_token = _token.replace(p, " ")
token = period_strip.sub("", _token, re.UNICODE)
_token = []
temp = token.lower().split()
for word in temp:
word = manual_map.setdefault(word, word)
if word not in articles:
_token.append(word)
for i, word in enumerate(_token):
if word in contractions:
_token[i] = contractions[word]
token = " ".join(_token)
token = token.replace(",", "")
return token | null |
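Two concrete traces of normalize_word on typical VQA answers (the outputs follow from the code above): punctuation is stripped or spaced out, number words map to digits, articles are dropped, and bare contractions regain their apostrophes:
assert normalize_word("Isnt it ten?") == "isn't it 10"
assert normalize_word("the red car.") == "red car"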
185,169 | import os
import json
import random
import torch
import glob
from collections import defaultdict, Counter
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data.transforms import RandomResizedCropAndInterpolation
from timm.data import create_transform
import utils
from glossary import normalize_word
from randaug import RandomAugment
def _write_data_into_jsonl(items, jsonl_file):
with open(jsonl_file, mode="w", encoding="utf-8") as writer:
for data in items:
writer.write(json.dumps(data, indent=None))
writer.write('\n')
print("Write %s with %d items !" % (jsonl_file, len(items)))
def _make_retrieval_coco_karpathy_dataset_index(
data_path,
tokenizer,
split=("train", "restval"),
split_name="train",
):
coco_karpathy_split_json_file = os.path.join(data_path, "dataset_coco.json")
items = []
image_counter = set()
print("read %s" % coco_karpathy_split_json_file)
with open(coco_karpathy_split_json_file, mode="r", encoding="utf-8") as reader:
data = json.loads(reader.read())
for item in data["images"]:
if item["split"] in split:
image_path = os.path.join(item["filepath"], item["filename"])
for sent in item["sentences"]:
tokens = tokenizer.tokenize(sent["raw"])
token_ids = tokenizer.convert_tokens_to_ids(tokens)
items.append({
"image_path": image_path,
"text_segment": token_ids,
"image_id": len(image_counter),
})
if image_path not in image_counter:
image_counter.add(image_path)
print("Find %d images and %d image-text pairs for karpathy dataset %s split !" % \
(len(image_counter), len(items), split_name))
index_file = os.path.join(data_path, "coco_retrieval.%s.jsonl" % split_name)
_write_data_into_jsonl(items, index_file)
pass | null |
185,170 | import os
import json
import random
import torch
import glob
from collections import defaultdict, Counter
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data.transforms import RandomResizedCropAndInterpolation
from timm.data import create_transform
import utils
from glossary import normalize_word
from randaug import RandomAugment
def _write_data_into_jsonl(items, jsonl_file):
with open(jsonl_file, mode="w", encoding="utf-8") as writer:
for data in items:
writer.write(json.dumps(data, indent=None))
writer.write('\n')
print("Write %s with %d items !" % (jsonl_file, len(items)))
def _make_captioning_coco_karpathy_dataset_index(
data_path,
tokenizer,
split=("train", "restval"),
split_name="train",
):
coco_karpathy_split_json_file = os.path.join(data_path, "dataset_coco.json")
items = []
image_counter = set()
print("read %s" % coco_karpathy_split_json_file)
with open(coco_karpathy_split_json_file, mode="r", encoding="utf-8") as reader:
data = json.loads(reader.read())
for item in data["images"]:
if item["split"] in split:
image_path = os.path.join(item["filepath"], item["filename"])
if item["split"] in ["train", "restval"]:
for sent in item["sentences"]:
tokens = tokenizer.tokenize(sent["raw"])
token_ids = tokenizer.convert_tokens_to_ids(tokens)
items.append({
"image_path": image_path,
"text_segment": token_ids,
"image_id": item["cocoid"],
})
else:
items.append({
"image_path": image_path,
"text_segment": None,
"image_id": item["cocoid"],
})
if image_path not in image_counter:
image_counter.add(image_path)
print("Find %d images and %d image-text pairs for karpathy dataset %s split !" % \
(len(image_counter), len(items), split_name))
index_file = os.path.join(data_path, "coco_captioning.%s.jsonl" % split_name)
_write_data_into_jsonl(items, index_file)
pass | null |
185,171 | import os
import json
import random
import torch
import glob
from collections import defaultdict, Counter
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data.transforms import RandomResizedCropAndInterpolation
from timm.data import create_transform
import utils
from glossary import normalize_word
from randaug import RandomAugment
def _write_data_into_jsonl(items, jsonl_file):
    with open(jsonl_file, mode="w", encoding="utf-8") as writer:
        for data in items:
            writer.write(json.dumps(data, indent=None))
            writer.write('\n')
    print("Wrote %s with %d items!" % (jsonl_file, len(items)))
def _make_nocaps_dataset_index(
data_path,
split="val",
):
if split == "val":
json_file = "nocaps_val_4500_captions.json"
elif split == "test":
json_file = "nocaps_test_image_info.json"
nocaps_split_json_file = os.path.join(data_path, json_file)
items = []
image_counter = set()
print("read %s" % nocaps_split_json_file)
with open(nocaps_split_json_file, mode="r", encoding="utf-8") as reader:
data = json.loads(reader.read())
for item in data["images"]:
image_path = os.path.join(split, item["file_name"])
items.append({
"image_path": image_path,
"text_segment": None,
"image_id": item["id"],
})
if image_path not in image_counter:
image_counter.add(image_path)
print("Find %d images and %d image-text pairs for nocaps dataset %s split !" % \
(len(image_counter), len(items), split))
index_file = os.path.join(data_path, "nocaps.%s.jsonl" % split)
_write_data_into_jsonl(items, index_file) | null |
185,172 | import os
import json
import random
import torch
import glob
from collections import defaultdict, Counter
from torchvision import transforms
from torchvision.datasets.folder import default_loader
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.data.transforms import RandomResizedCropAndInterpolation
from timm.data import create_transform
import utils
from glossary import normalize_word
from randaug import RandomAugment
def create_dataset_by_split(args, split, is_train=True):
transform = build_transform(is_train=is_train, args=args)
dataset_class = task2dataset[args.task]
tokenizer = get_sentencepiece_model_for_beit3(args)
opt_kwargs = {}
if args.task in ["coco_captioning", "nocaps"]:
opt_kwargs["mask_prob"] = args.captioning_mask_prob
dataset = dataset_class(
data_path=args.data_path, split=split,
transform=transform, tokenizer=tokenizer,
num_max_bpe_tokens=args.num_max_bpe_tokens,
task=args.task, **opt_kwargs,
)
if is_train:
batch_size = args.batch_size
elif hasattr(args, "eval_batch_size") and args.eval_batch_size is not None:
batch_size = args.eval_batch_size
else:
batch_size = int(args.batch_size * 1.5)
return create_dataloader(
dataset, is_train=is_train, batch_size=batch_size,
num_workers=args.num_workers, pin_mem=args.pin_mem, dist_eval=args.dist_eval,
)
def create_downstream_dataset(args, is_eval=False):
if is_eval:
return create_dataset_by_split(args, split="test", is_train=False)
else:
return \
create_dataset_by_split(args, split="train", is_train=True), \
create_dataset_by_split(args, split="val", is_train=True) | null |
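A hedged wiring sketch, assuming `args` comes from the fine-tuning argument parser shown later in this dump (`get_args`) and carries the task, data path, and batch settings:

args, _ = get_args()
if args.eval:
    test_loader = create_downstream_dataset(args, is_eval=True)
else:
    train_loader, val_loader = create_downstream_dataset(args)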
185,173 | import math
import sys
import json
from typing import Iterable, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.utils import ModelEma
from timm.utils import accuracy, ModelEma
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets import get_sentencepiece_model_for_beit3
import utils
class NLVR2Handler(TaskHandler):
def __init__(self) -> None:
super().__init__()
self.criterion = torch.nn.CrossEntropyLoss()
def train_batch(self, model, image, image2, language_tokens, padding_mask, label):
logits = model(
image_a=image, image_b=image2,
text_description=language_tokens,
padding_mask=padding_mask)
acc = (logits.max(-1)[-1] == label).float().mean()
return {
"loss": self.criterion(input=logits, target=label),
"acc": acc,
}
def eval_batch(self, model, image, image2, language_tokens, padding_mask, label):
logits = model(
image_a=image, image_b=image2,
text_description=language_tokens,
padding_mask=padding_mask)
batch_size = language_tokens.shape[0]
acc = (logits.max(-1)[-1] == label).float().sum(0) * 100.0 / batch_size
self.metric_logger.meters['acc'].update(acc.item(), n=batch_size)
def after_eval(self, **kwargs):
print('* Acc {acc.global_avg:.3f}'.format(acc=self.metric_logger.acc))
return {k: meter.global_avg for k, meter in self.metric_logger.meters.items()}, "acc"
class ImageNetHandler(TaskHandler):
def __init__(self, args) -> None:
super().__init__()
mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None
if mixup_active:
# smoothing is handled with mixup label transform
self.criterion = SoftTargetCrossEntropy()
elif args.label_smoothing > 0.:
self.criterion = LabelSmoothingCrossEntropy(smoothing=args.label_smoothing)
else:
self.criterion = torch.nn.CrossEntropyLoss()
def train_batch(self, model, image, label):
logits = model(image=image)
return {
"loss": self.criterion(logits, label),
}
def eval_batch(self, model, image, label):
logits = model(image=image)
batch_size = image.shape[0]
acc1, acc5 = accuracy(logits, label, topk=(1, 5))
self.metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
self.metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
def after_eval(self, **kwargs):
print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}'
.format(top1=self.metric_logger.acc1, top5=self.metric_logger.acc5))
return {k: meter.global_avg for k, meter in self.metric_logger.meters.items()}, "acc1"
class RetrievalHandler(TaskHandler):
def __init__(self) -> None:
super().__init__()
self.image_feats = []
self.text_feats = []
self.image_ids = []
self.metric_logger = None
def train_batch(self, model, image, language_tokens, padding_mask, image_id):
loss, vision_cls, language_cls = model(
image=image, text_description=language_tokens, padding_mask=padding_mask)
return {
"loss": loss,
}
def before_eval(self, metric_logger, **kwargs):
self.image_feats.clear()
self.text_feats.clear()
self.image_ids.clear()
self.metric_logger = metric_logger
def eval_batch(self, model, image, language_tokens, padding_mask, image_id):
vision_cls, _ = model(image=image, only_infer=True)
_, language_cls = model(
text_description=language_tokens, padding_mask=padding_mask, only_infer=True)
self.image_feats.append(vision_cls.clone())
self.text_feats.append(language_cls.clone())
self.image_ids.append(image_id.clone())
def after_eval(self, **kwargs):
image_feats = {}
for feats, ids in zip(self.image_feats, self.image_ids):
for i, _idx in enumerate(ids):
idx = _idx.item()
if idx not in image_feats:
image_feats[idx] = feats[i]
tiids = torch.cat(self.image_ids, dim=0)
iids = []
sorted_tensors = []
for key in sorted(image_feats.keys()):
sorted_tensors.append(image_feats[key].view(1, -1))
iids.append(key)
image_cls_feats = torch.cat(sorted_tensors, dim=0)
text_cls_feats = torch.cat(self.text_feats, dim=0)
scores = image_cls_feats @ text_cls_feats.t()
iids = torch.LongTensor(iids).to(scores.device)
print("scores: {}".format(scores.size()))
print("iids: {}".format(iids.size()))
print("tiids: {}".format(tiids.size()))
topk10 = scores.topk(10, dim=1)
topk5 = scores.topk(5, dim=1)
topk1 = scores.topk(1, dim=1)
topk10_iids = tiids[topk10.indices]
topk5_iids = tiids[topk5.indices]
topk1_iids = tiids[topk1.indices]
tr_r10 = (iids.unsqueeze(1) == topk10_iids).float().max(dim=1)[0].mean()
tr_r5 = (iids.unsqueeze(1) == topk5_iids).float().max(dim=1)[0].mean()
tr_r1 = (iids.unsqueeze(1) == topk1_iids).float().max(dim=1)[0].mean()
topk10 = scores.topk(10, dim=0)
topk5 = scores.topk(5, dim=0)
topk1 = scores.topk(1, dim=0)
topk10_iids = iids[topk10.indices]
topk5_iids = iids[topk5.indices]
topk1_iids = iids[topk1.indices]
ir_r10 = (tiids.unsqueeze(0) == topk10_iids).float().max(dim=0)[0].mean()
ir_r5 = (tiids.unsqueeze(0) == topk5_iids).float().max(dim=0)[0].mean()
ir_r1 = (tiids.unsqueeze(0) == topk1_iids).float().max(dim=0)[0].mean()
eval_result = {
"tr_r10": tr_r10.item() * 100.0,
"tr_r5": tr_r5.item() * 100.0,
"tr_r1": tr_r1.item() * 100.0,
"ir_r10": ir_r10.item() * 100.0,
"ir_r5": ir_r5.item() * 100.0,
"ir_r1": ir_r1.item() * 100.0,
"average_score": 100.0 * (tr_r1 + tr_r5 + tr_r10 + ir_r1 + ir_r5 + ir_r10).item() / 6.0,
}
print('* Eval result = %s' % json.dumps(eval_result))
return eval_result, "average_score"
class VQAHandler(TaskHandler):
def __init__(self) -> None:
super().__init__()
self.predictions = []
self.criterion = nn.BCEWithLogitsLoss(reduction='mean')
self.label2ans = None
def train_batch(self, model, image, language_tokens, padding_mask, labels):
logits = model(
image=image, question=language_tokens,
padding_mask=padding_mask)
return {
"loss": self.criterion(input=logits.float(), target=labels.float()) * labels.shape[1],
}
def before_eval(self, metric_logger, data_loader, **kwargs):
self.predictions.clear()
self.metric_logger = metric_logger
self.label2ans = data_loader.dataset.label2ans
def eval_batch(self, model, image, language_tokens, padding_mask, labels=None, qid=None):
logits = model(
image=image, question=language_tokens,
padding_mask=padding_mask)
batch_size = language_tokens.shape[0]
if labels is not None:
scores = utils.VQAScore()(logits, labels) * 100.0
self.metric_logger.meters['score'].update(scores.item(), n=batch_size)
else:
_, preds = logits.max(-1)
for image_id, pred in zip(qid, preds):
self.predictions.append({
"question_id": image_id.item(),
"answer": self.label2ans[pred.item()],
})
def after_eval(self, **kwargs):
if len(self.predictions) == 0:
print('* Score {score.global_avg:.3f}'.format(score=self.metric_logger.score))
return {k: meter.global_avg for k, meter in self.metric_logger.meters.items()}, "score"
else:
return self.predictions, "prediction"
class CaptioningHandler(TaskHandler):
def __init__(self, args) -> None:
super().__init__()
self.predictions = []
self.criterion = utils.BertCaptioningLoss(args.label_smoothing, args.drop_worst_ratio, args.drop_worst_after)
self.tokenizer = get_sentencepiece_model_for_beit3(args)
self.num_beams = args.num_beams
self.max_len = args.num_max_bpe_tokens
self.length_penalty = args.length_penalty
self.vocab_size = args.vocab_size
def train_batch(self, model, image, language_tokens, masked_tokens, language_masked_pos, padding_mask, image_id, global_step):
logits, _ = model(
image=image, text_ids=masked_tokens, padding_mask=padding_mask, language_masked_pos=language_masked_pos, image_id=image_id)
masked_labels = language_tokens[language_masked_pos.bool()]
score = torch.max(logits, -1)[1].data == masked_labels
acc = torch.sum(score.float()) / torch.sum(language_masked_pos)
return {
"loss": self.criterion(logits, masked_labels, global_step),
"acc": acc
}
def before_eval(self, metric_logger, data_loader, **kwargs):
self.predictions.clear()
self.metric_logger = metric_logger
def eval_batch(self, model, image, image_id=None):
cur_len = 2
num_keep_best = 1
TOPN_PER_BEAM = 3
batch_size = image.size(0)
mask_id = self.tokenizer.mask_token_id
cls_id = self.tokenizer.cls_token_id
pad_id = self.tokenizer.pad_token_id
sep_id = self.tokenizer.sep_token_id
eos_token_ids = [sep_id]
cls_ids = torch.full(
(batch_size, 1), cls_id, dtype=torch.long, device=image.device
)
mask_ids = torch.full(
(batch_size, 1), mask_id, dtype=torch.long, device=image.device
)
cur_input_ids = torch.cat([cls_ids, mask_ids], dim=1)
tmp_ids = torch.full(
(batch_size, self.max_len-1), mask_id, dtype=torch.long, device=image.device
)
decoding_results = torch.cat([cls_ids, tmp_ids], dim=1)
# Expand input to num beams
cur_input_ids = cur_input_ids.unsqueeze(1).expand(batch_size, self.num_beams, cur_len)
cur_input_ids = cur_input_ids.contiguous().view(batch_size * self.num_beams, cur_len) # (batch_size * num_beams, cur_len)
decoding_results = decoding_results.unsqueeze(1).expand(batch_size, self.num_beams, self.max_len)
        decoding_results = decoding_results.contiguous().view(batch_size * self.num_beams, self.max_len)  # (batch_size * num_beams, max_len)
image = image.unsqueeze(1).expand(batch_size, self.num_beams, image.size(-3), image.size(-2), image.size(-1))
image = image.contiguous().view(batch_size * self.num_beams, image.size(-3), image.size(-2), image.size(-1))
generated_hyps = [
utils.BeamHypotheses(
num_keep_best, self.max_len, length_penalty=self.length_penalty, early_stopping=False
) for _ in range(batch_size)
]
# scores for each sentence in the beam
beam_scores = torch.zeros((batch_size, self.num_beams), dtype=torch.float, device=cur_input_ids.device)
beam_scores[:, 1:] = -1e9
beam_scores = beam_scores.view(-1) # shape (batch_size * num_beams,)
# done sentences
done = [False for _ in range(batch_size)]
incremental_state = {}
while cur_len <= self.max_len:
next_token_idx = 1
padding_masks = torch.full(
cur_input_ids.shape, 0, dtype=torch.long, device=image.device
)
input_image = image
if cur_len != 2:
input_image = None
outputs, incremental_state_next = model(
image=input_image, text_ids=cur_input_ids, language_masked_pos=None,
padding_mask=padding_masks, text_len=cur_len, incremental_state=incremental_state)
incremental_state = incremental_state_next
# assert outputs.shape[1] == token_len
scores = outputs[:, next_token_idx, :] # (batch_size * num_beams, vocab_size)
scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)
assert scores.size() == (batch_size * self.num_beams, self.vocab_size)
# Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
_scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)
            # re-organize to group the beam together (we are keeping top hypothesis across beams)
_scores = _scores.view(batch_size, self.num_beams * self.vocab_size) # (batch_size, num_beams * vocab_size)
next_scores, next_words = torch.topk(_scores, TOPN_PER_BEAM * self.num_beams, dim=1, largest=True, sorted=True)
assert next_scores.size() == next_words.size() == (batch_size, TOPN_PER_BEAM * self.num_beams)
# next batch beam content
# list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)
next_batch_beam = []
# for each sentence
for batch_ex in range(batch_size):
# if we are done with this sentence
done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item())
if done[batch_ex]:
next_batch_beam.extend([(0, pad_id, 0)] * self.num_beams) # pad the batch
continue
# next sentence beam content
next_sent_beam = []
for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]):
# get beam and word IDs
beam_id = idx // self.vocab_size
word_id = idx % self.vocab_size
# end of sentence, or next word
# if word_id.item() in eos_token_ids or cur_len + 1 == max_len:
if (word_id.item() in eos_token_ids and cur_len + 1 <= self.max_len) or (cur_len + 1 == self.max_len):
generated_hyps[batch_ex].add(
decoding_results[batch_ex * self.num_beams + beam_id, :cur_len].clone(), score.item()
)
else:
next_sent_beam.append((score, word_id, batch_ex * self.num_beams + beam_id))
# the beam for next step is full
if len(next_sent_beam) == self.num_beams:
break
# update next beam content
if cur_len + 1 == self.max_len:
assert len(next_sent_beam) == 0
else:
assert len(next_sent_beam) == self.num_beams
if len(next_sent_beam) == 0:
next_sent_beam = [(0, pad_id, 0)] * self.num_beams # pad the batch
next_batch_beam.extend(next_sent_beam)
assert len(next_batch_beam) == self.num_beams * (batch_ex + 1)
# sanity check / prepare next batch
assert len(next_batch_beam) == batch_size * self.num_beams
beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
beam_words = cur_input_ids.new([x[1] for x in next_batch_beam])
beam_idx = cur_input_ids.new([x[2] for x in next_batch_beam])
# re-order batch
cur_input_ids = cur_input_ids[beam_idx, :]
decoding_results = decoding_results[beam_idx, :]
for module in incremental_state:
for key in incremental_state[module]:
result = incremental_state[module][key].index_select(0, beam_idx)
incremental_state[module][key] = result[:,:,:-1,:]
next_ids = torch.full(
(batch_size * self.num_beams, 1), mask_id, dtype=torch.long, device=image.device
)
cur_input_ids = torch.cat([beam_words.unsqueeze(1), next_ids], dim=1)
decoding_results[:, cur_len-1] = beam_words
# update current length
cur_len = cur_len + 1
# stop when we are done with each sentence
if all(done):
break
# select the best hypotheses
tgt_len = torch.ones(batch_size, num_keep_best, dtype=torch.long)
logprobs = torch.zeros(batch_size, num_keep_best,
dtype=torch.float).fill_(-1e5).to(cur_input_ids.device)
all_best = []
for i, hypotheses in enumerate(generated_hyps):
best = []
hyp_scores = torch.tensor([x[0] for x in hypotheses.hyp])
_, best_indices = torch.topk(hyp_scores,
min(num_keep_best, len(hyp_scores)), largest=True)
for best_idx, hyp_idx in enumerate(best_indices):
conf, best_hyp = hypotheses.hyp[hyp_idx]
best.append(best_hyp)
logprobs[i, best_idx] = conf
tgt_len[i, best_idx] = len(best_hyp) + 1 # +1 for the <EOS> symbol
all_best.append(best)
# generate target batch, pad to the same length
decoded = cur_input_ids.new(batch_size, num_keep_best, self.max_len).fill_(pad_id)
for batch_idx, best in enumerate(all_best):
for best_idx, hypo in enumerate(best):
decoded[batch_idx, best_idx, : tgt_len[batch_idx, best_idx] - 1] = hypo
decoded[batch_idx, best_idx, tgt_len[batch_idx, best_idx] - 1] = eos_token_ids[0]
captions = self.tokenizer.batch_decode(decoded.squeeze(1), skip_special_tokens=True)
for qid, pred in zip(image_id, captions):
self.predictions.append({
"image_id": qid.item(),
"caption": pred,
})
def after_eval(self, **kwargs):
return self.predictions, "prediction"
def get_handler(args):
if args.task == "nlvr2":
return NLVR2Handler()
elif args.task == "vqav2":
return VQAHandler()
elif args.task in ("flickr30k", "coco_retrieval"):
return RetrievalHandler()
elif args.task in ("coco_captioning", "nocaps"):
return CaptioningHandler(args)
elif args.task in ("imagenet"):
return ImageNetHandler(args)
else:
raise NotImplementedError("Sorry, %s is not support." % args.task) | null |
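All handlers above share the same contract, so the generic loops can stay task-agnostic. A sketch of the evaluation side (building `model`, `data_loader`, `metric_logger`, and `device` is assumed to happen elsewhere):

handler = get_handler(args)  # e.g. RetrievalHandler when args.task == "coco_retrieval"
handler.before_eval(metric_logger=metric_logger, data_loader=data_loader)
for data in data_loader:
    data = {k: v.to(device, non_blocking=True) for k, v in data.items()}
    handler.eval_batch(model=model, **data)
eval_result, key_metric = handler.after_eval()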
185,174 | import math
import sys
import json
from typing import Iterable, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.utils import ModelEma
from timm.utils import accuracy, ModelEma
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets import get_sentencepiece_model_for_beit3
import utils
class TaskHandler(object):
def __init__(self) -> None:
self.metric_logger = None
self.split = None
def train_batch(self, model, **kwargs):
raise NotImplementedError()
def eval_batch(self, model, **kwargs):
raise NotImplementedError()
def before_eval(self, metric_logger, data_loader, **kwargs):
self.metric_logger = metric_logger
self.split = data_loader.dataset.split
def after_eval(self, **kwargs):
raise NotImplementedError()
def train_one_epoch(
model: torch.nn.Module, data_loader: Iterable,
optimizer: torch.optim.Optimizer, device: torch.device,
handler: TaskHandler, epoch: int, start_steps: int,
lr_schedule_values: list, loss_scaler, max_norm: float = 0,
update_freq: int = 1, model_ema: Optional[ModelEma] = None,
log_writer: Optional[utils.TensorboardLogger] = None,
task = None, mixup_fn=None,
):
model.train(True)
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
metric_logger.add_meter('min_lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 10
if loss_scaler is None:
model.zero_grad()
model.micro_steps = 0
else:
optimizer.zero_grad()
for data_iter_step, data in enumerate(metric_logger.log_every(data_loader, print_freq, header)):
step = data_iter_step // update_freq
global_step = start_steps + step # global training iteration
# Update LR & WD for the first acc
if lr_schedule_values is not None and data_iter_step % update_freq == 0:
for i, param_group in enumerate(optimizer.param_groups):
if lr_schedule_values is not None:
param_group["lr"] = lr_schedule_values[global_step] * param_group["lr_scale"]
# put input data into cuda
for tensor_key in data.keys():
data[tensor_key] = data[tensor_key].to(device, non_blocking=True)
# print("input %s = %s" % (tensor_key, data[tensor_key]))
if loss_scaler is None and tensor_key.startswith("image"):
data[tensor_key] = data[tensor_key].half()
# mixup for imagenet finetuning
if mixup_fn is not None:
data["image"], data["label"] = mixup_fn(data["image"], data["label"])
if task in ["coco_captioning", "nocaps"]:
data["global_step"] = global_step
if loss_scaler is None:
results = handler.train_batch(model, **data)
else:
with torch.cuda.amp.autocast():
results = handler.train_batch(model, **data)
loss = results.pop("loss")
loss_value = loss.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
sys.exit(1)
if loss_scaler is None:
loss /= update_freq
model.backward(loss)
model.step()
if (data_iter_step + 1) % update_freq == 0:
# model.zero_grad()
                # DeepSpeed will call step() & model.zero_grad() automatically
if model_ema is not None:
model_ema.update(model)
grad_norm = None
loss_scale_value = utils.get_loss_scale_for_deepspeed(model)
else:
# this attribute is added by timm on one optimizer (adahessian)
is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
loss /= update_freq
grad_norm = loss_scaler(loss, optimizer, clip_grad=max_norm,
parameters=model.parameters(), create_graph=is_second_order,
update_grad=(data_iter_step + 1) % update_freq == 0)
if (data_iter_step + 1) % update_freq == 0:
optimizer.zero_grad()
if model_ema is not None:
model_ema.update(model)
loss_scale_value = loss_scaler.state_dict()["scale"]
torch.cuda.synchronize()
metric_logger.update(loss=loss_value)
metric_logger.update(loss_scale=loss_scale_value)
min_lr = 10.
max_lr = 0.
for group in optimizer.param_groups:
min_lr = min(min_lr, group["lr"])
max_lr = max(max_lr, group["lr"])
metric_logger.update(lr=max_lr)
metric_logger.update(min_lr=min_lr)
weight_decay_value = None
for group in optimizer.param_groups:
if group["weight_decay"] > 0:
weight_decay_value = group["weight_decay"]
metric_logger.update(weight_decay=weight_decay_value)
metric_logger.update(grad_norm=grad_norm)
if log_writer is not None:
kwargs = {
"loss": loss_value,
}
for key in results:
kwargs[key] = results[key]
log_writer.update(head="train", **kwargs)
kwargs = {
"loss_scale": loss_scale_value,
"lr": max_lr,
"min_lr": min_lr,
"weight_decay": weight_decay_value,
"grad_norm": grad_norm,
}
log_writer.update(head="opt", **kwargs)
log_writer.set_step()
# gather the stats from all processes
metric_logger.synchronize_between_processes()
print("Averaged stats:", metric_logger)
return {k: meter.global_avg for k, meter in metric_logger.meters.items()} | null |
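A sketch of the per-epoch driver around `train_one_epoch`; the argument names follow the signature above, while the schedule (`lr_schedule_values`), scaler, loaders, handler, and `num_training_steps_per_epoch` are assumed to be built elsewhere:

for epoch in range(args.start_epoch, args.epochs):
    train_stats = train_one_epoch(
        model, train_loader, optimizer, device, handler, epoch,
        start_steps=epoch * num_training_steps_per_epoch,
        lr_schedule_values=lr_schedule_values, loss_scaler=loss_scaler,
        max_norm=args.clip_grad, update_freq=args.update_freq,
        model_ema=model_ema, log_writer=log_writer, task=args.task,
        mixup_fn=mixup_fn)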
185,175 | import math
import sys
import json
from typing import Iterable, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.utils import ModelEma
from timm.utils import accuracy, ModelEma
from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
from datasets import get_sentencepiece_model_for_beit3
import utils
def evaluate(data_loader, model, device, handler):
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test:'
# switch to evaluation mode
model.eval()
handler.before_eval(metric_logger=metric_logger, data_loader=data_loader)
for data in metric_logger.log_every(data_loader, 10, header):
for tensor_key in data.keys():
data[tensor_key] = data[tensor_key].to(device, non_blocking=True)
with torch.cuda.amp.autocast():
handler.eval_batch(model=model, **data)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
return handler.after_eval() | null |
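Illustrative call: for the metric-producing handlers the first return value is a dict of averaged meters and the second names the selection metric (for the captioning and VQA prediction paths it is a prediction list and the string "prediction" instead):

test_stats, key_metric = evaluate(test_loader, model, device, handler)
print("key metric %s = %.3f" % (key_metric, test_stats[key_metric]))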
185,176 | import math
import torch
import torch.nn as nn
from timm.models.layers import trunc_normal_ as __call_trunc_normal_
from torchscale.model.BEiT3 import BEiT3
from torchscale.architecture.config import EncoderConfig
def trunc_normal_(tensor, mean=0., std=1.):
__call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std) | null |
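Note that this wrapper truncates at one standard deviation (timm's default bounds are a=-2., b=2.); a quick self-contained check:

import torch

w = torch.empty(256, 256)
trunc_normal_(w, std=0.02)
assert w.abs().max().item() <= 0.02  # every sample is clipped to [-std, std]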
185,177 | import argparse
import datetime
import numpy as np
import time
import torch
import torch.backends.cudnn as cudnn
import json
import os
from pathlib import Path
from timm.data.mixup import Mixup
from timm.models import create_model
from timm.utils import ModelEma
from optim_factory import create_optimizer, get_parameter_groups, \
LayerDecayValueAssigner, get_is_head_flag_for_vit
from engine_for_finetuning import train_one_epoch, get_handler, evaluate
from datasets import create_downstream_dataset
from utils import NativeScalerWithGradNormCount as NativeScaler
import utils
import modeling_finetune
def get_args():
parser = argparse.ArgumentParser('BEiT fine-tuning and evaluation script for image classification', add_help=False)
# Model parameters
parser.add_argument('--model', default='beit_base_patch16_224', type=str, metavar='MODEL',
help='Name of model to train')
parser.add_argument('--task', type=str, required=True,
choices=['nlvr2', 'vqav2', 'flickr30k', 'coco_retrieval', 'coco_captioning', 'nocaps', 'imagenet'],
                        help='Name of the task to fine-tune')
parser.add_argument('--input_size', default=224, type=int,
help='images input size')
parser.add_argument('--drop_path', type=float, default=0.1, metavar='PCT',
help='Drop path rate (default: 0.1)')
parser.add_argument('--checkpoint_activations', action='store_true', default=None,
                        help='Enable activation checkpointing to save memory.')
parser.add_argument('--sentencepiece_model', type=str, required=True,
help='Sentencepiece model path for the pretrained model.')
parser.add_argument('--vocab_size', type=int, default=64010)
parser.add_argument('--num_max_bpe_tokens', type=int, default=64)
parser.add_argument('--model_ema', action='store_true', default=False)
parser.add_argument('--model_ema_decay', type=float, default=0.9999, help='')
parser.add_argument('--model_ema_force_cpu', action='store_true', default=False, help='')
# Optimizer parameters
parser.add_argument('--opt', default='adamw', type=str, metavar='OPTIMIZER',
                        help='Optimizer (default: "adamw")')
parser.add_argument('--opt_eps', default=1e-8, type=float, metavar='EPSILON',
help='Optimizer Epsilon (default: 1e-8)')
parser.add_argument('--opt_betas', default=[0.9, 0.999], type=float, nargs='+', metavar='BETA',
help='Optimizer Betas (default: 0.9, 0.999, use opt default)')
parser.add_argument('--clip_grad', type=float, default=None, metavar='NORM',
help='Clip gradient norm (default: None, no clipping)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight_decay', type=float, default=0.05,
help='weight decay (default: 0.05)')
parser.add_argument('--lr', type=float, default=5e-4, metavar='LR',
help='learning rate (default: 5e-4)')
parser.add_argument('--layer_decay', type=float, default=0.9)
parser.add_argument('--task_head_lr_weight', type=float, default=0)
parser.add_argument('--warmup_lr', type=float, default=1e-6, metavar='LR',
help='warmup learning rate (default: 1e-6)')
parser.add_argument('--min_lr', type=float, default=1e-6, metavar='LR',
help='lower lr bound for cyclic schedulers that hit 0 (1e-6)')
parser.add_argument('--warmup_epochs', type=int, default=5, metavar='N',
help='epochs to warmup LR, if scheduler supports')
parser.add_argument('--warmup_steps', type=int, default=-1, metavar='N',
help='num of steps to warmup LR, will overload warmup_epochs if set > 0')
parser.add_argument('--batch_size', default=64, type=int)
parser.add_argument('--eval_batch_size', default=None, type=int)
parser.add_argument('--epochs', default=20, type=int)
parser.add_argument('--update_freq', default=1, type=int)
parser.add_argument('--save_ckpt_freq', default=5, type=int)
# Augmentation parameters
parser.add_argument('--randaug', action='store_true', default=False)
parser.add_argument('--train_interpolation', type=str, default='bicubic',
                        help='Training interpolation (random, bilinear, bicubic; default: "bicubic")')
# Finetuning params
parser.add_argument('--finetune', default='',
help='finetune from checkpoint')
parser.add_argument('--model_key', default='model|module', type=str)
parser.add_argument('--model_prefix', default='', type=str)
# Dataset parameters
parser.add_argument('--data_path', default='/datasets01/imagenet_full_size/061417/', type=str,
help='dataset path')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--log_dir', default=None,
help='path where to tensorboard log')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--resume', default='',
help='resume from checkpoint')
parser.add_argument('--auto_resume', action='store_true')
parser.add_argument('--no_auto_resume', action='store_false', dest='auto_resume')
parser.set_defaults(auto_resume=True)
parser.add_argument('--save_ckpt', action='store_true')
parser.add_argument('--no_save_ckpt', action='store_false', dest='save_ckpt')
parser.set_defaults(save_ckpt=True)
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true',
help='Perform evaluation only')
parser.add_argument('--dist_eval', action='store_true', default=False,
help='Enabling distributed evaluation')
parser.add_argument('--num_workers', default=10, type=int)
parser.add_argument('--pin_mem', action='store_true',
help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.')
parser.add_argument('--no_pin_mem', action='store_false', dest='pin_mem')
parser.set_defaults(pin_mem=True)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--local_rank', default=-1, type=int)
parser.add_argument('--dist_on_itp', action='store_true')
parser.add_argument('--dist_url', default='env://',
help='url used to set up distributed training')
# parameter for dump predictions (VQA, COCO captioning, NoCaps)
parser.add_argument('--task_cache_path', default=None, type=str)
# parameter for imagenet finetuning
parser.add_argument('--nb_classes', default=1000, type=int,
help='number of the classification types')
parser.add_argument('--mixup', type=float, default=0,
help='mixup alpha, mixup enabled if > 0.')
parser.add_argument('--cutmix', type=float, default=0,
help='cutmix alpha, cutmix enabled if > 0.')
parser.add_argument('--cutmix_minmax', type=float, nargs='+', default=None,
help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)')
parser.add_argument('--mixup_prob', type=float, default=1.0,
help='Probability of performing mixup or cutmix when either/both is enabled')
parser.add_argument('--mixup_switch_prob', type=float, default=0.5,
help='Probability of switching to cutmix when both mixup and cutmix enabled')
parser.add_argument('--mixup_mode', type=str, default='batch',
help='How to apply mixup/cutmix params. Per "batch", "pair", or "elem"')
# augmentation parameters for imagenet finetuning
parser.add_argument('--color_jitter', type=float, default=0.4, metavar='PCT',
help='Color jitter factor (default: 0.4)')
parser.add_argument('--aa', type=str, default='rand-m9-mstd0.5-inc1', metavar='NAME',
                        help='Use AutoAugment policy. "v0" or "original" (default: rand-m9-mstd0.5-inc1)')
parser.add_argument('--smoothing', type=float, default=0.1,
help='Label smoothing (default: 0.1)')
# evaluation parameters for imagenet
parser.add_argument('--crop_pct', type=float, default=None)
# random Erase params for imagenet finetuning
parser.add_argument('--reprob', type=float, default=0.25, metavar='PCT',
help='Random erase prob (default: 0.25)')
parser.add_argument('--remode', type=str, default='pixel',
help='Random erase mode (default: "pixel")')
parser.add_argument('--recount', type=int, default=1,
help='Random erase count (default: 1)')
parser.add_argument('--resplit', action='store_true', default=False,
help='Do not random erase first (clean) augmentation split')
# parameter for captioning finetuning
parser.add_argument('--captioning_mask_prob', type=float, default=0.6)
parser.add_argument('--drop_worst_ratio', type=float, default=0.2)
parser.add_argument('--drop_worst_after', type=int, default=12000)
parser.add_argument('--num_beams', type=int, default=3)
parser.add_argument('--length_penalty', type=float, default=0.6)
# label smoothing for imagenet and captioning
parser.add_argument('--label_smoothing', type=float, default=0.1)
# deepspeed parameters
parser.add_argument('--enable_deepspeed', action='store_true', default=False)
parser.add_argument('--initial_scale_power', type=int, default=16)
parser.add_argument('--zero_stage', default=0, type=int,
help='ZeRO optimizer stage (default: 0)')
known_args, _ = parser.parse_known_args()
if known_args.enable_deepspeed:
try:
import deepspeed
from deepspeed import DeepSpeedConfig
parser = deepspeed.add_config_arguments(parser)
ds_init = deepspeed.initialize
        except ImportError:
            print("Please 'pip install deepspeed==0.4.0'")
            exit(1)
else:
ds_init = None
return parser.parse_args(), ds_init | null |
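A hypothetical invocation; the script name and flag values are illustrative, and only `--task` and `--sentencepiece_model` are required:

# python run_beit3_finetuning.py --task vqav2 --sentencepiece_model beit3.spm \
#        --data_path /data/vqa --batch_size 32 --lr 3e-5 --epochs 10 --output_dir ./out
args, ds_init = get_args()  # ds_init stays None unless --enable_deepspeed is passed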
185,178 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from seqeval.metrics import f1_score, precision_score, recall_score
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForTokenClassification,
DistilBertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForTokenClassification,
XLMRobertaTokenizer
)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
                )  # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
output_file = os.path.join(args.output_dir, "eval_out.txt")
with open(output_file, "w+", encoding="utf-8") as f:
for line in tqdm(preds_list):
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
if args.warmup_ratio > 0:
args.warmup_steps = int(t_total*args.warmup_ratio)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                results, _preds = evaluate(args, model, tokenizer, labels, pad_token_label_id,
                                           mode="dev", prefix='epoch-{}'.format(_ + 1))  # mode="dev" assumed for validation
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step | Train the model |
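Standalone illustration of the `no_decay` grouping above, which exempts biases and LayerNorm weights from weight decay; the toy module is an assumption, standing in for the token-classification model used in the script:

import torch.nn as nn

class Toy(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

model = Toy()
no_decay = ["bias", "LayerNorm.weight"]
decayed = [n for n, _ in model.named_parameters() if not any(nd in n for nd in no_decay)]
exempt = [n for n, _ in model.named_parameters() if any(nd in n for nd in no_decay)]
print(decayed)  # ['dense.weight']
print(exempt)   # ['dense.bias', 'LayerNorm.weight', 'LayerNorm.bias']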
185,179 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import time
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (WEIGHTS_NAME, BertConfig,
BertForSequenceClassification, BertTokenizer,
RobertaConfig,
RobertaForSequenceClassification,
RobertaTokenizer,
XLMConfig, XLMForSequenceClassification,
XLMTokenizer, XLNetConfig,
XLNetForSequenceClassification,
XLNetTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
AlbertConfig,
AlbertForSequenceClassification,
AlbertTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassification,
XLMRobertaTokenizer,
)
from transformers import AdamW, get_linear_schedule_with_warmup
from nlu_finetune.utils_for_glue import glue_compute_metrics as compute_metrics
from nlu_finetune.utils_for_glue import glue_output_modes as output_modes
from nlu_finetune.utils_for_glue import glue_processors as processors
from nlu_finetune.utils_for_glue import glue_convert_examples_to_features as convert_examples_to_features
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, prefix=""):
# Loop to handle MNLI double evaluation (matched, mis-matched)
eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,)
eval_outputs_dirs = (args.output_dir, args.output_dir + '-MM') if args.task_name == "mnli" else (args.output_dir,)
results = {}
for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
cached_dev_file = args.cached_dev_file
if cached_dev_file is not None:
cached_dev_file = cached_dev_file + '_' + eval_task
eval_dataset = load_and_cache_examples(
args, eval_task, tokenizer, cached_features_file=cached_dev_file, evaluate=True)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
if args.disable_tqdm:
epoch_iterator = eval_dataloader
else:
epoch_iterator = tqdm(eval_dataloader, desc="Evaluating")
for batch in epoch_iterator:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs['labels'].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs['labels'].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
elif args.output_mode == "regression":
preds = np.squeeze(preds)
processor = processors[eval_task]()
        result = compute_metrics(eval_task, preds, out_label_ids, processor.get_labels()[1:])
results[eval_task] = result
eval_output_dir = os.path.join(eval_output_dir, prefix)
if not os.path.exists(eval_output_dir):
os.makedirs(eval_output_dir)
output_eval_file = os.path.join(eval_output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
writer.write(json.dumps(result, indent=2))
logger.info("Result = %s" % json.dumps(result, indent=2))
return results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, num_workers=1)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
    warmup_steps = int(t_total * args.warmup_ratio)
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=warmup_steps, num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
results = evaluate(args, model, tokenizer, prefix='epoch-{}'.format(_ + 1))
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step | Train the model |
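A minimal check of the linear warmup/decay schedule wired above, using the same `AdamW` and `get_linear_schedule_with_warmup` imports; the step counts are illustrative:

import torch
from transformers import AdamW, get_linear_schedule_with_warmup

param = torch.nn.Parameter(torch.zeros(1))
optimizer = AdamW([param], lr=1e-4)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(5):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # [5e-05] -- halfway through warmup toward 1e-4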
185,180 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import random
import json
import numpy as np
import torch
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.metrics import cohen_kappa_score, precision_score, recall_score, precision_recall_fscore_support
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.nn import CrossEntropyLoss
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForTokenClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForTokenClassification,
DistilBertTokenizer,
RobertaConfig,
RobertaForTokenClassification,
RobertaTokenizer,
XLMRobertaConfig,
XLMRobertaForTokenClassification,
XLMRobertaTokenizer
)
from transformers import AdamW, get_linear_schedule_with_warmup
from utils_ner import convert_examples_to_features, get_labels, read_examples_from_file
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def save_best_result(best_epoch, best_performance, output_dir):
best_performance["checkpoint"] = best_epoch
with open(os.path.join(output_dir, "best_performance.json"), mode="w") as writer:
writer.write(json.dumps(best_performance, indent=2))
def evaluate(args, model, tokenizer, labels, pad_token_label_id, mode, prefix=""):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s *****", prefix)
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert", "xlnet", "adapterbert"] else None
                ) # XLM and RoBERTa don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
tmp_eval_loss = tmp_eval_loss.mean() # mean() to average on multi-gpu parallel evaluating
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
out_labels = [i for item in out_label_list for i in item]
preds_labels = [i for item in preds_list for i in item]
results = {
"loss": eval_loss,
"f1": token_f1(true = out_labels,pred = preds_labels, labels = labels),
}
logger.info("***** Eval results %s *****", prefix)
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
output_file = os.path.join(args.output_dir, "eval_out.txt")
with open(output_file, "w+", encoding="utf-8") as f:
for line in tqdm(preds_list):
line = " ".join(line) + "\n"
f.write(line)
return results, preds_list
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer, labels, pad_token_label_id):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter()
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
if args.warmup_ratio > 0:
args.warmup_steps = int(t_total*args.warmup_ratio)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
metric_for_best = args.metric_for_choose_best_checkpoint
best_performance = None
best_epoch = None
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
    set_seed(args)  # Added here for reproducibility (even between python 2 and 3)
for _ in train_iterator:
if args.disable_tqdm:
epoch_iterator = train_dataloader
else:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
model.train()
batch = tuple(t.to(args.device) for t in batch)
inputs = {'input_ids': batch[0],
'attention_mask': batch[1],
'labels': batch[3]}
if args.model_type != 'distilbert':
inputs['token_type_ids'] = batch[2] if args.model_type in ['bert', 'xlnet', 'unilm', 'adapterbert'] else None # XLM, DistilBERT and RoBERTa don't use segment_ids
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.max_grad_norm > 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
                if not args.disable_tqdm:
                    epoch_iterator.set_description('Iter (loss=%5.3f) lr=%9.7f' % (loss.item(), scheduler.get_lr()[0]))  # a plain DataLoader has no set_description
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
logs = {}
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
logger.info(json.dumps({**logs, **{'step': global_step}}))
if args.max_steps > 0 and global_step > args.max_steps:
if not args.disable_tqdm:
epoch_iterator.close()
break
if args.local_rank in [-1, 0]:
logs = {}
if args.local_rank == -1 and args.evaluate_during_training: # Only evaluate when single GPU otherwise metrics may not average well
                results, _preds = evaluate(args, model, tokenizer, labels, pad_token_label_id, mode="dev", prefix='epoch-{}'.format(_ + 1))  # mode="dev" is an assumption; evaluate() returns (results, preds_list)
for key, value in results.items():
eval_key = 'eval_{}'.format(key)
logs[eval_key] = value
if metric_for_best is None:
metric_for_best = key
if best_epoch is None or best_performance[metric_for_best] < results[metric_for_best]:
best_epoch = 'epoch-{}'.format(_ + 1)
best_performance = results
loss_scalar = (tr_loss - logging_loss) / args.logging_steps
learning_rate_scalar = scheduler.get_lr()[0]
logs['learning_rate'] = learning_rate_scalar
logs['loss'] = loss_scalar
logging_loss = tr_loss
for key, value in logs.items():
tb_writer.add_scalar(key, value, global_step)
print(json.dumps({**logs, **{'step': global_step}}))
# Save model checkpoint
output_dir = os.path.join(args.output_dir, 'epoch-{}'.format(_ + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, 'module') else model # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, 'training_args.bin'))
logger.info("Saving model checkpoint to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
if best_epoch is not None:
logger.info(" ***************** Best checkpoint: {}, choosed by {} *****************".format(
best_epoch, metric_for_best))
logger.info("Best performance = %s" % json.dumps(best_performance))
save_best_result(best_epoch, best_performance, args.output_dir)
return global_step, tr_loss / global_step | Train the model |
185,181 | import logging
import os
from tqdm import *
def get_labels(path):
if path:
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] | null |
185,182 | import logging
import os
import csv
import sys
import copy
import json
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.preprocessing import MultiLabelBinarizer
logger = logging.getLogger(__name__)
class InputFeatures(object):
"""
A single set of features of data.
Args:
input_ids: Indices of input sequence tokens in the vocabulary.
attention_mask: Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens.
token_type_ids: Segment token indices to indicate first and second portions of the inputs.
label: Label corresponding to the input
"""
def __init__(self, input_ids, attention_mask=None, token_type_ids=None, label=None):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.label = label
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
# NOTE: the processor classes referenced below are defined elsewhere in the original module and are not included in this snippet.
glue_processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mnli-mm": MnliMismatchedProcessor,
"mrpc": MrpcProcessor,
"sst-2": Sst2Processor,
"sts-b": StsbProcessor,
"qqp": QqpProcessor,
"qnli": QnliProcessor,
"rte": RteProcessor,
"wnli": WnliProcessor,
"chemprot": ChemProcessor,
"arc": ARCProcessor,
"sci": SCIProcessor,
}
glue_output_modes = {
"cola": "classification",
"mnli": "classification",
"mnli-mm": "classification",
"mrpc": "classification",
"sst-2": "classification",
"sts-b": "regression",
"qqp": "classification",
"qnli": "classification",
"rte": "classification",
"wnli": "classification",
"chemprot": "classification",
"arc": "classification",
"sci": "classification",
}
The provided code snippet includes necessary dependencies for implementing the `glue_convert_examples_to_features` function. Write a Python function `def glue_convert_examples_to_features(examples, tokenizer, max_length=512, task=None, label_list=None, output_mode=None, pad_on_left=False, pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True)` to solve the following problem:
Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method output_mode: String indicating the output mode. Either ``regression`` or ``classification`` pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default) pad_token: Padding token pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4) mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for actual values) Returns: If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset`` containing the task-specific features. If the input is a list of ``InputExamples``, will return a list of task-specific ``InputFeatures`` which can be fed to the model.
Here is the function:
def glue_convert_examples_to_features(examples, tokenizer,
max_length=512,
task=None,
label_list=None,
output_mode=None,
pad_on_left=False,
pad_token=0,
pad_token_segment_id=0,
mask_padding_with_zero=True):
"""
Loads a data file into a list of ``InputFeatures``
Args:
examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples.
tokenizer: Instance of a tokenizer that will tokenize the examples
max_length: Maximum example length
task: GLUE task
label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method
output_mode: String indicating the output mode. Either ``regression`` or ``classification``
pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default)
pad_token: Padding token
pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4)
mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values
and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for
actual values)
Returns:
If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset``
containing the task-specific features. If the input is a list of ``InputExamples``, will return
a list of task-specific ``InputFeatures`` which can be fed to the model.
"""
is_tf_dataset = False
if task is not None:
processor = glue_processors[task]()
if label_list is None:
label_list = processor.get_labels()
logger.info("Using label list %s for task %s" % (label_list, task))
if output_mode is None:
output_mode = glue_output_modes[task]
logger.info("Using output mode %s for task %s" % (output_mode, task))
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % (ex_index))
if is_tf_dataset:
example = processor.get_example_from_tensor_dict(example)
example = processor.tfds_map(example)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length,
)
input_ids = inputs["input_ids"]
if "token_type_ids" in inputs:
token_type_ids = inputs["token_type_ids"]
else:
token_type_ids = []
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_length - len(input_ids)
if pad_on_left:
input_ids = ([pad_token] * padding_length) + input_ids
attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = ([pad_token_segment_id] * padding_length) + token_type_ids
else:
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
if len(token_type_ids) == 0:
padding_length = max_length
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
if output_mode == "classification":
label = label_map[example.label]
elif output_mode == "regression":
label = float(example.label)
else:
raise KeyError(output_mode)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_tokens: %s" % " ".join(tokenizer.convert_ids_to_tokens(input_ids)))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(
InputFeatures(input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features | Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` or ``tf.data.Dataset`` containing the examples. tokenizer: Instance of a tokenizer that will tokenize the examples max_length: Maximum example length task: GLUE task label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method output_mode: String indicating the output mode. Either ``regression`` or ``classification`` pad_on_left: If set to ``True``, the examples will be padded on the left rather than on the right (default) pad_token: Padding token pad_token_segment_id: The segment ID for the padding token (It is usually 0, but can vary such as for XLNet where it is 4) mask_padding_with_zero: If set to ``True``, the attention mask will be filled by ``1`` for actual values and by ``0`` for padded values. If set to ``False``, inverts it (``1`` for padded values, ``0`` for actual values) Returns: If the ``examples`` input is a ``tf.data.Dataset``, will return a ``tf.data.Dataset`` containing the task-specific features. If the input is a list of ``InputExamples``, will return a list of task-specific ``InputFeatures`` which can be fed to the model. |
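A hedged usage sketch for `glue_convert_examples_to_features`, assuming an `InputExample`-like object with `guid`, `text_a`, `text_b`, and `label` attributes (the class itself is not shown in this snippet) and a downloadable BERT tokenizer:

from types import SimpleNamespace
from transformers import BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")  # requires network access
examples = [SimpleNamespace(guid="dev-1", text_a="hello world", text_b=None, label="1")]
features = glue_convert_examples_to_features(
    examples, tokenizer, max_length=32, label_list=["0", "1"], output_mode="classification"
)
print(features[0].input_ids[:8], features[0].label)  # padded ids and the mapped label id 1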
185,183 | import logging
import os
import csv
import sys
import copy
import json
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import matthews_corrcoef, f1_score
from sklearn.preprocessing import MultiLabelBinarizer
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def acc_and_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds)
return {
"acc": acc,
"f1": f1,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_macro_f1(preds, labels):
acc = simple_accuracy(preds, labels)
f1 = f1_score(y_true=labels, y_pred=preds,average="macro")
return {
"f1": f1,
"acc": acc,
"acc_and_f1": (acc + f1) / 2,
}
def acc_and_micro_f1(preds, labels, label_list):
acc = simple_accuracy(preds, labels)
print(label_list)
label_list = [str(i+1) for i in range(len(label_list))]
print(label_list)
mlb = MultiLabelBinarizer(classes = label_list)
labels = labels.tolist()
labels = [str(i) for i in labels]
print(labels[:20])
labels = mlb.fit_transform(labels)
preds = preds.tolist()
preds = [str(i) for i in preds]
print(preds[:20])
preds = mlb.fit_transform(preds)
f1 = f1_score(y_true=labels, y_pred=preds,average="micro")
return {
"f1": f1,
"acc": acc,
"f1_macro": f1_score(y_true=labels, y_pred=preds,average="macro"),
"acc_and_f1": (acc + f1) / 2,
}
def pearson_and_spearman(preds, labels):
pearson_corr = pearsonr(preds, labels)[0]
spearman_corr = spearmanr(preds, labels)[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def glue_compute_metrics(task_name, preds, labels, label_list):
assert len(preds) == len(labels)
if task_name == "cola":
return {"mcc": matthews_corrcoef(labels, preds)}
elif task_name == "sst-2":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mrpc":
return acc_and_f1(preds, labels)
elif task_name == "sts-b":
return pearson_and_spearman(preds, labels)
elif task_name == "qqp":
return acc_and_f1(preds, labels)
elif task_name == "mnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "mnli-mm":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "qnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "rte":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "wnli":
return {"acc": simple_accuracy(preds, labels)}
elif task_name == "chemprot":
return acc_and_micro_f1(preds, labels, label_list)
elif task_name == "arc" or task_name == "sci":
return acc_and_macro_f1(preds, labels)
else:
raise KeyError(task_name) | null |
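For reference, a small call to `glue_compute_metrics` on toy arrays; `label_list` is only consulted by the chemprot branch:

import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("sst-2", preds, labels, label_list=["0", "1"]))  # {'acc': 0.75}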
185,184 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
import logging
from six.moves import range
import time
import glob
_ALPHANUMERIC_CHAR_SET = set(
six.unichr(i) for i in range(sys.maxunicode)
if (unicodedata.category(six.unichr(i)).startswith("L") or
unicodedata.category(six.unichr(i)).startswith("N") or
unicodedata.category(six.unichr(i)).startswith("P")))
The provided code snippet includes necessary dependencies for implementing the `decode` function. Write a Python function `def decode(tokens)` to solve the following problem:
Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string
Here is the function:
def decode(tokens):
"""Decode a list of tokens to a unicode string.
Args:
tokens: a list of Unicode strings
Returns:
a unicode string
"""
token_is_alnum = [t[0] in _ALPHANUMERIC_CHAR_SET for t in tokens]
ret = []
for i, token in enumerate(tokens):
if i > 0 and token_is_alnum[i - 1] and token_is_alnum[i]:
ret.append(u" ")
ret.append(token)
return "".join(ret) | Decode a list of tokens to a unicode string. Args: tokens: a list of Unicode strings Returns: a unicode string |
185,185 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import sys
import unicodedata
import six
import logging
from six.moves import range
import time
import glob
_native_to_unicode = (lambda s: s.decode("utf-8")) if six.PY2 else (lambda s: s)
logger = logging.getLogger(__name__)
def _read_filepattern(filepattern, max_lines=None, split_on_newlines=True, do_lower_case=False):
"""Reads files matching a wildcard pattern, yielding the contents.
Args:
filepattern: A wildcard pattern matching one or more files.
max_lines: If set, stop reading after reading this many lines.
split_on_newlines: A boolean. If true, then split files by lines and strip
leading and trailing whitespace from each line. Otherwise, treat each
file as a single string.
Yields:
The contents of the files as lines, if split_on_newlines is True, or
the entire contents of each file if False.
"""
filenames = sorted(glob.glob(filepattern))
print(filenames, 'do lower case:', do_lower_case)
lines_read = 0
for filename in filenames:
start = time.time()
with open(filename) as f:
if split_on_newlines:
for line in f:
if do_lower_case:
line = line.lower()
yield line.strip()
lines_read += 1
if max_lines and lines_read >= max_lines:
return
if lines_read % 100000 == 0:
print("read", lines_read, "lines,", time.time() - start, "secs elapsed")
else:
if max_lines:
doc = []
for line in f:
if do_lower_case:
line = line.lower()
doc.append(line)
lines_read += 1
if max_lines and lines_read >= max_lines:
yield "".join(doc)
return
yield "".join(doc)
else:
yield f.read()
print(time.time() - start, "for reading read file :", filename)
The provided code snippet includes necessary dependencies for implementing the `vocab_token_counts` function. Write a Python function `def vocab_token_counts(text_filepattern, max_lines, do_lower_case=False)` to solve the following problem:
Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Returns: a dictionary mapping token to count.
Here is the function:
def vocab_token_counts(text_filepattern, max_lines, do_lower_case=False):
"""Read a vocab file and return a dictionary of token counts.
Reads a two-column CSV file of tokens and their frequency in a dataset. The
tokens are presumed to be generated by encode() or the equivalent.
Args:
text_filepattern: A pattern matching one or more files.
max_lines: An integer; maximum total lines to read.
Returns:
a dictionary mapping token to count.
"""
ret = {}
for i, line in enumerate(
_read_filepattern(text_filepattern, max_lines=max_lines)):
if "," not in line:
logger.warning("Malformed vocab line #%d '%s'", i, line)
continue
if do_lower_case:
line = line.lower()
token, count = line.rsplit(",", 1)
ret[_native_to_unicode(token)] = int(count)
return ret | Read a vocab file and return a dictionary of token counts. Reads a two-column CSV file of tokens and their frequency in a dataset. The tokens are presumed to be generated by encode() or the equivalent. Args: text_filepattern: A pattern matching one or more files. max_lines: An integer; maximum total lines to read. Returns: a dictionary mapping token to count. |
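A runnable sketch for `vocab_token_counts` using a temporary two-column CSV (the file contents are illustrative):

import os
import tempfile

fd, path = tempfile.mkstemp(suffix=".csv")
with os.fdopen(fd, "w") as f:
    f.write("hello,12\nworld,7\n")
print(vocab_token_counts(path, max_lines=None))  # {'hello': 12, 'world': 7}
os.remove(path)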
185,186 | from __future__ import absolute_import
from __future__ import division
from numpy.core.fromnumeric import argsort
from text_encoder import SubwordTextEncoder
import tokenizer
import tempfile
import argparse
from transformers import BertTokenizer
import random
import math
import numpy as np
def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path):
writer = open(output_filename, 'w', encoding='utf-8')
_set = set()
with open(bert_vocab, 'r', encoding='utf-8') as reader:
for line in reader:
writer.write(line)
_set.add(line.strip())
print(temp_path)
with open(temp_path, 'r', encoding='utf-8') as reader:
for line in reader:
if line.strip() not in _set:
writer.write(line)
writer.close()
def build_target_size_vocab(token_counts, reserved_tokens, target_size):
min_val = 1
max_val = len(token_counts) // (target_size ** 0.5)
encoder = SubwordTextEncoder.build_to_target_size(target_size,token_counts,min_val, max_val, num_iterations=5,
reserved_tokens=reserved_tokens, max_subtoken_length=None)
fd, temp_vocab = tempfile.mkstemp()
encoder.store_to_file(temp_vocab, add_single_quotes=False)
return encoder, temp_vocab
def compute_language_model(documents, vocab_file):
all_tokens = 0
tokenized_documents = []
bert_tokenizer = BertTokenizer(vocab_file ,do_lower_case = True)
words = bert_tokenizer.vocab
for word in words.keys():
words[word] = 0
for doc in documents:
tokens = bert_tokenizer.tokenize(doc)
all_tokens += len(tokens)
for token in tokens:
words[token] +=1
tokenized_documents.append(tokens)
for word in words.keys():
words[word] /= all_tokens
probs = []
for doc in tokenized_documents:
p = 0.0
for token in doc:
p += math.log(words[token])
probs.append(p)
return np.mean(probs)
The provided code snippet includes necessary dependencies for implementing the `vocab_extend` function. Write a Python function `def vocab_extend(corpus, raw_vocab, output_filename, interval=10000 , threshold = 0.01)` to solve the following problem:
@description : The function to get the incremental vocabulary for @param : @Returns :
Here is the function:
def vocab_extend(corpus, raw_vocab, output_filename, interval=10000, threshold=0.01):
"""
    @description : The function to get the incremental vocabulary for a corpus
@param :
@Returns :
"""
documents = []
for line in open(corpus, "r",encoding='utf-8'):
line = line.replace('\n','')
if len(line) < 5:
continue
documents.append(line)
print("docunments: "+str(len(documents)))
token_counts = tokenizer.corpus_token_counts(
corpus, corpus_max_lines = 4400000,
split_on_newlines = True, additional_chars="", do_lower_case=True)
lines = open(raw_vocab, 'r', encoding='utf-8').readlines()
lines = [s.strip() for s in lines if len(s) > 0]
reserved_tokens = lines
random.shuffle(documents)
origin_size = (len(reserved_tokens) // interval) * interval
pre_lm = compute_language_model(documents, raw_vocab)
print("origin_size: " + str(origin_size))
print("pre_lm: "+ str(pre_lm))
target_size = origin_size
while True:
target_size = target_size + interval
_, temp_vocab = build_target_size_vocab(token_counts, reserved_tokens, target_size)
now_lm = compute_language_model(documents, temp_vocab)
print('now_lm: '+ str(now_lm))
delta = (pre_lm - now_lm)/pre_lm
print('delta: ' + str(delta))
if delta <= threshold:
merge_output_file_with_bert_vocab(output_filename, raw_vocab, temp_vocab)
break
        pre_lm = now_lm | @description : The function to get the incremental vocabulary for a corpus @param : @Returns : |
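A hedged invocation sketch for `vocab_extend`; both input files below are placeholders, and the call grows the vocabulary in steps of `interval` until the relative language-model gain drops below `threshold`:

# Hypothetical paths: a raw text corpus and an existing BERT vocab file.
vocab_extend("corpus.txt", "bert_vocab.txt", "extended_vocab.txt",
             interval=5000, threshold=0.01)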
185,187 | from __future__ import absolute_import
from __future__ import division
from numpy.core.fromnumeric import argsort
from text_encoder import SubwordTextEncoder
import tokenizer
import tempfile
import argparse
from transformers import BertTokenizer
import random
import math
import numpy as np
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument("--corpus", default=None, type=str, required=True,
help="the file of the corpus to train the vocabulary.")
parser.add_argument("--raw_vocab", default=None, type=str, required=True,
help="the path to the file of the origin vocabulary")
parser.add_argument("--output_file", default=None, type=str, required=True,
help="the output file of the final vocabulary")
parser.add_argument('--interval', type=int, default=10000,
help="The interval of the vocabulary size.")
    parser.add_argument('--threshold', type=float, default=0.01,
                        help="The final threshold of the P(D)'s increase")
args = parser.parse_args()
return args | null |
185,188 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import logging
import six
from six.moves import range
logger = logging.getLogger(__name__)
def is_unicode(s):
return isinstance(s, six.text_type)
def to_unicode(s, ignore_errors=False):
if is_unicode(s):
return s
error_mode = "ignore" if ignore_errors else "strict"
return s.decode("utf-8", errors=error_mode)
def native_to_unicode(s):
if is_unicode(s):
return s
try:
return to_unicode(s)
except UnicodeDecodeError:
res = to_unicode(s, ignore_errors=True)
logger.info("Ignoring Unicode error, outputting: %s" % res)
return res | null |
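A quick check of `native_to_unicode` on both input types:

print(native_to_unicode(b"caf\xc3\xa9"))      # 'café': bytes are decoded as UTF-8
print(native_to_unicode(u"already unicode"))  # returned unchanged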
185,189 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import logging
import six
from six.moves import range
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
def is_unicode(s):
return isinstance(s, six.text_type)
def unicode_to_native(s):
if six.PY2:
return s.encode("utf-8") if is_unicode(s) else s
else:
return s | null |
185,190 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import logging
import six
from six.moves import range
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
The provided code snippet includes necessary dependencies for implementing the `_escape_token` function. Write a Python function `def _escape_token(token, alphabet)` to solve the following problem:
Escape away underscores and OOV characters and append '_'. This allows the token to be expressed as the concatenation of a list of subtokens from the vocabulary. The underscore acts as a sentinel which allows us to invertibly concatenate multiple such lists. Args: token: A unicode string to be escaped. alphabet: A set of all characters in the vocabulary's alphabet. Returns: escaped_token: An escaped unicode string. Raises: ValueError: If the provided token is not unicode.
Here is the function:
def _escape_token(token, alphabet):
"""Escape away underscores and OOV characters and append '_'.
This allows the token to be expressed as the concatenation of a list
of subtokens from the vocabulary. The underscore acts as a sentinel
which allows us to invertibly concatenate multiple such lists.
Args:
token: A unicode string to be escaped.
alphabet: A set of all characters in the vocabulary's alphabet.
Returns:
escaped_token: An escaped unicode string.
Raises:
ValueError: If the provided token is not unicode.
"""
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return u"".join(ret) + "_" | Escape away underscores and OOV characters and append '_'. This allows the token to be expressed as the concatenation of a list of subtokens from the vocabulary. The underscore acts as a sentinel which allows us to invertibly concatenate multiple such lists. Args: token: A unicode string to be escaped. alphabet: A set of all characters in the vocabulary's alphabet. Returns: escaped_token: An escaped unicode string. Raises: ValueError: If the provided token is not unicode. |
185,191 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from itertools import chain
import re
import time
import logging
import six
from six.moves import range
if six.PY2:
RESERVED_TOKENS_BYTES = RESERVED_TOKENS
else:
RESERVED_TOKENS_BYTES = [bytes(PAD, "ascii"), bytes(EOS, "ascii")]
def _my_escape_token(token, alphabet):
if not isinstance(token, six.text_type):
raise ValueError("Expected string type for token, got %s" % type(token))
token = token.replace(u"\\", u"\\\\").replace(u"_", u"\\u")
ret = [c if c in alphabet and c != u"\n" else r"\%d;" % ord(c) for c in token]
return "_" + u"".join(ret) | null |
185,192 | from __future__ import absolute_import
from __future__ import division
from text_encoder import SubwordTextEncoder
import tokenizer
import os
import tempfile
import tensorflow as tf
def merge_output_file_with_bert_vocab(output_filename, bert_vocab, temp_path):
writer = open(output_filename, 'w', encoding='utf-8')
_set = set()
with open(bert_vocab, 'r', encoding='utf-8') as reader:
for line in reader:
writer.write(line)
_set.add(line.strip())
print(temp_path)
with open(temp_path, 'r', encoding='utf-8') as reader:
for line in reader:
if line.strip() not in _set:
writer.write(line)
writer.close()
# os.remove(temp_path) | null |
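A runnable sketch of `merge_output_file_with_bert_vocab`; the file names are illustrative:

with open("bert_vocab.txt", "w", encoding="utf-8") as f:
    f.write("[PAD]\nhello\n")
with open("new_subwords.txt", "w", encoding="utf-8") as f:
    f.write("hello\nworld\n")
merge_output_file_with_bert_vocab("merged_vocab.txt", "bert_vocab.txt", "new_subwords.txt")
print(open("merged_vocab.txt", encoding="utf-8").read())  # [PAD], hello, world -- 'hello' is not duplicated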
185,193 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import ClassLabel, load_dataset, load_metric
import layoutlmft.data.datasets.funsd
import transformers
from layoutlmft.data import DataCollatorForKeyValueExtraction
from layoutlmft.data.data_args import DataTrainingArguments
from layoutlmft.models.model_args import ModelArguments
from layoutlmft.trainers import FunsdTrainer as Trainer
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
logger = logging.getLogger(__name__)
def main():
# See all possible arguments in layoutlmft/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
datasets = load_dataset(os.path.abspath(layoutlmft.data.datasets.funsd.__file__))
if training_args.do_train:
column_names = datasets["train"].column_names
features = datasets["train"].features
else:
column_names = datasets["validation"].column_names
features = datasets["validation"].features
text_column_name = "tokens" if "tokens" in column_names else column_names[0]
label_column_name = (
f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1]
)
remove_columns = column_names
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the dataset
# Padding strategy
padding = "max_length" if data_args.pad_to_max_length else False
# Tokenize all texts and align the labels with them.
def tokenize_and_align_labels(examples):
tokenized_inputs = tokenizer(
examples[text_column_name],
padding=padding,
truncation=True,
return_overflowing_tokens=True,
# We use this argument because the texts in our dataset are lists of words (with a label for each word).
is_split_into_words=True,
)
labels = []
bboxes = []
images = []
for batch_index in range(len(tokenized_inputs["input_ids"])):
word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
org_batch_index = tokenized_inputs["overflow_to_sample_mapping"][batch_index]
label = examples[label_column_name][org_batch_index]
bbox = examples["bboxes"][org_batch_index]
image = examples["image"][org_batch_index]
previous_word_idx = None
label_ids = []
bbox_inputs = []
for word_idx in word_ids:
# Special tokens have a word id that is None. We set the label to -100 so they are automatically
# ignored in the loss function.
if word_idx is None:
label_ids.append(-100)
bbox_inputs.append([0, 0, 0, 0])
# We set the label for the first token of each word.
elif word_idx != previous_word_idx:
label_ids.append(label_to_id[label[word_idx]])
bbox_inputs.append(bbox[word_idx])
# For the other tokens in a word, we set the label to either the current label or -100, depending on
# the label_all_tokens flag.
else:
label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)
bbox_inputs.append(bbox[word_idx])
previous_word_idx = word_idx
labels.append(label_ids)
bboxes.append(bbox_inputs)
images.append(image)
tokenized_inputs["labels"] = labels
tokenized_inputs["bbox"] = bboxes
tokenized_inputs["image"] = images
return tokenized_inputs
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
train_dataset = train_dataset.map(
tokenize_and_align_labels,
batched=True,
remove_columns=remove_columns,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
eval_dataset = eval_dataset.map(
tokenize_and_align_labels,
batched=True,
remove_columns=remove_columns,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_predict:
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
test_dataset = test_dataset.map(
tokenize_and_align_labels,
batched=True,
remove_columns=remove_columns,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = DataCollatorForKeyValueExtraction(
tokenizer,
pad_to_multiple_of=8 if training_args.fp16 else None,
padding=padding,
max_length=512,
)
# Metrics
metric = load_metric("seqeval")
def compute_metrics(p):
predictions, labels = p
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
true_labels = [
[label_list[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
results = metric.compute(predictions=true_predictions, references=true_labels)
if data_args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = last_checkpoint if last_checkpoint else None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Predict
if training_args.do_predict:
logger.info("*** Predict ***")
predictions, labels, metrics = trainer.predict(test_dataset)
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
for prediction in true_predictions:
writer.write(" ".join(prediction) + "\n")
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
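The label-alignment rule inside `tokenize_and_align_labels` above, isolated as a standalone sketch; the `word_ids` sequence is hard-coded here instead of coming from a fast tokenizer:

word_ids = [None, 0, 0, 1, 2, 2, None]     # e.g. [CLS] sub sub word sub sub [SEP]
word_labels = [3, 0, 5]                    # one label id per original word
label_all_tokens = False

label_ids, previous = [], None
for idx in word_ids:
    if idx is None:
        label_ids.append(-100)             # special tokens are ignored by the loss
    elif idx != previous:
        label_ids.append(word_labels[idx]) # first sub-token of a word keeps the label
    else:
        label_ids.append(word_labels[idx] if label_all_tokens else -100)
    previous = idx
print(label_ids)                           # [-100, 3, -100, 0, 5, -100, -100]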
185,194 | import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
from datasets import ClassLabel, load_dataset, load_metric
import layoutlmft.data.datasets.xfun
import transformers
from layoutlmft.data import DataCollatorForKeyValueExtraction
from layoutlmft.data.data_args import XFUNDataTrainingArguments
from layoutlmft.models.model_args import ModelArguments
from layoutlmft.trainers import XfunSerTrainer
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
from transformers.utils import check_min_version
logger = logging.getLogger(__name__)
def main():
parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
datasets = load_dataset(
os.path.abspath(layoutlmft.data.datasets.xfun.__file__),
f"xfun.{data_args.lang}",
additional_langs=data_args.additional_langs,
keep_in_memory=True,
)
if training_args.do_train:
column_names = datasets["train"].column_names
features = datasets["train"].features
else:
column_names = datasets["validation"].column_names
features = datasets["validation"].features
text_column_name = "input_ids"
label_column_name = "labels"
remove_columns = column_names
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the dataset
# Padding strategy
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
if training_args.do_predict:
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
# Data collator
data_collator = DataCollatorForKeyValueExtraction(
tokenizer,
pad_to_multiple_of=8 if training_args.fp16 else None,
padding=padding,
max_length=512,
)
# Metrics
metric = load_metric("seqeval")
def compute_metrics(p):
predictions, labels = p
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
true_labels = [
[label_list[l] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
results = metric.compute(predictions=true_predictions, references=true_labels)
if data_args.return_entity_level_metrics:
# Unpack nested dictionaries
final_results = {}
for key, value in results.items():
if isinstance(value, dict):
for n, v in value.items():
final_results[f"{key}_{n}"] = v
else:
final_results[key] = value
return final_results
else:
return {
"precision": results["overall_precision"],
"recall": results["overall_recall"],
"f1": results["overall_f1"],
"accuracy": results["overall_accuracy"],
}
# Initialize our Trainer
trainer = XfunSerTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = last_checkpoint if last_checkpoint else None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Predict
if training_args.do_predict:
logger.info("*** Predict ***")
predictions, labels, metrics = trainer.predict(test_dataset)
predictions = np.argmax(predictions, axis=2)
# Remove ignored index (special tokens)
true_predictions = [
[label_list[p] for (p, l) in zip(prediction, label) if l != -100]
for prediction, label in zip(predictions, labels)
]
trainer.log_metrics("test", metrics)
trainer.save_metrics("test", metrics)
# Save predictions
output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
if trainer.is_world_process_zero():
with open(output_test_predictions_file, "w") as writer:
for prediction in true_predictions:
writer.write(" ".join(prediction) + "\n")
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
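For reference, the compute_metrics helper above leans on the seqeval metric; here is a minimal, illustrative sketch of what metric.compute returns (the labels are the editor's examples, not from the source):
from datasets import load_metric
metric = load_metric("seqeval")
results = metric.compute(
    predictions=[["B-HEADER", "I-HEADER", "O"]],
    references=[["B-HEADER", "I-HEADER", "O"]],
)
print(results["overall_f1"])  # 1.0 for a perfect match; per-entity dicts are also included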
185,195 | import logging
import os
import sys
import numpy as np
from datasets import ClassLabel, load_dataset
import layoutlmft.data.datasets.xfun
import transformers
from layoutlmft import AutoModelForRelationExtraction
from layoutlmft.data.data_args import XFUNDataTrainingArguments
from layoutlmft.data.data_collator import DataCollatorForKeyValueExtraction
from layoutlmft.evaluation import re_score
from layoutlmft.models.model_args import ModelArguments
from layoutlmft.trainers import XfunReTrainer
from transformers import (
AutoConfig,
AutoTokenizer,
HfArgumentParser,
PreTrainedTokenizerFast,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
datasets = load_dataset(
os.path.abspath(layoutlmft.data.datasets.xfun.__file__),
f"xfun.{data_args.lang}",
additional_langs=data_args.additional_langs,
keep_in_memory=True,
)
if training_args.do_train:
column_names = datasets["train"].column_names
features = datasets["train"].features
else:
column_names = datasets["validation"].column_names
features = datasets["validation"].features
text_column_name = "input_ids"
label_column_name = "labels"
remove_columns = column_names
# In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the
# unique labels.
def get_label_list(labels):
unique_labels = set()
for label in labels:
unique_labels = unique_labels | set(label)
label_list = list(unique_labels)
label_list.sort()
return label_list
if isinstance(features[label_column_name].feature, ClassLabel):
label_list = features[label_column_name].feature.names
# No need to convert the labels since they are already ints.
label_to_id = {i: i for i in range(len(label_list))}
else:
label_list = get_label_list(datasets["train"][label_column_name])
label_to_id = {l: i for i, l in enumerate(label_list)}
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForRelationExtraction.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
# Tokenizer check: this script requires a fast tokenizer.
if not isinstance(tokenizer, PreTrainedTokenizerFast):
raise ValueError(
"This example script only works for models that have a fast tokenizer. Checkout the big table of models "
"at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this "
"requirement"
)
# Preprocessing the dataset
# Padding strategy
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
if training_args.do_predict:
if "test" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
# Data collator
data_collator = DataCollatorForKeyValueExtraction(
tokenizer,
pad_to_multiple_of=8 if training_args.fp16 else None,
padding=padding,
max_length=512,
)
def compute_metrics(p):
pred_relations, gt_relations = p
score = re_score(pred_relations, gt_relations, mode="boundaries")
return score
# Initialize our Trainer
trainer = XfunReTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = last_checkpoint if last_checkpoint else None
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
trainer.save_model() # Saves the tokenizer too for easy upload
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
def _mp_fn(index):
# For xla_spawn (TPUs)
main() | null |
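As context for the relation-extraction script above, a hypothetical launch command (the script file name, checkpoint, and flag values are assumptions, not taken from the source):
python run_xfun_re.py \
    --model_name_or_path microsoft/layoutxlm-base \
    --output_dir /tmp/xfun_re \
    --lang zh \
    --do_train \
    --do_eval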
185,205 | from __future__ import print_function
from bleu.bleu import Bleu
from meteor.meteor import Meteor
from rouge.rouge import Rouge
from cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import string
import sys
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
"[": "-lsb-", "]": "-rsb-",
"{": "-lcb-", "}": "-rcb-",
"[UNK]": "UNK", '&': '&', '<': '<', '>': '>'}
def _is_digit(w):
for ch in w:
if not(ch.isdigit() or ch == ','):
return False
return True
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i = k  # skip past everything consumed into the abbreviation
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens) | null |
185,206 | from __future__ import print_function
from bleu.bleu import Bleu
from meteor.meteor import Meteor
from rouge.rouge import Rouge
from cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import string
import sys
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list
class QGEvalCap:
def __init__(self, gts, res):
self.gts = gts
self.res = res
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(), "METEOR"),
(Rouge(), "ROUGE_L"),
# (Cider(), "CIDEr")
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
print("%s: %0.5f" % (m, sc))
output.append(sc)
else:
print("%s: %0.5f" % (method, score))
output.append(score)
return output
The provided code snippet includes necessary dependencies for implementing the `eval` function. Write a Python function `def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500)` to solve the following problem:
Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt
Here is the function:
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
"""
Given a filename, calculate the metric scores for that prediction file
isDin: boolean value to check whether input file is DirectIn.txt
"""
pairs = []
with open(src_file, 'r') as infile:
for line in infile:
pair = {}
pair['tokenized_sentence'] = line[:-1].strip().lower()
pairs.append(pair)
with open(tgt_file, "r") as infile:
cnt = 0
for line in infile:
pairs[cnt]['tokenized_question'] = " ".join(
detokenize(line[:-1].strip().split())).lower()
cnt += 1
output = []
with open(out_file, 'r') as infile:
for line in infile:
line = line[:-1].strip().lower()
output.append(line)
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
# eval
from eval import QGEvalCap
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']
res[key] = [pair['prediction'].encode('utf-8')]
# gts
gts[key].append(pair['tokenized_question'].encode('utf-8'))
QGEval = QGEvalCap(gts, res)
return QGEval.evaluate() | Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt |
185,207 | from __future__ import print_function
from bleu.bleu import Bleu
from meteor.meteor import Meteor
from rouge.rouge import Rouge
from cider.cider import Cider
from collections import defaultdict
from argparse import ArgumentParser
import string
import sys
# Module-level helpers used by fix_tokenization below; restored here as an
# assumption so this snippet is self-contained (they mirror the definitions
# in the companion snippet above).
_tok_dict = {"(": "-lrb-", ")": "-rrb-",
             "[": "-lsb-", "]": "-rsb-",
             "{": "-lcb-", "}": "-rcb-",
             "[UNK]": "UNK", '&amp;': '&', '&lt;': '<', '&gt;': '>'}
def _is_digit(w):
    for ch in w:
        if not (ch.isdigit() or ch == ','):
            return False
    return True
def fix_tokenization(text):
input_tokens = text.split()
output_tokens = []
has_left_quote = False
has_left_single_quote = False
i = 0
prev_dash = False
while i < len(input_tokens):
tok = input_tokens[i]
flag_prev_dash = False
if tok in _tok_dict.keys():
output_tokens.append(_tok_dict[tok])
i += 1
elif tok == "\"":
if has_left_quote:
output_tokens.append("''")
else:
output_tokens.append("``")
has_left_quote = not has_left_quote
i += 1
elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t":
output_tokens[-1] = output_tokens[-1][:-1]
output_tokens.append("n't")
i += 2
elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"):
output_tokens.append("'"+input_tokens[i + 1])
i += 2
elif tok == "'":
if has_left_single_quote:
output_tokens.append("'")
else:
output_tokens.append("`")
has_left_single_quote = not has_left_single_quote
i += 1
elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".":
output_tokens.append("...")
i += 3
elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]):
# $ 3 , 000 -> $ 3,000
output_tokens[-1] += ','+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit():
# 3 . 03 -> 3.03
output_tokens[-1] += '.'+input_tokens[i + 1]
i += 2
elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.':
# U . N . -> U.N.
k = i+3
while k+2 < len(input_tokens):
if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.':
k += 2
else:
break
output_tokens[-1] += ''.join(input_tokens[i:k])
i = k  # skip past everything consumed into the abbreviation
elif tok == "-":
if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-":
output_tokens.append("--")
i += 2
elif i == len(input_tokens) - 1 or i == 0:
output_tokens.append("-")
i += 1
elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation:
output_tokens[-1] += "-"
i += 1
flag_prev_dash = True
else:
output_tokens.append("-")
i += 1
elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation:
output_tokens[-1] += tok
i += 1
else:
output_tokens.append(tok)
i += 1
prev_dash = flag_prev_dash
return " ".join(output_tokens)
class QGEvalCap:
def __init__(self, gts, res):
self.gts = gts
self.res = res
def evaluate(self):
output = []
scorers = [
(Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]),
(Meteor(), "METEOR"),
(Rouge(), "ROUGE_L"),
# (Cider(), "CIDEr")
]
# =================================================
# Compute scores
# =================================================
for scorer, method in scorers:
# print 'computing %s score...'%(scorer.method())
score, scores = scorer.compute_score(self.gts, self.res)
if type(method) == list:
for sc, scs, m in zip(score, scores, method):
print("%s: %0.5f" % (m, sc))
output.append(sc)
else:
print("%s: %0.5f" % (method, score))
output.append(score)
return output
The provided code snippet includes necessary dependencies for implementing the `eval` function. Write a Python function `def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500)` to solve the following problem:
Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt
Here is the function:
def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500):
"""
Given a filename, calculate the metric scores for that prediction file
isDin: boolean value to check whether input file is DirectIn.txt
"""
pairs = []
with open(src_file, 'r') as infile:
for line in infile:
pair = {}
pair['tokenized_sentence'] = line[:-1].strip().lower()
pairs.append(pair)
with open(tgt_file, "r") as infile:
cnt = 0
for line in infile:
pairs[cnt]['tokenized_question'] = line[:-1].strip()
cnt += 1
output = []
with open(out_file, 'r') as infile:
for line in infile:
line = fix_tokenization(line[:-1].strip()).lower()
output.append(line)
for idx, pair in enumerate(pairs):
pair['prediction'] = output[idx]
# eval
from eval import QGEvalCap
import json
from json import encoder
encoder.FLOAT_REPR = lambda o: format(o, '.4f')
res = defaultdict(lambda: [])
gts = defaultdict(lambda: [])
for pair in pairs[:]:
key = pair['tokenized_sentence']
res[key] = [pair['prediction'].encode('utf-8')]
# gts
gts[key].append(pair['tokenized_question'].encode('utf-8'))
QGEval = QGEvalCap(gts, res)
return QGEval.evaluate() | Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt |
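Hypothetical usage of eval; the three paths are placeholders, and the return value is the list of six scores printed by QGEvalCap.evaluate:
bleu_1, bleu_2, bleu_3, bleu_4, meteor, rouge_l = eval(
    "predictions.txt", "src.txt", "tgt.txt")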
185,208 | import torch
from torch.nn import DataParallel
from torch.cuda._utils import _get_device_index
from torch.nn.parallel._functions import Scatter
from itertools import chain
def scatter_imbalance(inputs, target_gpus, dim=0):
r"""
Slices tensors into approximately equal chunks and
distributes them across given GPUs. Duplicates
references to objects that are not tensors.
"""
def scatter_map(obj):
if isinstance(obj, torch.Tensor):
if (len(target_gpus) == 4) and (obj.size(dim) == 22):
return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj)
elif (len(target_gpus) == 4) and (obj.size(dim) == 60):
return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj)
elif (len(target_gpus) == 4) and (obj.size(dim) == 144):
return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 46):
return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 62):
return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 94):
return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 110):
return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 118):
return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 126):
return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 134):
return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj)
elif (len(target_gpus) == 8) and (obj.size(dim) == 142):
return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj)
elif (len(target_gpus) == 16) and (obj.size(dim) == 222):
return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14), dim, obj)
return Scatter.apply(target_gpus, None, dim, obj)
if isinstance(obj, tuple) and len(obj) > 0:
return list(zip(*map(scatter_map, obj)))
if isinstance(obj, list) and len(obj) > 0:
return list(map(list, zip(*map(scatter_map, obj))))
if isinstance(obj, dict) and len(obj) > 0:
return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
return [obj for _ in target_gpus]
# After scatter_map is called, a scatter_map cell will exist. This cell
# has a reference to the actual function scatter_map, which has references
# to a closure that has a reference to the scatter_map cell (because the
# fn is recursive). To avoid this reference cycle, we set the function to
# None, clearing the cell
try:
return scatter_map(inputs)
finally:
scatter_map = None
The provided code snippet includes necessary dependencies for implementing the `scatter_kwargs_imbalance` function. Write a Python function `def scatter_kwargs_imbalance(inputs, kwargs, target_gpus, dim=0)` to solve the following problem:
r"""Scatter with support for kwargs dictionary
Here is the function:
def scatter_kwargs_imbalance(inputs, kwargs, target_gpus, dim=0):
r"""Scatter with support for kwargs dictionary"""
inputs = scatter_imbalance(inputs, target_gpus, dim) if inputs else []
kwargs = scatter_imbalance(kwargs, target_gpus, dim) if kwargs else []
if len(inputs) < len(kwargs):
inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
elif len(kwargs) < len(inputs):
kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
inputs = tuple(inputs)
kwargs = tuple(kwargs)
return inputs, kwargs | r"""Scatter with support for kwargs dictionary |
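A minimal sketch of how these helpers could plug into a DataParallel subclass such as DataParallelImbalance; the class body below is the editor's assumption, not the repository's exact code:
class DataParallelImbalance(DataParallel):
    def scatter(self, inputs, kwargs, device_ids):
        # route scatter through the imbalance-aware splitter
        return scatter_kwargs_imbalance(inputs, kwargs, device_ids, dim=self.dim)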
185,209 | import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
The provided code snippet includes necessary dependencies for implementing the `filename_to_url` function. Write a Python function `def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]` to solve the following problem:
Return the url and etag (which may be ``None``) stored for `filename`. Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
Here is the function:
def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]:
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise FileNotFoundError("file {} not found".format(cache_path))
meta_path = cache_path + '.json'
if not os.path.exists(meta_path):
raise FileNotFoundError("file {} not found".format(meta_path))
with open(meta_path) as meta_file:
metadata = json.load(meta_file)
url = metadata['url']
etag = metadata['etag']
return url, etag | Return the url and etag (which may be ``None``) stored for `filename`. Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. |
185,210 | import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
Path.home() / '.pytorch_pretrained_bert'))
logger = logging.getLogger(__name__)
def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url)
else:
response = requests.head(url, allow_redirects=True)
if response.status_code != 200:
raise IOError("HEAD request failed for url {} with status code {}"
.format(url, response.status_code))
etag = response.headers.get("ETag")
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with tempfile.NamedTemporaryFile() as temp_file:
logger.info("%s not found in cache, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
s3_get(url, temp_file)
else:
http_get(url, temp_file)
# we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()
# shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)
logger.info("copying %s to cache at %s", temp_file.name, cache_path)
with open(cache_path, 'wb') as cache_file:
shutil.copyfileobj(temp_file, cache_file)
logger.info("creating metadata file for %s", cache_path)
meta = {'url': url, 'etag': etag}
meta_path = cache_path + '.json'
with open(meta_path, 'w') as meta_file:
json.dump(meta, meta_file)
logger.info("removing temp file %s", temp_file.name)
return cache_path
The provided code snippet includes necessary dependencies for implementing the `cached_path` function. Write a Python function `def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str` to solve the following problem:
Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path.
Here is the function:
def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
if cache_dir is None:
cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https', 's3'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, cache_dir)
elif os.path.exists(url_or_filename):
# File, and it exists.
return url_or_filename
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) | Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. |
185,214 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
from torch._six import container_abcs
from copy import deepcopy
from itertools import chain
def warmup_cosine(x, warmup=0.002):
if x < warmup:
return x/warmup
return 0.5 * (1.0 + math.cos(math.pi * x))  # math.cos, not torch.cos: x is a Python float, not a tensor | null
185,215 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
from torch._six import container_abcs
from copy import deepcopy
from itertools import chain
def warmup_constant(x, warmup=0.002):
if x < warmup:
return x/warmup
return 1.0 | null |
185,216 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
from torch._six import container_abcs
from copy import deepcopy
from itertools import chain
def warmup_linear(x, warmup=0.002):
if x < warmup:
return x/warmup
return max((x-1.)/(warmup-1.), 0) | null |
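Assuming the three schedules above live in the same module (as in pytorch_pretrained_bert.optimization), a quick illustration of their values, where x is the fraction of training completed:
for x in (0.001, 0.002, 0.5, 1.0):
    print(f"x={x}: cosine={warmup_cosine(x):.3f}, "
          f"constant={warmup_constant(x):.3f}, linear={warmup_linear(x):.3f}")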
185,217 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
from torch.nn.utils import clip_grad_norm_
from collections import defaultdict
from torch._six import container_abcs
from copy import deepcopy
from itertools import chain
def find_state_dict_subset_finetune(org_state_dict, org_name_list, no_decay, param_optimizer):
# only use the bert encoder and embeddings
want_name_set = set()
for n in org_name_list:
if ('bert.encoder' in n) or ('bert.embeddings' in n):
want_name_set.add(n)
# original: name to pid, pid to name
org_grouped_names = [[n for n in org_name_list if not any(nd in n for nd in no_decay)],
[n for n in org_name_list if any(nd in n for nd in no_decay)]]
org_n2id, org_id2n = {}, {}
for ng, pg in zip(org_grouped_names, org_state_dict['param_groups']):
for n, pid in zip(ng, pg['params']):
org_n2id[n] = pid
org_id2n[pid] = n
# group by: whether pretrained; whether weight decay
g_np_list = [
[(n, p) for n, p in param_optimizer if n in want_name_set and not any(
nd in n for nd in no_decay)],
[(n, p) for n, p in param_optimizer if n in want_name_set and any(
nd in n for nd in no_decay)],
[(n, p) for n, p in param_optimizer if n not in want_name_set and not any(
nd in n for nd in no_decay)],
[(n, p) for n, p in param_optimizer if n not in want_name_set and any(
nd in n for nd in no_decay)],
]
optimizer_grouped_parameters = [
{'params': [p for n, p in g_np_list[0]], 'weight_decay': 0.01},
{'params': [p for n, p in g_np_list[1]], 'weight_decay': 0.0},
{'params': [p for n, p in g_np_list[2]], 'weight_decay': 0.01},
{'params': [p for n, p in g_np_list[3]], 'weight_decay': 0.0}
]
new_state_dict = {}
# regroup the original state_dict
new_state_dict['state'] = {pid: v for pid, v in org_state_dict['state'].items(
) if pid not in org_id2n or org_id2n[pid] in want_name_set}
# reset step count to 0
for pid, st in new_state_dict['state'].items():
st['step'] = 0
def _filter_group(group, g_np_list, i, org_n2id):
packed = {k: v for k, v in group.items() if k != 'params'}
packed['params'] = [pid for pid in group['params']
if pid in org_id2n and org_id2n[pid] in want_name_set]
assert len(g_np_list[i]) == len(packed['params'])
# keep them the same order
packed['params'] = [org_n2id[n] for n, p in g_np_list[i]]
return packed
new_state_dict['param_groups'] = [_filter_group(
g, g_np_list, i, org_n2id) for i, g in enumerate(org_state_dict['param_groups'])]
return new_state_dict, optimizer_grouped_parameters | null |
185,218 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import unicodedata
import os
import logging
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
"""Loads a vocabulary file into a dictionary."""
# mapping unused tokens to special tokens
extra_map = {}
extra_map['[unused1]'] = '[X_SEP]'
for i in range(10):
extra_map['[unused{}]'.format(i+2)] = '[SEP_{}]'.format(i)
extra_map['[unused12]'] = '[S2S_SEP]'
extra_map['[unused13]'] = '[S2S_CLS]'
extra_map['[unused14]'] = '[L2R_SEP]'
extra_map['[unused15]'] = '[L2R_CLS]'
extra_map['[unused16]'] = '[R2L_SEP]'
extra_map['[unused17]'] = '[R2L_CLS]'
extra_map['[unused18]'] = '[S2S_SOS]'
vocab = collections.OrderedDict()
index = 0
with open(vocab_file, "r", encoding="utf-8") as reader:
while True:
token = reader.readline()
if not token:
break
token = token.strip()
if token in extra_map:
token = extra_map[token]
vocab[token] = index
index += 1
return vocab | Loads a vocabulary file into a dictionary. |
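Hypothetical usage; "vocab.txt" is a placeholder path to a BERT-style vocabulary file:
vocab = load_vocab("vocab.txt")
ids = [vocab[token] for token in ["[CLS]", "hello", "[SEP]"]]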
185,223 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
from scipy.stats import truncnorm
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from .file_utils import cached_path
from .loss import LabelSmoothingLoss
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Here is the function:
def gelu(x):
"""Implementation of the gelu activation function.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) | Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) |
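A quick numerical check (illustration only) of the erf-based gelu against the tanh approximation quoted in the docstring:
x = torch.linspace(-3.0, 3.0, steps=7)
approx = 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
print(torch.max(torch.abs(gelu(x) - approx)))  # small, well under 1e-2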
185,224 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import math
import logging
import tarfile
import tempfile
import shutil
import numpy as np
from scipy.stats import truncnorm
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
import torch.nn.functional as F
from .file_utils import cached_path
from .loss import LabelSmoothingLoss
def swish(x):
return x * torch.sigmoid(x) | null |
185,225 | from random import randint, shuffle, choice
from random import random as rand
import math
import torch
from biunilm.loader_utils import get_random_word, batch_list_to_batch_tensors, Pipeline
def truncate_tokens_pair(tokens_a, tokens_b, max_len, max_len_a=0, max_len_b=0, trunc_seg=None, always_truncate_tail=False):
num_truncated_a = [0, 0]
num_truncated_b = [0, 0]
while True:
if len(tokens_a) + len(tokens_b) <= max_len:
break
if (max_len_a > 0) and len(tokens_a) > max_len_a:
trunc_tokens = tokens_a
num_truncated = num_truncated_a
elif (max_len_b > 0) and len(tokens_b) > max_len_b:
trunc_tokens = tokens_b
num_truncated = num_truncated_b
elif trunc_seg:
# truncate the specified segment
if trunc_seg == 'a':
trunc_tokens = tokens_a
num_truncated = num_truncated_a
else:
trunc_tokens = tokens_b
num_truncated = num_truncated_b
else:
# truncate the longer segment
if len(tokens_a) > len(tokens_b):
trunc_tokens = tokens_a
num_truncated = num_truncated_a
else:
trunc_tokens = tokens_b
num_truncated = num_truncated_b
# whether always truncate source sequences
if (not always_truncate_tail) and (rand() < 0.5):
del trunc_tokens[0]
num_truncated[0] += 1
else:
trunc_tokens.pop()
num_truncated[1] += 1
return num_truncated_a, num_truncated_b | null |
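An illustrative call: truncate_tokens_pair shortens the lists in place and returns two [head_count, tail_count] lists, one per segment:
tokens_a = ["a"] * 8
tokens_b = ["b"] * 5
num_a, num_b = truncate_tokens_pair(tokens_a, tokens_b, max_len=10)
print(len(tokens_a) + len(tokens_b))  # 10 after truncation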
185,226 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import argparse
import math
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import random
import pickle
from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer
from pytorch_pretrained_bert.modeling import BertForSeq2SeqDecoder
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from nn.data_parallel import DataParallelImbalance
import biunilm.seq2seq_loader as seq2seq_loader
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list | null |
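An illustrative call showing the WordPiece merge rule:
print(detokenize(["play", "##ing", "field", "##s"]))  # ['playing', 'fields']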
185,227 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import argparse
import math
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler
from torch.utils.data.distributed import DistributedSampler
import random
import pickle
from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer
from pytorch_pretrained_bert.modeling import BertForSeq2SeqDecoder
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from nn.data_parallel import DataParallelImbalance
import biunilm.seq2seq_loader as seq2seq_loader
def ascii_print(text):
text = text.encode("ascii", "ignore")
print(text) | null |
185,228 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import math
import json
import argparse
import random
from pathlib import Path
from tqdm import tqdm, trange
import numpy as np
import torch
from torch.utils.data import RandomSampler
from torch.utils.data.distributed import DistributedSampler
from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer
from pytorch_pretrained_bert.modeling import BertForPreTrainingLossMask
from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear
from nn.data_parallel import DataParallelImbalance
import biunilm.seq2seq_loader as seq2seq_loader
import torch.distributed as dist
def _get_max_epoch_model(output_dir):
fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin"))
fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin"))
if (not fn_model_list) or (not fn_optim_list):
return None
both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list]
) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list])
if both_set:
return max(both_set)
else:
return None | null |
185,229 | from random import randint, shuffle
from random import random as rand
import numpy as np
import torch
import torch.utils.data
def get_random_word(vocab_words):
i = randint(0, len(vocab_words)-1)
return vocab_words[i] | null |
185,230 | from random import randint, shuffle
from random import random as rand
import numpy as np
import torch
import torch.utils.data
def batch_list_to_batch_tensors(batch):
batch_tensors = []
for x in zip(*batch):
if x[0] is None:
batch_tensors.append(None)
elif isinstance(x[0], torch.Tensor):
batch_tensors.append(torch.stack(x))
else:
batch_tensors.append(torch.tensor(x, dtype=torch.long))
return batch_tensors | null |
185,231 | from random import randint, shuffle
from random import random as rand
import numpy as np
import torch
import torch.utils.data
def _get_word_split_index(tokens, st, end):
split_idx = []
i = st
while i < end:
if (not tokens[i].startswith('##')) or (i == st):
split_idx.append(i)
i += 1
split_idx.append(end)
return split_idx | null |
185,232 | from random import randint, shuffle
from random import random as rand
import numpy as np
import torch
import torch.utils.data
def _expand_whole_word(tokens, st, end):
new_st, new_end = st, end
while (new_st >= 0) and tokens[new_st].startswith('##'):
new_st -= 1
while (new_end < len(tokens)) and tokens[new_end].startswith('##'):
new_end += 1
return new_st, new_end | null |
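An illustrative call: the sub-word span [1, 2) expands left and right to whole-word boundaries:
tokens = ["un", "##believ", "##able", "news"]
print(_expand_whole_word(tokens, 1, 2))  # (0, 3)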
185,233 | import pickle
import math
import argparse
import glob
from pathlib import Path
from tqdm import tqdm
import unicodedata
from pytorch_pretrained_bert.tokenization import BertTokenizer
def read_traces_from_file(file_name):
with open(file_name, "rb") as fin:
meta = pickle.load(fin)
num_samples = meta["num_samples"]
samples = []
for _ in range(num_samples):
samples.append(pickle.load(fin))
return samples | null |
185,234 | import pickle
import math
import argparse
import glob
from pathlib import Path
from tqdm import tqdm
import unicodedata
from pytorch_pretrained_bert.tokenization import BertTokenizer
def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None):
# if not any((length_penalty, alpha, expect, min_len)):
# raise ValueError(
# "You can only specify length penalty or alpha, but not both.")
scores = sample["scores"]
wids_list = sample["wids"]
ptrs = sample["ptrs"]
last_frame_id = len(scores) - 1
for i, wids in enumerate(wids_list):
if all(wid in (eos_id, pad_id) for wid in wids):
last_frame_id = i
break
while all(wid == pad_id for wid in wids_list[last_frame_id]):
last_frame_id -= 1
max_score = -math.inf
frame_id = -1
pos_in_frame = -1
for fid in range(last_frame_id + 1):
for i, wid in enumerate(wids_list[fid]):
if fid <= last_frame_id and scores[fid][i] >= 0:
# skip paddings
continue
if (wid in (eos_id, pad_id)) or fid == last_frame_id:
s = scores[fid][i]
if length_penalty:
if expect:
s -= length_penalty * math.fabs(fid+1 - expect)
else:
s += length_penalty * (fid + 1)
elif alpha:
s = s / math.pow((5 + fid + 1) / 6.0, alpha)
if s > max_score:
# if (frame_id != -1) and min_len and (fid+1 < min_len):
# continue
max_score = s
frame_id = fid
pos_in_frame = i
if frame_id == -1:
seq = []
else:
seq = [wids_list[frame_id][pos_in_frame]]
for fid in range(frame_id, 0, -1):
pos_in_frame = ptrs[fid][pos_in_frame]
seq.append(wids_list[fid - 1][pos_in_frame])
seq.reverse()
return seq | null |
185,235 | import pickle
import math
import argparse
import glob
from pathlib import Path
from tqdm import tqdm
import unicodedata
from pytorch_pretrained_bert.tokenization import BertTokenizer
def detokenize(tk_list):
r_list = []
for tk in tk_list:
if tk.startswith('##') and len(r_list) > 0:
r_list[-1] = r_list[-1] + tk[2:]
else:
r_list.append(tk)
return r_list | null |
185,236 | import pickle
import math
import argparse
import glob
from pathlib import Path
from tqdm import tqdm
import unicodedata
from pytorch_pretrained_bert.tokenization import BertTokenizer
def simple_postprocess(tk_list):
# truncate duplicate punctuations
while tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1 and unicodedata.category(tk_list[-1]).startswith('P') and all(it == tk_list[-1] for it in tk_list[-4:]):
tk_list = tk_list[:-3]
return tk_list | null |
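An illustrative call: a run of four identical trailing punctuation marks is collapsed:
print(simple_postprocess(["nice", "work", "!", "!", "!", "!"]))  # ['nice', 'work', '!']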
185,238 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import glob
import json
import argparse
import math
import string
from multiprocessing import Pool, cpu_count
from tqdm import tqdm, trange
from pathlib import Path
import numpy as np
import rouge
import time
import tempfile
import shutil
from pytorch_pretrained_bert.tokenization import BertTokenizer
from cnndm.bs_pyrouge import Rouge155
def rouge_results_to_str(results_dict):
> ROUGE-F">
return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
) | null |