| id | prompt | docstring |
|---|---|---|
184,720 | import logging
import numpy as np
import torch
import os
import itertools
from fairseq.data import (
AppendTokenDataset,
ConcatDataset,
FairseqDataset,
PrependTokenDataset,
data_utils,
indexed_dataset,
)
logger = logging.getLogger(__name__)
def collate(
samples,
pad_idx,
eos_idx,
left_pad_source=True,
left_pad_target=False,
input_feeding=True,
pad_to_length=None,
pad_to_multiple=1,
):
if len(samples) == 0:
return {}
def merge(key, left_pad, move_eos_to_beginning=False, pad_to_length=None):
return data_utils.collate_tokens(
[s[key] for s in samples],
pad_idx,
None,
left_pad,
move_eos_to_beginning,
pad_to_length=pad_to_length,
pad_to_multiple=pad_to_multiple,
)
def check_alignment(alignment, src_len, tgt_len):
if alignment is None or len(alignment) == 0:
return False
if (
alignment[:, 0].max().item() >= src_len - 1
or alignment[:, 1].max().item() >= tgt_len - 1
):
logger.warning("alignment size mismatch found, skipping alignment!")
return False
return True
def compute_alignment_weights(alignments):
"""
Given a tensor of shape [:, 2] containing the source-target indices
corresponding to the alignments, a weight vector containing the
inverse frequency of each target index is computed.
For e.g. if alignments = [[5, 7], [2, 3], [1, 3], [4, 2]], then
a tensor containing [1., 0.5, 0.5, 1] should be returned (since target
index 3 is repeated twice)
"""
align_tgt = alignments[:, 1]
_, align_tgt_i, align_tgt_c = torch.unique(
align_tgt, return_inverse=True, return_counts=True
)
align_weights = align_tgt_c[align_tgt_i[np.arange(len(align_tgt))]]
return 1.0 / align_weights.float()
id = torch.LongTensor([s["id"] for s in samples])
src_tokens = merge(
"source",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
ref_tokens = merge(
"reference",
left_pad=left_pad_source,
pad_to_length=pad_to_length["source"] if pad_to_length is not None else None,
)
# sort by descending source length
src_lengths = torch.LongTensor(
[s["source"].ne(pad_idx).long().sum() for s in samples]
)
ref_lengths = torch.LongTensor(
[s["reference"].ne(pad_idx).long().sum() for s in samples]
)
src_lengths, sort_order = src_lengths.sort(descending=True)
id = id.index_select(0, sort_order)
src_tokens = src_tokens.index_select(0, sort_order)
ref_lengths = ref_lengths.index_select(0, sort_order)
ref_tokens = ref_tokens.index_select(0, sort_order)
prev_output_tokens = None
target = None
if samples[0].get("target", None) is not None:
target = merge(
"target",
left_pad=left_pad_target,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
target = target.index_select(0, sort_order)
tgt_lengths = torch.LongTensor(
[s["target"].ne(pad_idx).long().sum() for s in samples]
).index_select(0, sort_order)
ntokens = tgt_lengths.sum().item()
if samples[0].get("prev_output_tokens", None) is not None:
prev_output_tokens = merge("prev_output_tokens", left_pad=left_pad_target)
elif input_feeding:
# we create a shifted version of targets for feeding the
# previous output token(s) into the next decoder step
prev_output_tokens = merge(
"target",
left_pad=left_pad_target,
move_eos_to_beginning=True,
pad_to_length=pad_to_length["target"]
if pad_to_length is not None
else None,
)
else:
ntokens = src_lengths.sum().item()
batch = {
"id": id,
"nsentences": len(samples),
"ntokens": ntokens,
"net_input": {
"src_tokens": src_tokens,
"src_lengths": src_lengths,
},
"target": target,
"ref_tokens": ref_tokens,
"ref_lengths": ref_lengths,
}
if prev_output_tokens is not None:
batch["net_input"]["prev_output_tokens"] = prev_output_tokens.index_select(
0, sort_order
)
if samples[0].get("alignment", None) is not None:
bsz, tgt_sz = batch["target"].shape
src_sz = batch["net_input"]["src_tokens"].shape[1]
offsets = torch.zeros((len(sort_order), 2), dtype=torch.long)
offsets[:, 1] += torch.arange(len(sort_order), dtype=torch.long) * tgt_sz
if left_pad_source:
offsets[:, 0] += src_sz - src_lengths
if left_pad_target:
offsets[:, 1] += tgt_sz - tgt_lengths
alignments = [
alignment + offset
for align_idx, offset, src_len, tgt_len in zip(
sort_order, offsets, src_lengths, tgt_lengths
)
for alignment in [samples[align_idx]["alignment"].view(-1, 2)]
if check_alignment(alignment, src_len, tgt_len)
]
if len(alignments) > 0:
alignments = torch.cat(alignments, dim=0)
align_weights = compute_alignment_weights(alignments)
batch["alignments"] = alignments
batch["align_weights"] = align_weights
if samples[0].get("constraints", None) is not None:
# Collate the packed constraints across the samples, padding to
# the length of the longest sample.
lens = [sample.get("constraints").size(0) for sample in samples]
max_len = max(lens)
constraints = torch.zeros((len(samples), max_len)).long()
for i, sample in enumerate(samples):
constraints[i, 0 : lens[i]] = sample.get("constraints")
batch["constraints"] = constraints.index_select(0, sort_order)
return batch | null |
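The inverse-frequency weighting described in the `compute_alignment_weights` docstring above can be checked with a small standalone sketch (hypothetical, outside the dataset code), using the exact values from the docstring example:

```python
import torch

# Standalone sketch of the inverse-frequency weighting documented in
# compute_alignment_weights; values follow the docstring example.
alignments = torch.tensor([[5, 7], [2, 3], [1, 3], [4, 2]])
align_tgt = alignments[:, 1]  # target indices: [7, 3, 3, 2]
_, inv, counts = torch.unique(align_tgt, return_inverse=True, return_counts=True)
weights = 1.0 / counts[inv].float()
print(weights)  # tensor([1.0000, 0.5000, 0.5000, 1.0000]) -- target index 3 appears twice
```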
184,721 | import itertools
import logging
import io
import os
import sys
import time
from pathlib import Path
from typing import Any, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import (
read_from_stored_zip,
is_sf_audio_data,
)
FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS = {".npy", ".wav", ".flac", ".ogg"}
The provided code snippet includes necessary dependencies for implementing the `parse_path` function. Write a Python function `def parse_path(path: str) -> Tuple[str, List[int]]` to solve the following problem:
Parse data path which is either a path to 1. a .npy/.wav/.flac/.ogg file 2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]" Args: path (str): the data path to parse Returns: file_path (str): the file path slice_ptr (list of int): empty in case 1; byte offset and length for the slice in case 2
Here is the function:
def parse_path(path: str) -> Tuple[str, List[int]]:
"""Parse data path which is either a path to
1. a .npy/.wav/.flac/.ogg file
2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]"
Args:
path (str): the data path to parse
Returns:
file_path (str): the file path
slice_ptr (list of int): empty in case 1;
byte offset and length for the slice in case 2
"""
if Path(path).suffix in FEATURE_OR_SF_AUDIO_FILE_EXTENSIONS:
_path, slice_ptr = path, []
else:
_path, *slice_ptr = path.split(":")
if not Path(_path).is_file():
raise FileNotFoundError(f"File not found: {_path}")
assert len(slice_ptr) in {0, 1, 2}, f"Invalid path: {path}"
slice_ptr = [int(i) for i in slice_ptr]
return _path, slice_ptr | Parse data path which is either a path to 1. a .npy/.wav/.flac/.ogg file 2. a stored ZIP file with slicing info: "[zip_path]:[offset]:[length]" Args: path (str): the data path to parse Returns: file_path (str): the file path slice_ptr (list of int): empty in case 1; byte offset and length for the slice in case 2 |
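A hedged usage sketch of `parse_path` (the file names below are made up; the function checks `is_file()`, so they must exist on disk for the calls to succeed):

```python
# Hypothetical paths; parse_path raises FileNotFoundError if they do not exist.
parse_path("/data/utt1.wav")             # -> ("/data/utt1.wav", [])
parse_path("/data/feats.zip:1024:2048")  # -> ("/data/feats.zip", [1024, 2048])
```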
184,722 | import itertools
import logging
import io
import os
import sys
import time
from pathlib import Path
from typing import Any, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import (
read_from_stored_zip,
is_sf_audio_data,
)
logger = logging.getLogger(__name__)
def load_audio(manifest_path, max_keep, min_keep, retry_times=5):
n_long, n_short = 0, 0
names, inds, sizes, chunk_names, chunk_indices = [], [], [], [], []
for i in range(retry_times):
with open(manifest_path) as f:
root = f.readline().strip()
for ind, line in enumerate(f):
items = line.strip().split("\t")
assert len(items) == 2, line
sz = int(items[1])
if min_keep is not None and sz < min_keep:
n_short += 1
elif max_keep is not None and sz > max_keep:
n_long += 1
else:
fname = items[0].split(":")
if len(fname) > 2:
if len(chunk_names) == 0 or fname[0] != chunk_names[-1]:
chunk_names.append(fname[0])
chunk_indices.append(len(names))
names.append(items[0])
inds.append(ind)
sizes.append(sz)
if len(names) == 0:
logger.warn(f"Fail to load manifest for the {i} time")
time.sleep(1)
continue
else:
break
tot = ind + 1
logger.info(
(
f"max_keep={max_keep}, min_keep={min_keep}, "
f"loaded {len(names)}, skipped {n_short} short and {n_long} long, "
f"longest-loaded={max(sizes)}, shortest-loaded={min(sizes)}"
)
)
return root, names, inds, tot, sizes, chunk_names, chunk_indices | null |
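For reference, a sketch of the tab-separated manifest layout that `load_audio` expects (paths and sizes are illustrative): the first line is the audio root, and every following line is `path<TAB>num_samples`, where a path of the form `zip:offset:length` points into a stored ZIP and is collected into `chunk_names`/`chunk_indices`:

```text
/data/LibriSpeech
train-clean-100/19-198-0001.flac	123440
feats.zip:2048:40960	40960
```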
184,723 | import itertools
import logging
import io
import os
import sys
import time
from pathlib import Path
from typing import Any, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import (
read_from_stored_zip,
is_sf_audio_data,
)
logger = logging.getLogger(__name__)
def load_label(label_path, inds, tot, retry_times=5):
for i in range(retry_times):
with open(label_path) as f:
labels = [line.rstrip() for line in f]
if len(labels) == 0:
logger.warn(f"Fail to load label for the {i} time")
time.sleep(1)
continue
else:
break
assert (
len(labels) == tot
), f"number of labels does not match ({len(labels)} != {tot})"
labels = [labels[i] for i in inds]
return labels | null |
184,724 | import itertools
import logging
import io
import os
import sys
import time
from pathlib import Path
from typing import Any, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import (
read_from_stored_zip,
is_sf_audio_data,
)
logger = logging.getLogger(__name__)
def load_label_offset(label_path, inds, tot, retry_times=5):
for i in range(retry_times):
with open(label_path) as f:
code_lengths = [len(line.encode("utf-8")) for line in f]
if len(code_lengths) == 0:
logger.warn(f"Fail to load label for the {i} time")
time.sleep(1)
continue
else:
break
assert (
len(code_lengths) == tot
), f"number of labels does not match ({len(code_lengths)} != {tot})"
offsets = list(itertools.accumulate([0] + code_lengths))
offsets = [(offsets[i], offsets[i + 1]) for i in inds]
return offsets | null |
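A small worked example (made-up byte lengths) of the offset arithmetic in `load_label_offset`:

```python
import itertools

# Hypothetical utf-8 byte lengths of three label lines.
code_lengths = [5, 7, 3]
offsets = list(itertools.accumulate([0] + code_lengths))  # [0, 5, 12, 15]
pairs = [(offsets[i], offsets[i + 1]) for i in range(len(code_lengths))]
print(pairs)  # [(0, 5), (5, 12), (12, 15)] -- byte ranges usable with seek()/read()
```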
184,725 | import itertools
import logging
import io
import os
import sys
import time
from pathlib import Path
from typing import Any, List, Optional, Union, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from fairseq.data import data_utils, Dictionary
from fairseq.data.fairseq_dataset import FairseqDataset
from fairseq.data.audio.audio_utils import (
read_from_stored_zip,
is_sf_audio_data,
)
logger = logging.getLogger(__name__)
def verify_label_lengths(
audio_sizes,
audio_rate,
label_path,
label_rate,
inds,
tot,
tol=0.1, # tolerance in seconds
):
if label_rate < 0:
logger.info(f"{label_path} is sequence label. skipped")
return
with open(label_path) as f:
lengths = [len(line.rstrip().split()) for line in f]
assert len(lengths) == tot
lengths = [lengths[i] for i in inds]
num_invalid = 0
for i, ind in enumerate(inds):
dur_from_audio = audio_sizes[i] / audio_rate
dur_from_label = lengths[i] / label_rate
if abs(dur_from_audio - dur_from_label) > tol:
logger.warning(
(
f"audio and label duration differ too much "
f"(|{dur_from_audio} - {dur_from_label}| > {tol}) "
f"in line {ind+1} of {label_path}. Check if `label_rate` "
f"is correctly set (currently {label_rate}). "
f"num. of samples = {audio_sizes[i]}; "
f"label length = {lengths[i]}"
)
)
num_invalid += 1
if num_invalid > 0:
logger.warning(
f"total {num_invalid} (audio, label) pairs with mismatched lengths"
) | null |
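A quick numeric illustration (hypothetical values) of the tolerance check in `verify_label_lengths`:

```python
# 1.00 s of 16 kHz audio vs. 49 frame-level labels at 50 Hz (0.98 s).
audio_size, audio_rate = 16000, 16000
label_len, label_rate = 49, 50
diff = abs(audio_size / audio_rate - label_len / label_rate)
print(diff)  # ~0.02, within the default tol of 0.1 seconds, so no warning is logged
```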
184,726 | import itertools
import logging
import os
from fairseq.data import (
AppendTokenDataset,
LanguagePairDataset,
PrependTokenDataset,
StripTokenDataset,
TruncateDataset,
RandomCropDataset,
data_utils,
indexed_dataset,
)
from speechlm.data.concat_dataset import ConcatDataset
logger = logging.getLogger(__name__)
def load_langpair_dataset(
data_path,
split,
src,
src_dict,
tgt,
tgt_dict,
combine,
dataset_impl,
upsample_primary,
left_pad_source,
left_pad_target,
max_source_positions,
max_target_positions,
prepend_bos=False,
load_alignments=False,
truncate_source=False,
append_source_id=False,
num_buckets=0,
shuffle=True,
pad_to_multiple=1,
prepend_bos_src=None,
lang_format="[{}]",
input_feeding=True,
):
def split_exists(split, src, tgt, lang, data_path):
filename = os.path.join(data_path, "{}.{}-{}.{}".format(split, src, tgt, lang))
return indexed_dataset.dataset_exists(filename, impl=dataset_impl)
src_datasets = []
tgt_datasets = []
for k in itertools.count():
split_k = split + (str(k) if k > 0 else "")
# infer langcode
if split_exists(split_k, src, tgt, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, src, tgt))
elif split_exists(split_k, tgt, src, src, data_path):
prefix = os.path.join(data_path, "{}.{}-{}.".format(split_k, tgt, src))
else:
if k > 0:
break
else:
raise FileNotFoundError(
"Dataset not found: {} ({})".format(split, data_path)
)
src_dataset = data_utils.load_indexed_dataset(
prefix + src, src_dict, dataset_impl
)
if truncate_source:
src_dataset = AppendTokenDataset(
RandomCropDataset(
StripTokenDataset(src_dataset, src_dict.eos()),
max_source_positions - 1,
),
src_dict.eos(),
)
src_datasets.append(src_dataset)
tgt_dataset = data_utils.load_indexed_dataset(
prefix + tgt, tgt_dict, dataset_impl
)
if tgt_dataset is not None:
tgt_datasets.append(tgt_dataset)
logger.info(
"{} {} {}-{} {} examples".format(
data_path, split_k, src, tgt, len(src_datasets[-1])
)
)
if not combine:
break
assert len(src_datasets) == len(tgt_datasets) or len(tgt_datasets) == 0
if len(src_datasets) == 1:
src_dataset = src_datasets[0]
tgt_dataset = tgt_datasets[0] if len(tgt_datasets) > 0 else None
else:
sample_ratios = [1] * len(src_datasets)
sample_ratios[0] = upsample_primary
src_dataset = ConcatDataset(src_datasets, sample_ratios)
if len(tgt_datasets) > 0:
tgt_dataset = ConcatDataset(tgt_datasets, sample_ratios)
else:
tgt_dataset = None
if prepend_bos:
assert hasattr(src_dict, "bos_index") and hasattr(tgt_dict, "bos_index")
src_dataset = PrependTokenDataset(src_dataset, src_dict.bos())
if tgt_dataset is not None:
tgt_dataset = PrependTokenDataset(tgt_dataset, tgt_dict.bos())
elif prepend_bos_src is not None:
logger.info(f"prepending src bos: {prepend_bos_src}")
src_dataset = PrependTokenDataset(src_dataset, prepend_bos_src)
eos = None
if append_source_id:
src_dataset = AppendTokenDataset(
src_dataset, src_dict.index(lang_format.format(src))
)
if tgt_dataset is not None:
tgt_dataset = AppendTokenDataset(
tgt_dataset, tgt_dict.index(lang_format.format(tgt))
)
eos = tgt_dict.index(lang_format.format(tgt))
align_dataset = None
if load_alignments:
align_path = os.path.join(data_path, "{}.align.{}-{}".format(split, src, tgt))
if indexed_dataset.dataset_exists(align_path, impl=dataset_impl):
align_dataset = data_utils.load_indexed_dataset(
align_path, None, dataset_impl
)
tgt_dataset_sizes = tgt_dataset.sizes if tgt_dataset is not None else None
return LanguagePairDataset(
src_dataset,
src_dataset.sizes,
src_dict,
tgt_dataset,
tgt_dataset_sizes,
tgt_dict,
left_pad_source=left_pad_source,
left_pad_target=left_pad_target,
align_dataset=align_dataset,
eos=eos,
num_buckets=num_buckets,
shuffle=shuffle,
pad_to_multiple=pad_to_multiple,
input_feeding=input_feeding,
) | null |
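As a sketch of what `load_langpair_dataset` looks for on disk (split, language pair and extensions are illustrative, assuming the default indexed/mmap dataset implementation), `split_exists` probes files named `{split}.{src}-{tgt}.{lang}`:

```text
train.en-de.en.bin / train.en-de.en.idx    # source side
train.en-de.de.bin / train.en-de.de.idx    # target side
train1.en-de.en.* , train1.en-de.de.*      # extra shards, read when combine=True
train.align.en-de.*                        # alignments, read when load_alignments=True
```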
184,727 | import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def get_symbols_to_strip_from_output(generator):
if hasattr(generator, "symbols_to_strip_from_output"):
return generator.symbols_to_strip_from_output
else:
return {generator.eos} | null |
184,728 | import ast
import logging
import math
import os
import sys
from argparse import Namespace
from itertools import chain
import numpy as np
import torch
from omegaconf import DictConfig
from fairseq import checkpoint_utils, options, scoring, tasks, utils
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def progress_bar(
iterator,
log_format: Optional[str] = None,
log_interval: int = 100,
log_file: Optional[str] = None,
epoch: Optional[int] = None,
prefix: Optional[str] = None,
tensorboard_logdir: Optional[str] = None,
default_log_format: str = "tqdm",
wandb_project: Optional[str] = None,
wandb_run_name: Optional[str] = None,
azureml_logging: Optional[bool] = False,
):
if log_format is None:
log_format = default_log_format
if log_file is not None:
handler = logging.FileHandler(filename=log_file)
logger.addHandler(handler)
if log_format == "tqdm" and not sys.stderr.isatty():
log_format = "simple"
if log_format == "json":
bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "none":
bar = NoopProgressBar(iterator, epoch, prefix)
elif log_format == "simple":
bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
elif log_format == "tqdm":
bar = TqdmProgressBar(iterator, epoch, prefix)
else:
raise ValueError("Unknown log format: {}".format(log_format))
if tensorboard_logdir:
try:
# [FB only] custom wrapper for TensorBoard
import palaas # noqa
from .fb_tbmf_wrapper import FbTbmfWrapper
bar = FbTbmfWrapper(bar, log_interval)
except ImportError:
bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)
if wandb_project:
bar = WandBProgressBarWrapper(bar, wandb_project, run_name=wandb_run_name)
if azureml_logging:
bar = AzureMLProgressBarWrapper(bar)
return bar
class TimeMeter(Meter):
"""Computes the average occurrence of some event per second"""
def __init__(
self,
init: int = 0,
n: int = 0,
round: Optional[int] = None,
):
self.round = round
self.reset(init, n)
def reset(self, init=0, n=0):
self.init = init
self.start = time.perf_counter()
self.n = n
self.i = 0
def update(self, val=1):
self.n = type_as(self.n, val) + val
self.i += 1
def state_dict(self):
return {
"init": self.elapsed_time,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
if "start" in state_dict:
# backwards compatibility for old state_dicts
self.reset(init=state_dict["init"])
else:
self.reset(init=state_dict["init"], n=state_dict["n"])
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.n / self.elapsed_time
@property
def elapsed_time(self):
return self.init + (time.perf_counter() - self.start)
@property
def smoothed_value(self) -> float:
val = self.avg
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
class StopwatchMeter(Meter):
"""Computes the sum/avg duration of some event in seconds"""
def __init__(self, round: Optional[int] = None):
self.round = round
self.sum = 0
self.n = 0
self.start_time = None
def start(self):
self.start_time = time.perf_counter()
def stop(self, n=1, prehook=None):
if self.start_time is not None:
if prehook is not None:
prehook()
delta = time.perf_counter() - self.start_time
self.sum = self.sum + delta
self.n = type_as(self.n, n) + n
def reset(self):
self.sum = 0 # cumulative time during which stopwatch was active
self.n = 0 # total n across all start/stop
self.start()
def state_dict(self):
return {
"sum": self.sum,
"n": self.n,
"round": self.round,
}
def load_state_dict(self, state_dict):
self.sum = state_dict["sum"]
self.n = state_dict["n"]
self.start_time = None
self.round = state_dict.get("round", None)
@property
def avg(self):
return self.sum / self.n if self.n > 0 else self.sum
@property
def elapsed_time(self):
if self.start_time is None:
return 0.0
return time.perf_counter() - self.start_time
@property
def smoothed_value(self) -> float:
val = self.avg if self.sum > 0 else self.elapsed_time
if self.round is not None and val is not None:
val = safe_round(val, self.round)
return val
def _main(cfg: DictConfig, output_file):
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=output_file,
)
logger = logging.getLogger("fairseq_cli.generate")
utils.import_user_module(cfg.common)
if cfg.dataset.max_tokens is None and cfg.dataset.batch_size is None:
cfg.dataset.max_tokens = 12000
logger.info(cfg)
# Fix seed for stochastic decoding
if cfg.common.seed is not None and not cfg.generation.no_seed_provided:
np.random.seed(cfg.common.seed)
utils.set_torch_seed(cfg.common.seed)
use_cuda = torch.cuda.is_available() and not cfg.common.cpu
# Load dataset splits
task = tasks.setup_task(cfg.task)
# Set dictionaries
try:
src_dict = getattr(task, "source_dictionary", None)
except NotImplementedError:
src_dict = None
tgt_dict = task.target_dictionary
overrides = ast.literal_eval(cfg.common_eval.model_overrides)
# Load ensemble
logger.info("loading model(s) from {}".format(cfg.common_eval.path))
models, saved_cfg = checkpoint_utils.load_model_ensemble(
utils.split_paths(cfg.common_eval.path),
arg_overrides=overrides,
task=task,
suffix=cfg.checkpoint.checkpoint_suffix,
strict=(cfg.checkpoint.checkpoint_shard_count == 1),
num_shards=cfg.checkpoint.checkpoint_shard_count,
)
# loading the dataset should happen after the checkpoint has been loaded so we can give it the saved task config
task.load_dataset(cfg.dataset.gen_subset, task_cfg=saved_cfg.task)
if cfg.generation.lm_path is not None:
overrides["data"] = cfg.task.data
try:
lms, _ = checkpoint_utils.load_model_ensemble(
[cfg.generation.lm_path], arg_overrides=overrides, task=None
)
except:
logger.warning(
f"Failed to load language model! Please make sure that the language model dict is the same "
f"as target dict and is located in the data dir ({cfg.task.data})"
)
raise
assert len(lms) == 1
else:
lms = [None]
# Optimize ensemble for generation
for model in chain(models, lms):
if model is None:
continue
if cfg.common.fp16:
model.half()
if use_cuda and not cfg.distributed_training.pipeline_model_parallel:
model.cuda()
model.prepare_for_inference_(cfg)
def _fp_convert_sample(sample):
def apply_half(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.half)
return t
def apply_bfloat16(t):
if t.dtype is torch.float32:
return t.to(dtype=torch.bfloat16)
return t
if cfg.common.fp16:
sample = utils.apply_to_sample(apply_half, sample)
if cfg.common.bf16:
sample = utils.apply_to_sample(apply_bfloat16, sample)
return sample
# Load alignment dictionary for unknown word replacement
# (None if no unknown word replacement, empty if no path to align dictionary)
align_dict = utils.load_align_dict(cfg.generation.replace_unk)
# Load dataset (possibly sharded)
itr = task.get_batch_iterator(
dataset=task.dataset(cfg.dataset.gen_subset),
max_tokens=cfg.dataset.max_tokens,
max_sentences=cfg.dataset.batch_size,
max_positions=utils.resolve_max_positions(
task.max_positions(), *[m.max_positions() for m in models]
),
ignore_invalid_inputs=cfg.dataset.skip_invalid_size_inputs_valid_test,
required_batch_size_multiple=cfg.dataset.required_batch_size_multiple,
seed=cfg.common.seed,
num_shards=cfg.distributed_training.distributed_world_size,
shard_id=cfg.distributed_training.distributed_rank,
num_workers=cfg.dataset.num_workers,
data_buffer_size=cfg.dataset.data_buffer_size,
).next_epoch_itr(shuffle=False)
progress = progress_bar.progress_bar(
itr,
log_format=cfg.common.log_format,
log_interval=cfg.common.log_interval,
default_log_format=("tqdm" if not cfg.common.no_progress_bar else "simple"),
)
# Initialize generator
gen_timer = StopwatchMeter()
extra_gen_cls_kwargs = {"lm_model": lms[0], "lm_weight": cfg.generation.lm_weight}
generator = task.build_generator(
models, cfg.generation, extra_gen_cls_kwargs=extra_gen_cls_kwargs
)
# Handle tokenization and BPE
tokenizer = task.build_tokenizer(cfg.tokenizer)
bpe = task.build_bpe(cfg.bpe)
def decode_fn(x):
if bpe is not None:
x = bpe.decode(x)
if tokenizer is not None:
x = tokenizer.decode(x)
return x
scorer = scoring.build_scorer(cfg.scoring, None)
num_sentences = 0
has_target = True
wps_meter = TimeMeter()
for sample in progress:
sample = utils.move_to_cuda(sample) if use_cuda else sample
sample = _fp_convert_sample(sample)
if "net_input" not in sample:
continue
prefix_tokens = None
if cfg.generation.prefix_size > 0:
prefix_tokens = sample["target"][:, : cfg.generation.prefix_size]
constraints = None
if "constraints" in sample:
constraints = sample["constraints"]
gen_timer.start()
hypos = task.inference_step(
generator,
models[0],
sample,
prefix_tokens=prefix_tokens,
constraints=constraints,
)
num_generated_tokens = sum(len(h["unit"]) for h in hypos)
gen_timer.stop(num_generated_tokens)
for i, sample_id in enumerate(sample["id"].tolist()):
has_target = sample["target"] is not None
# Remove padding
if "src_tokens" in sample["net_input"]:
src_tokens = utils.strip_pad(
sample["net_input"]["src_tokens"][i, :], tgt_dict.pad()
).cpu()
else:
src_tokens = None
target_tokens = None
if has_target:
target_tokens = (
utils.strip_pad(sample["target"][i, :], tgt_dict.pad()).cpu()
)
# Either retrieve the original sentences or regenerate them from tokens.
if align_dict is not None:
src_str = task.dataset(cfg.dataset.gen_subset).src.get_original_text(
sample_id
)
target_str = task.dataset(cfg.dataset.gen_subset).tgt.get_original_text(
sample_id
)
else:
if src_dict is not None:
src_str = src_dict.string(src_tokens, cfg.common_eval.post_process)
else:
src_str = ""
if has_target:
target_str = " ".join(map(str, target_tokens.numpy().tolist()))
src_str = decode_fn(src_str)
if not cfg.common_eval.quiet:
if src_dict is not None:
print("S-{}\t{}".format(sample_id, src_str), file=output_file)
if has_target:
print("T-{}\t{}".format(sample_id, target_str), file=output_file)
# Process top predictions
j = 0
hypo = hypos[i]
hypo_tokens = hypo["unit"].int().cpu()
hypo_str = " ".join(map(str, hypo_tokens.numpy().tolist()))
alignment = None
detok_hypo_str = hypo_str
# add duration prediction
hypo_duration = " ".join(map(str, hypo["duration"].int().cpu().numpy().tolist()))
hypo_fa_src_str = src_dict.string(hypo["fa_src"].cpu().numpy(), cfg.common_eval.post_process)
# hypo_fa_src_str = " ".join(map(str, hypo["fa_src"].int().cpu().numpy() - 4))
if not cfg.common_eval.quiet:
# score = hypo["score"] / math.log(2) # convert to base 2
score = 0.00
# original hypothesis (after tokenization and BPE)
# print(
# "H-{}\t{}\t{}".format(sample_id, score, hypo_str),
# file=output_file,
# )
# detokenized hypothesis
print(
"D-{}\t{}\t{}".format(sample_id, score, detok_hypo_str),
file=output_file,
)
# duration prediction
print(
"L-{}\t{}\t{}".format(sample_id, score, hypo_duration),
file=output_file,
)
# force-aligned upsampled src-tokens
print(
"U-{}\t{}\t{}".format(sample_id, score, hypo_fa_src_str),
file=output_file,
)
# print(
# "P-{}\t{}".format(
# sample_id,
# " ".join(
# map(
# lambda x: "{:.4f}".format(x),
# # convert from base e to base 2
# hypo["positional_scores"]
# .div_(math.log(2))
# .tolist(),
# )
# ),
# ),
# file=output_file,
# )
if cfg.generation.print_alignment == "hard":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[
"{}-{}".format(src_idx, tgt_idx)
for src_idx, tgt_idx in alignment
]
),
),
file=output_file,
)
if cfg.generation.print_alignment == "soft":
print(
"A-{}\t{}".format(
sample_id,
" ".join(
[",".join(src_probs) for src_probs in alignment]
),
),
file=output_file,
)
# Score only the top hypothesis
if has_target and j == 0:
if hasattr(scorer, "add_string"):
scorer.add_string(target_str, detok_hypo_str)
else:
scorer.add(target_tokens, hypo_tokens)
wps_meter.update(num_generated_tokens)
progress.log({"wps": round(wps_meter.avg)})
num_sentences += (
sample["nsentences"] if "nsentences" in sample else sample["id"].numel()
)
logger.info("NOTE: hypothesis and token scores are output in base 2")
logger.info(
"Translated {:,} sentences ({:,} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)".format(
num_sentences,
gen_timer.n,
gen_timer.sum,
num_sentences / gen_timer.sum,
1.0 / gen_timer.avg,
)
)
if has_target:
if cfg.bpe and not cfg.generation.sacrebleu:
if cfg.common_eval.post_process:
logger.warning(
"BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization"
)
else:
logger.warning(
"If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization"
)
# use print to be consistent with other main outputs: S-, H-, T-, D- and so on
print(
"Generate {} with beam={}: {}".format(
cfg.dataset.gen_subset, cfg.generation.beam, scorer.result_string()
),
file=output_file,
)
return scorer | null |
184,730 | import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import numpy as np
from argparse import Namespace
from collections import OrderedDict
import torch
from dataclasses import dataclass, field
from fairseq.data import (
Dictionary,
encoders,
data_utils,
StripTokenDataset,
PrependTokenDataset,
AppendTokenDataset,
DenoisingDataset,
ConcatDataset,
FairseqDataset,
iterators,
ResamplingDataset,
MaskTokensDataset,
LanguagePairDataset,
)
from fairseq.data.audio.speech_to_text_joint_dataset import S2TJointDataConfig
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
from speechlm.data.multimodal_corpus_dataset import MultiCorpusDataset
from speechlm.data.load_langpair_dataset import load_langpair_dataset
from speechlm.data.language_trible_dataset import LanguageTripleDataset, load_langtriple_dataset
from speechlm.data.hubert_dataset import HubertDataset
def _lang_token(lang: str):
return "<lang:{}>".format(lang)
The provided code snippet includes necessary dependencies for implementing the `_lang_token_index` function. Write a Python function `def _lang_token_index(dic: Dictionary, lang: str)` to solve the following problem:
Return language token index.
Here is the function:
def _lang_token_index(dic: Dictionary, lang: str):
"""Return language token index."""
idx = dic.index(_lang_token(lang))
assert idx != dic.unk_index, "cannot find language token for lang {}".format(lang)
return idx | Return language token index. |
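A hedged usage sketch of the two helpers above (assumes the language token was added to the dictionary beforehand, otherwise the assert fires):

```python
# Hypothetical: "<lang:de>" must already exist in `dic`.
_lang_token("de")                    # -> "<lang:de>"
idx = _lang_token_index(dic, "de")   # index of "<lang:de>" in the Dictionary
```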
184,731 | import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import numpy as np
from argparse import Namespace
from collections import OrderedDict
import torch
from dataclasses import dataclass, field
from fairseq.data import (
Dictionary,
encoders,
data_utils,
StripTokenDataset,
PrependTokenDataset,
AppendTokenDataset,
DenoisingDataset,
ConcatDataset,
FairseqDataset,
iterators,
ResamplingDataset,
MaskTokensDataset,
LanguagePairDataset,
)
from fairseq.data.audio.speech_to_text_joint_dataset import S2TJointDataConfig
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
from speechlm.data.multimodal_corpus_dataset import MultiCorpusDataset
from speechlm.data.load_langpair_dataset import load_langpair_dataset
from speechlm.data.language_trible_dataset import LanguageTripleDataset, load_langtriple_dataset
from speechlm.data.hubert_dataset import HubertDataset
def get_whole_word_mask(args, dictionary):
def is_beginning_of_word(i):
if i < dictionary.nspecial:
# special elements are always considered beginnings
return True
tok = dictionary[i]
if tok.startswith("madeupword"):
return True
elif tok in ["<unk>", "<s>", "</s>", "<pad>", "|", "<eps>"]:
return True
else:
return False
mask_whole_words = torch.ByteTensor(
list(map(is_beginning_of_word, range(len(dictionary))))
)
return mask_whole_words | null |
184,732 | import logging
import os
import sys
from typing import Dict, List, Optional, Tuple
from pathlib import Path
import numpy as np
from argparse import Namespace
from collections import OrderedDict
import torch
from dataclasses import dataclass, field
from fairseq.data import (
Dictionary,
encoders,
data_utils,
StripTokenDataset,
PrependTokenDataset,
AppendTokenDataset,
DenoisingDataset,
ConcatDataset,
FairseqDataset,
iterators,
ResamplingDataset,
MaskTokensDataset,
LanguagePairDataset,
)
from fairseq.data.audio.speech_to_text_joint_dataset import S2TJointDataConfig
from fairseq.data.shorten_dataset import maybe_shorten_dataset
from fairseq.dataclass.configs import FairseqDataclass
from fairseq.tasks import register_task
from fairseq.tasks.fairseq_task import FairseqTask
from fairseq.dataclass.constants import ChoiceEnum
from omegaconf import MISSING
from speechlm.data.multimodal_corpus_dataset import MultiCorpusDataset
from speechlm.data.load_langpair_dataset import load_langpair_dataset
from speechlm.data.language_trible_dataset import LanguageTripleDataset, load_langtriple_dataset
from speechlm.data.hubert_dataset import HubertDataset
The provided code snippet includes necessary dependencies for implementing the `get_repeative_start` function. Write a Python function `def get_repeative_start(tokens)` to solve the following problem:
tokens: torch.Tensor with repetitive tokens
Here is the function:
def get_repeative_start(tokens):
"""
tokens: torch.Tensor with repetitive tokens
"""
length = len(tokens)
rep_start_id = tokens[:-1] != tokens[1:]
return torch.cat([torch.tensor([True]), rep_start_id]) | tokens: torch.Tensor with repetitive tokens |
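A small check (made-up token ids) of what `get_repeative_start` returns:

```python
import torch

# Runs of [5, 5, 5], [7, 7], [9]: True marks the first token of each run.
tokens = torch.tensor([5, 5, 5, 7, 7, 9])
print(get_repeative_start(tokens))
# tensor([ True, False, False,  True, False,  True])
```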
184,733 | from typing import List, Dict, Any
from dataclasses import dataclass, field
import torch
import torch.nn.functional as F
from fairseq import metrics, utils
from fairseq.criterions import FairseqCriterion, register_criterion
from fairseq.dataclass import FairseqDataclass
from fairseq.data.data_utils import lengths_to_mask
from fairseq.models.fairseq_model import FairseqEncoderModel
def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=None, reduce=True):
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(-1)
nll_loss = -lprobs.gather(dim=-1, index=target)
smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
nll_loss.masked_fill_(pad_mask, 0.0)
smooth_loss.masked_fill_(pad_mask, 0.0)
else:
nll_loss = nll_loss.squeeze(-1)
smooth_loss = smooth_loss.squeeze(-1)
if reduce:
# pad_mask only exists when ignore_index is set; otherwise count every target
ntokens = (~pad_mask).sum() if ignore_index is not None else target.numel()
nll_loss = nll_loss.sum() / ntokens
smooth_loss = smooth_loss.sum() / ntokens
eps_i = epsilon / (lprobs.size(-1) - 1)
loss = (1.0 - epsilon - eps_i) * nll_loss + eps_i * smooth_loss
return loss, nll_loss | null |
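A minimal sketch (hypothetical shapes and padding index) of calling `label_smoothed_nll_loss`:

```python
import torch
import torch.nn.functional as F

# (batch=2, time=5, vocab=10); pad index 1 is excluded from the loss.
lprobs = F.log_softmax(torch.randn(2, 5, 10), dim=-1)
target = torch.randint(0, 10, (2, 5))
loss, nll_loss = label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=1)
```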
184,734 | import argparse
from tqdm import tqdm
from pydub import AudioSegment
import torchaudio
import os
def mp3_convert_wav(mp3_file, wav_file):
try:
sound = AudioSegment.from_mp3(mp3_file)
sound = sound.set_frame_rate(16000)
sound = sound.set_channels(1)
sound = sound.set_sample_width(2)
sound.export(wav_file, format="wav")
except Exception as e:
print(e) | null |
184,735 | import argparse
import logging
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
filter_manifest_df,
gen_config_yaml,
gen_vocab,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
from pydub import AudioSegment
import soundfile as sf
import sacremoses
def mp3_convert_wav(mp3_file, wav_file):
sound = AudioSegment.from_mp3(mp3_file)
sound = sound.set_frame_rate(16000)
sound = sound.set_channels(1)
sound = sound.set_sample_width(2)
sound.export(wav_file, format="wav")
184,736 | import argparse
import logging
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
filter_manifest_df,
gen_config_yaml,
gen_vocab,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
from pydub import AudioSegment
import soundfile as sf
import sacremoses
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text"]
class CoVoST(Dataset):
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
def __getitem__(
self, n: int
) -> Tuple[Tensor, int, str, str, Optional[str], str, str]:
def __len__(self) -> int:
def gen_vocab(
input_path: Path, output_path_prefix: Path, model_type="bpe",
vocab_size=1000, special_symbols: Optional[List[str]] = None
):
def save_df_to_tsv(dataframe, path: Union[str, Path]):
def filter_manifest_df(
df, is_train_split=False, extra_filters=None, min_n_frames=5, max_n_frames=3000
):
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
outroot = root / f"{args.src_lang}-{args.tgt_lang}"
if args.vocab_type != "char":
outroot = root / f"{args.src_lang}-{args.tgt_lang}-{args.vocab_type}"
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
# 1. Extract features
# mp3-to-wav conversion can take a very long time; better to run it externally with multiple threads.
feature_root = root / "wav"
# feature_root.mkdir(exist_ok=True)
# for split in CoVoST.SPLITS:
# print(f"Fetching split {split}...")
# dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
# print("Converting mp3 to wav...")
# handle = open(root / f"{split}.id", "w")
# for waveform, _, _, _, _, utt_id in tqdm(dataset):
# wav_file = feature_root / f"{utt_id}.wav"
# print(waveform, file=handle)
# mp3_convert_wav(waveform, wav_file)
#2. Generate TSV manifest
print("Generating manifest...")
train_text = []
task = f"asr_{args.src_lang}"
if args.tgt_lang is not None:
task = f"st_{args.src_lang}_{args.tgt_lang}"
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
for waveform, _, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
wav_file = feature_root / f"{utt_id}.wav"
manifest["id"].append(utt_id)
manifest["audio"].append(wav_file.as_posix().replace("/data/", "/mnt/default/"))
manifest["n_frames"].append(sf.info(wav_file).frames)
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
is_train_split = split.startswith("train")
if is_train_split:
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split, min_n_frames=320, max_n_frames=480000)
save_df_to_tsv(df, outroot / f"{split}_{task}.tsv")
# Generate vocab
vocab_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{vocab_size_str}_{task}"
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
outroot / spm_filename_prefix,
args.vocab_type,
args.vocab_size
)
# Generate config YAML
# gen_config_yaml(
# outroot,
# spm_filename=spm_filename_prefix + ".model",
# yaml_filename=f"config_{task}.yaml",
# specaugment_policy="lb",
# ) | null |
184,737 | import os
import argparse
from tqdm import tqdm
import numpy as np
def writefile(filename, lines):
with open(filename, 'w', encoding='utf-8') as f:
f.writelines(lines) | null |
184,738 | import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
import torchaudio
from tqdm import tqdm
import numpy as np
import torch
from fairseq.data.audio.audio_utils import convert_waveform
from examples.speech_to_text.data_utils import save_df_to_tsv
from examples.speech_synthesis.data_utils import extract_pitch
def get_duration(fa_phone):
def convert_waveform(
waveform: Union[np.ndarray, torch.Tensor], sample_rate: int,
normalize_volume: bool = False, to_mono: bool = False,
to_sample_rate: Optional[int] = None
) -> Tuple[Union[np.ndarray, torch.Tensor], int]:
def save_df_to_tsv(dataframe, path: Union[str, Path]):
def extract_pitch(
waveform: torch.Tensor, sample_rate: int,
output_path: Optional[Path] = None, hop_length: int = 256,
log_scale: bool = True, phoneme_durations: Optional[List[int]] = None
):
def process(args):
# assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
for s in args.splits:
if args.add_pitch:
pitch_root = out_root / "pitch" / s
pitch_root.mkdir(exist_ok=True)
manifest = defaultdict(list)
with open(audio_manifest_root / f"{s}.audio.tsv") as f1, \
open(audio_manifest_root / f"{s}.phn") as f2, \
open(audio_manifest_root / f"{s}.km") as f3:
audio_root = f1.readline().strip()
audio_root = Path(audio_root)
for audio_path, fa_phone, fa_unit in tqdm(zip(f1, f2, f3)):
record = True
audio_path, n_frames = audio_path.strip().split("\t")
fa_phone = fa_phone.strip().split()
fa_unit = fa_unit.strip()
uttid = audio_path.split("/")[-1].split(".")[0]
speaker = uttid.split("-")[0]
if args.add_duration:
assert len(fa_phone) == len(fa_unit.split())
fa_phone = np.array(list(map(int, fa_phone)))
duration = get_duration(fa_phone)
reduced_phone = torch.LongTensor(fa_phone).unique_consecutive().numpy()
if args.add_pitch:
pitch_path = pitch_root / f"{uttid}.npy"
if not pitch_path.is_file():
waveform, sample_rate = torchaudio.load(audio_root / audio_path)
waveform, sample_rate = convert_waveform(
waveform, sample_rate, normalize_volume=args.normalize_volume,
)
pitch = extract_pitch(
waveform, sample_rate, None,
hop_length=args.hop_length, log_scale=True,
phoneme_durations=duration
)
if pitch is not None:
np.save(pitch_path.as_posix(), pitch)
else:
record = False
else:
reduced_phone = fa_phone
if record:
manifest["id"].append(uttid)
manifest["speaker"].append(speaker)
manifest["n_frames"].append(len(fa_unit.split()))
manifest["tgt_text"].append(" ".join(map(str, reduced_phone)))
manifest["unit"].append(fa_unit)
if args.add_duration:
manifest["duration"].append(" ".join(map(str, duration)))
if args.add_pitch:
manifest["pitch"].append(f"pitch/{s}/{uttid}.npy")
save_df_to_tsv(
pd.DataFrame.from_dict(manifest),
out_root / f"{s}.tsv"
) | null |
184,739 | import argparse
import numpy as np
import sys
from g2p_en import G2p
from tqdm import tqdm
import logging
def get_parser():
parser = argparse.ArgumentParser(
description="converts words to phones adding optional silences around in between words"
)
parser.add_argument(
"--sil-prob",
"-s",
type=float,
default=0,
help="probability of inserting silence between each word",
)
parser.add_argument(
"--surround",
action="store_true",
help="if set, surrounds each example with silence",
)
parser.add_argument(
"--lexicon",
help="lexicon to convert to phones",
required=True,
)
parser.add_argument(
"--strict",
action="store_true",
help="if set, OOV words will raise a error (for train/valid set)",
)
parser.add_argument(
"--input",
"-i",
help="input text file",
required=True,
)
parser.add_argument(
"--output",
"-o",
help="input text file",
required=True,
)
return parser | null |
184,740 | import argparse
import numpy as np
import sys
from g2p_en import G2p
from tqdm import tqdm
import logging
The provided code snippet includes necessary dependencies for implementing the `normalize_phn` function. Write a Python function `def normalize_phn(phons)` to solve the following problem:
convert g2p style phone to 39-phone set
Here is the function:
def normalize_phn(phons):
"""
convert g2p style phone to 39-phone set
"""
return [p.rstrip('0123456789') for p in phons] | convert g2p style phone to 39-phone set |
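For example (illustrative g2p output), the trailing stress digits are stripped:

```python
print(normalize_phn(["HH", "AH0", "L", "OW1"]))  # ['HH', 'AH', 'L', 'OW']
```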
184,741 | import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
from tqdm import tqdm
import numpy as np
from examples.speech_to_text.data_utils import save_df_to_tsv
The provided code snippet includes necessary dependencies for implementing the `get_duration` function. Write a Python function `def get_duration(fa_phone)` to solve the following problem:
fa_phone: force-aligned phone, 1-D numpy
Here is the function:
def get_duration(fa_phone):
"""fa_phone: force-aligned phone, 1-D numpy"""
same = np.concatenate(([True], fa_phone[:-1] != fa_phone[1:], [True]))
index = np.where(same)[0]
count = np.diff(index)
return count | fa_phone: force-aligned phone, 1-D numpy |
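A quick check (made-up phone ids) of the run-length counting in `get_duration`:

```python
import numpy as np

# Phone 3 lasts 3 frames, phone 8 lasts 2 frames, phone 5 lasts 1 frame.
fa_phone = np.array([3, 3, 3, 8, 8, 5])
print(get_duration(fa_phone))  # array([3, 2, 1])
```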
184,742 | import argparse
import logging
from pathlib import Path
from collections import defaultdict
import pandas as pd
from tqdm import tqdm
import numpy as np
from examples.speech_to_text.data_utils import save_df_to_tsv
def save_df_to_tsv(dataframe, path: Union[str, Path]):
_path = path if isinstance(path, str) else path.as_posix()
dataframe.to_csv(
_path,
sep="\t",
header=True,
index=False,
encoding="utf-8",
escapechar="\\",
quoting=csv.QUOTE_NONE,
)
def process(args):
# assert "train" in args.splits
out_root = Path(args.output_root).absolute()
out_root.mkdir(exist_ok=True)
print("Fetching data...")
audio_manifest_root = Path(args.audio_manifest_root).absolute()
for s in args.splits:
manifest = defaultdict(list)
with open(audio_manifest_root / f"{s}.phn") as f1:
for i, reduced_phone in tqdm(enumerate(f1)):
reduced_phone = reduced_phone.strip()
uttid = f"librilm-{i}"
speaker = uttid.split("-")[0]
manifest["id"].append(uttid)
manifest["speaker"].append(speaker)
manifest["n_frames"].append(len(reduced_phone))
manifest["tgt_text"].append(reduced_phone)
manifest["unit"].append(0)
save_df_to_tsv(
pd.DataFrame.from_dict(manifest),
out_root / f"{s}.tsv"
) | null |
184,743 | import ast
import hashlib
import logging
import os
import shutil
import sys
from dataclasses import dataclass, field, is_dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
import editdistance
import torch
import torch.distributed as dist
import examples
from examples.speech_recognition.new.decoders.decoder_config import (
DecoderConfig,
FlashlightDecoderConfig,
)
from examples.speech_recognition.new.decoders.decoder import Decoder
from fairseq import checkpoint_utils, distributed_utils, progress_bar, tasks, utils
from fairseq.data.data_utils import post_process
from fairseq.dataclass.configs import (
CheckpointConfig,
CommonConfig,
CommonEvalConfig,
DatasetConfig,
DistributedTrainingConfig,
FairseqDataclass,
)
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.logging.progress_bar import BaseProgressBar
from fairseq.models.fairseq_model import FairseqModel
from omegaconf import OmegaConf
import hydra
from hydra.core.config_store import ConfigStore
logger = logging.getLogger(__name__)
class InferConfig(FairseqDataclass):
task: Any = None
decoding: DecodingConfig = DecodingConfig()
common: CommonConfig = CommonConfig()
common_eval: CommonEvalConfig = CommonEvalConfig()
checkpoint: CheckpointConfig = CheckpointConfig()
distributed_training: DistributedTrainingConfig = DistributedTrainingConfig()
dataset: DatasetConfig = DatasetConfig()
is_ax: bool = field(
default=False,
metadata={
"help": "if true, assumes we are using ax for tuning and returns a tuple for ax to consume"
},
)
def hydra_main(cfg: InferConfig) -> Union[float, Tuple[float, Optional[float]]]:
container = OmegaConf.to_container(cfg, resolve=True, enum_to_str=True)
cfg = OmegaConf.create(container)
OmegaConf.set_struct(cfg, True)
if cfg.common.reset_logging:
reset_logging()
utils.import_user_module(cfg.common)
# logger.info("Config:\n%s", OmegaConf.to_yaml(cfg))
wer = float("inf")
try:
if cfg.common.profile:
with torch.cuda.profiler.profile():
with torch.autograd.profiler.emit_nvtx():
distributed_utils.call_main(cfg, main)
else:
distributed_utils.call_main(cfg, main)
wer = parse_wer(get_wer_file(cfg))
except BaseException as e: # pylint: disable=broad-except
if not cfg.common.suppress_crashes:
raise
else:
logger.error("Crashed! %s", str(e))
logger.info("Word error rate: %.4f", wer)
if cfg.is_ax:
return wer, None
return wer
def cli_main() -> None:
try:
from hydra._internal.utils import (
get_args,
) # pylint: disable=import-outside-toplevel
cfg_name = get_args().config_name or "infer"
except ImportError:
logger.warning("Failed to get config name from hydra args")
cfg_name = "infer"
cs = ConfigStore.instance()
cs.store(name=cfg_name, node=InferConfig)
for k in InferConfig.__dataclass_fields__:
if is_dataclass(InferConfig.__dataclass_fields__[k].type):
v = InferConfig.__dataclass_fields__[k].default
cs.store(name=k, node=v)
hydra_main() # pylint: disable=no-value-for-parameter | null |
184,744 | import logging
import torch
from fairseq import utils
from fairseq.models import (
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech import fastspeech2
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", -1)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layers = getattr(args, "encoder_layers", 4)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 4)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# pitch & energe
args.use_pitch = getattr(args, "use_pitch", False)
args.use_energe = getattr(args, "use_energe", False) | null |
184,745 | import logging
import torch
from fairseq import utils
from fairseq.models import (
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech import fastspeech2
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", -1)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1024)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.0)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 256)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 2)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 256)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 2)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# pitch & energe
args.use_pitch = getattr(args, "use_pitch", False)
args.use_energe = getattr(args, "use_energe", False) | null |
184,746 | import logging
import torch
from fairseq import utils
from fairseq.models import (
FairseqEncoderModel,
register_model,
register_model_architecture,
)
from fairseq.models.text_to_speech import fastspeech2
def base_architecture(args):
args.dropout = getattr(args, "dropout", 0.2)
args.output_frame_dim = getattr(args, "output_frame_dim", -1)
args.speaker_embed_dim = getattr(args, "speaker_embed_dim", 256)
# FFT blocks
args.fft_hidden_dim = getattr(args, "fft_hidden_dim", 1536)
args.fft_kernel_size = getattr(args, "fft_kernel_size", 9)
args.attention_dropout = getattr(args, "attention_dropout", 0.1)
args.encoder_layers = getattr(args, "encoder_layers", 6)
args.encoder_embed_dim = getattr(args, "encoder_embed_dim", 384)
args.encoder_attention_heads = getattr(args, "encoder_attention_heads", 6)
args.decoder_layers = getattr(args, "decoder_layers", 6)
args.decoder_embed_dim = getattr(args, "decoder_embed_dim", 384)
args.decoder_attention_heads = getattr(args, "decoder_attention_heads", 6)
# variance predictor
args.var_pred_n_bins = getattr(args, "var_pred_n_bins", 256)
args.var_pred_hidden_dim = getattr(args, "var_pred_hidden_dim", 256)
args.var_pred_kernel_size = getattr(args, "var_pred_kernel_size", 3)
args.var_pred_dropout = getattr(args, "var_pred_dropout", 0.5)
# postnet
args.add_postnet = getattr(args, "add_postnet", False)
args.postnet_dropout = getattr(args, "postnet_dropout", 0.5)
args.postnet_layers = getattr(args, "postnet_layers", 5)
args.postnet_conv_dim = getattr(args, "postnet_conv_dim", 512)
args.postnet_conv_kernel_size = getattr(args, "postnet_conv_kernel_size", 5)
# pitch & energe
args.use_pitch = getattr(args, "use_pitch", False)
args.use_energe = getattr(args, "use_energe", False) | null |
184,747 | import contextlib
import torch
import torch.nn as nn
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.models import FairseqEncoderDecoderModel, register_model
from fairseq.models.fairseq_decoder import FairseqDecoder
from fairseq.models.fairseq_encoder import FairseqEncoder
from fairseq.tasks import FairseqTask
from fairseq.dataclass import ChoiceEnum
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models.hubert import HubertAsrConfig
from speechlm.modules.transformer_decoder import TransformerDecoderScriptable
def Embedding(num_embeddings, embedding_dim, padding_idx):
m = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
nn.init.normal_(m.weight, mean=0, std=embedding_dim**-0.5)
nn.init.constant_(m.weight[padding_idx], 0)
return m | null |
184,748 | import contextlib
import torch
import torch.nn as nn
from argparse import Namespace
from dataclasses import dataclass, field
from typing import Any
from fairseq import checkpoint_utils, tasks, utils
from fairseq.models import FairseqEncoderDecoderModel, register_model
from fairseq.models.fairseq_decoder import FairseqDecoder
from fairseq.models.fairseq_encoder import FairseqEncoder
from fairseq.tasks import FairseqTask
from fairseq.dataclass import ChoiceEnum
from fairseq.dataclass.utils import convert_namespace_to_omegaconf
from fairseq.data.data_utils import lengths_to_padding_mask
from fairseq.models.hubert import HubertAsrConfig
from speechlm.modules.transformer_decoder import TransformerDecoderScriptable
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.0)
return m | null |
184,749 | import logging
import os
import torch
from transformers.data.processors.utils import (
DataProcessor, InputExample, InputFeatures)
from torch.utils.data import (
DataLoader, RandomSampler, SequentialSampler, TensorDataset)
logger = logging.getLogger(__name__)
class TatoebaProcesser(DataProcessor):
def convert_examples_to_features(cls, examples, tokenizer, max_length, pad_token_segment_id, pad_token, mask_padding_with_zero=True):
def get_examples(self, data_dir, langpair, lang, prefix="tatoeba"):
def load_and_cache_examples(args, langpair, lang, tokenizer, key="", prefix="tatoeba"):
cache_dir = os.path.join(args.data_dir, "pequod_cache")
os.makedirs(cache_dir, exist_ok=True)
cache_filename = os.path.join(
cache_dir, "cached_%s_%s_%s" % (langpair, lang, key))
if os.path.exists(cache_filename) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" % cache_filename)
features = torch.load(cache_filename)
else:
processer = TatoebaProcesser()
logger.info("Creating features from dataset file at %s" % args.data_dir)
examples = processer.get_examples(args.data_dir, langpair, lang, prefix)
features = TatoebaProcesser.convert_examples_to_features(
examples, tokenizer, args.max_seq_length, 0,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0],)
#logger.info("Saving features to cache file %s" % cache_filename)
#torch.save(features, cache_filename)
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids)
return dataset | null |
184,750 | import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from src.data import convert_examples_to_features
from src.io import lines_gen
logger = logging.getLogger(__name__)
_alias2lang, _lang2id, _langs = {}, {}, []  # module-level caches (assumed; not shown in the original snippet)
def get_alias2lang(data_dir):
if len(_alias2lang) > 0: return _alias2lang, _lang2id, _langs
for line, in lines_gen(os.path.join(data_dir, "labels-new")):
value = None
for alias in line.split(";"):
alias = alias.strip()
if alias == "": continue
if value is None: value = alias
_alias2lang[alias] = value
_langs.append(value)
for i, lang in enumerate(_langs): _lang2id[lang] = i
return _alias2lang, _lang2id, _langs
class WiliProcessor(DataProcessor):
def get_examples(self, data_dir, split):
examples = []
filename_x = os.path.join(data_dir, "x_%s.txt" % split)
filename_y = os.path.join(data_dir, "y_%s.txt" % split)
for i, (line_x, line_y) in enumerate(lines_gen(filename_x, filename_y)):
guid = "%s-%s" % (split, i)
examples.append(
InputExample(guid=guid, text_a=line_x, text_b=None, label=line_y))
return examples
def get_labels(self, data_dir):
_, _, langs = get_alias2lang(data_dir)
return langs
def load_and_cache_examples(args, data_dir, split, run_lang2id, tokenizer, key=""):
cache_filename = os.path.join(
data_dir, "cached_%s_%s" % (split, key))
if os.path.exists(cache_filename) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" % cache_filename)
features = torch.load(cache_filename)
else:
processor = WiliProcessor()
logger.info("Creating features from dataset file at %s" % data_dir)
label_list = processor.get_labels(data_dir)
examples = processor.get_examples(data_dir, split)
logger.info("%d Examples loaded" % len(examples))
features = convert_examples_to_features(
processor, examples, tokenizer, max_length=args.max_seq_length,
label_list=label_list, pad_token_segment_id=0,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0])
logger.info("Saving features to cache file %s" % cache_filename)
torch.save(features, cache_filename)
# Cut dataset to test langs
alias2lang, lang2id, _ = get_alias2lang(data_dir)
test_lang_ids = {lang2id[alias2lang[lang]] for lang in run_lang2id.keys()}
wili_id2run_langid = {
lang2id[alias2lang[lang]]:val for lang, val in run_lang2id.items()}
all_input_ids, all_attention_mask = [], []
all_token_type_ids, all_labels = [], []
for f in features:
if f.label not in test_lang_ids: continue
all_input_ids.append(f.input_ids)
all_attention_mask.append(f.attention_mask)
all_token_type_ids.append(f.token_type_ids)
all_labels.append(wili_id2run_langid[f.label])
all_input_ids = torch.tensor(all_input_ids, dtype=torch.long)
all_attention_mask = torch.tensor(all_attention_mask, dtype=torch.long)
all_token_type_ids = torch.tensor(all_token_type_ids, dtype=torch.long)
all_labels = torch.tensor(all_labels, dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset | null |
184,751 | import os
import logging
import torch
import collections
import json
from torch.utils.data import TensorDataset
from transformers.tokenization_bert import whitespace_tokenize
from src.pequod.data.utils_squad import (read_squad_examples,
    convert_examples_to_features)
# SquadExample, InputFeatures, _improve_answer_span and _check_is_max_context are used
# below but not imported here; they are assumed to come from the same utils_squad module.
logger = logging.getLogger(__name__)
def read_squad_examples(input_file, is_training, version_2_with_negative):
"""Read a SQuAD json file into a list of SquadExample."""
with open(input_file, "r", encoding='utf-8') as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length - 1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
logger.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
cls_token_at_end=False,
cls_token='[CLS]', sep_token='[SEP]', pad_token=0,
sequence_a_segment_id=0, sequence_b_segment_id=1,
cls_token_segment_id=0, pad_token_segment_id=0,
mask_padding_with_zero=True):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
# cnt_pos, cnt_neg = 0, 0
# max_N, max_M = 1024, 1024
# f = np.zeros((max_N, max_M), dtype=np.float32)
features = []
for (example_index, example) in enumerate(examples):
# if example_index % 100 == 0:
# logger.info('Converting %s/%s pos %s neg %s', example_index, len(examples), cnt_pos, cnt_neg)
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
# p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer)
# Original TF implem also keep the classification token (set to 0) (not sure why...)
p_mask = []
# CLS token at the beginning
if not cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = 0
# Query
for token in query_tokens:
tokens.append(token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_a_segment_id)
p_mask.append(1)
# Paragraph
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(sequence_b_segment_id)
p_mask.append(0)
paragraph_len = doc_span.length
# SEP token
tokens.append(sep_token)
segment_ids.append(sequence_b_segment_id)
p_mask.append(1)
# CLS token at the end
if cls_token_at_end:
tokens.append(cls_token)
segment_ids.append(cls_token_segment_id)
p_mask.append(0)
cls_index = len(tokens) - 1 # Index of classification token
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(pad_token)
input_mask.append(0 if mask_padding_with_zero else 1)
segment_ids.append(pad_token_segment_id)
p_mask.append(1)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
span_is_impossible = example.is_impossible
start_position = None
end_position = None
if is_training and not span_is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
span_is_impossible = True
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and span_is_impossible:
start_position = cls_index
end_position = cls_index
if example_index < 2:
logger.info("*** Example ***")
logger.info("unique_id: %s" % (unique_id))
logger.info("example_index: %s" % (example_index))
logger.info("doc_span_index: %s" % (doc_span_index))
logger.info("tokens: %s" % " ".join(tokens))
logger.info("token_to_orig_map: %s" % " ".join([
"%d:%d" % (x, y) for (x, y) in token_to_orig_map.items()]))
logger.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in token_is_max_context.items()
]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and span_is_impossible:
logger.info("impossible example")
if is_training and not span_is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
logger.info("start_position: %d" % (start_position))
logger.info("end_position: %d" % (end_position))
logger.info(
"answer: %s" % (answer_text))
features.append(
InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
cls_index=cls_index,
p_mask=p_mask,
paragraph_len=paragraph_len,
start_position=start_position,
end_position=end_position,
is_impossible=span_is_impossible))
unique_id += 1
return features
def load_and_cache_examples(args, split, lang, tokenizer, key="", evaluate=False):
cache_filename = os.path.join(
args.data_dir, "cached_%s_%s_%s" % (split, lang, key))
input_file = os.path.join(args.data_dir, "%s-%s.json" % (split, lang))
if os.path.exists(cache_filename):
logger.info("Loading features from cached file %s", cache_filename)
features = torch.load(cache_filename)
if evaluate:
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
else: examples = None
else:
logger.info("Creating features from dataset file at %s", input_file)
examples = read_squad_examples(input_file=input_file,
is_training=not evaluate,
version_2_with_negative=args.version_2_with_negative)
features = convert_examples_to_features(examples=examples,
tokenizer=tokenizer, max_seq_length=args.max_seq_length,
doc_stride=args.doc_stride, max_query_length=args.max_query_length,
is_training=not evaluate, cls_token=tokenizer.cls_token,
sep_token=tokenizer.sep_token)
logger.info("Saving features into cached file %s", cache_filename)
torch.save(features, cache_filename)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor(
[f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor(
[f.segment_ids for f in features], dtype=torch.long)
all_cls_index = torch.tensor(
[f.cls_index for f in features], dtype=torch.long)
all_p_mask = torch.tensor(
[f.p_mask for f in features], dtype=torch.float)
if evaluate:
all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_example_index, all_cls_index, all_p_mask)
else:
all_start_positions = torch.tensor(
[f.start_position for f in features], dtype=torch.long)
all_end_positions = torch.tensor(
[f.end_position for f in features], dtype=torch.long)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids,
all_start_positions, all_end_positions, all_cls_index, all_p_mask)
return dataset, examples, features | null |
184,755 | import argparse
import collections
import json
import numpy as np
import os
import re
import string
import sys
def merge_eval(main_eval, new_eval, prefix):
def make_precision_recall_eval(scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=None, title=None):
def run_precision_recall_analysis(main_eval, exact_raw, f1_raw, na_probs,
qid_to_has_ans, out_image_dir):
if out_image_dir and not os.path.exists(out_image_dir):
os.makedirs(out_image_dir)
num_true_pos = sum(1 for v in qid_to_has_ans.values() if v)
if num_true_pos == 0:
return
pr_exact = make_precision_recall_eval(
exact_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_exact.png'),
title='Precision-Recall curve for Exact Match score')
pr_f1 = make_precision_recall_eval(
f1_raw, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_f1.png'),
title='Precision-Recall curve for F1 score')
oracle_scores = {k: float(v) for k, v in qid_to_has_ans.items()}
pr_oracle = make_precision_recall_eval(
oracle_scores, na_probs, num_true_pos, qid_to_has_ans,
out_image=os.path.join(out_image_dir, 'pr_oracle.png'),
title='Oracle Precision-Recall curve (binary task of HasAns vs. NoAns)')
merge_eval(main_eval, pr_exact, 'pr_exact')
merge_eval(main_eval, pr_f1, 'pr_f1')
merge_eval(main_eval, pr_oracle, 'pr_oracle') | null |
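merge_eval and make_precision_recall_eval appear above only as signatures. As a point of reference, a minimal sketch of the usual merge_eval behaviour (copying every metric under a prefixed key, as in the official SQuAD 2.0 evaluation script) looks like this; the real body may differ:
def merge_eval(main_eval, new_eval, prefix):
    # copy each metric from new_eval into main_eval under "<prefix>_<metric>"
    for k in new_eval:
        main_eval['%s_%s' % (prefix, k)] = new_eval[k]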
184,758 | from __future__ import absolute_import, division, print_function
import json
import logging
import math
import collections
from io import open
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
from src.pequod.data.utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores
logger = logging.getLogger(__name__)
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
The provided code snippet includes necessary dependencies for implementing the `write_predictions` function. Write a Python function `def write_predictions(all_examples, all_features, all_results, n_best_size, max_answer_length, do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, verbose_logging, version_2_with_negative, null_score_diff_threshold)` to solve the following problem:
Write final predictions to the json file and log-odds of null if needed.
Here is the function:
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, output_prediction_file,
output_nbest_file, output_null_log_odds_file, verbose_logging,
version_2_with_negative, null_score_diff_threshold):
"""Write final predictions to the json file and log-odds of null if needed."""
logger.info("Writing predictions to: %s" % (output_prediction_file))
logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
min_null_feature_index = 0 # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# if we didn't include the empty option in the n-best, include it
if version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="",
start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could only have single null prediction.
# So we just create a nonce prediction in this case to avoid failure.
if len(nbest)==1:
nbest.insert(0,
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
if not version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
return all_predictions | Write final predictions to the json file and log-odds of null if needed. |
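To make the SQuAD v2 null-answer rule above concrete, here is a small numeric sketch of the thresholding (all values are made up):
score_null = 2.5                     # start_logits[0] + end_logits[0] of the best null span
best_start_logit, best_end_logit = 1.0, 0.8
score_diff = score_null - best_start_logit - best_end_logit   # 0.7
null_score_diff_threshold = 0.0
prediction = "" if score_diff > null_score_diff_threshold else "best span text"
print(prediction)                    # "" -> predict unanswerable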
184,759 | from __future__ import absolute_import, division, print_function
import json
import logging
import math
import collections
from io import open
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
from src.pequod.data.utils_squad_evaluate import find_all_best_thresh_v2, make_qid_to_has_ans, get_raw_scores
logger = logging.getLogger(__name__)
def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
# Therefore, we have to apply a semi-complicated alignment heuristic between
# `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if verbose_logging:
logger.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if verbose_logging:
logger.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in tok_ns_to_s_map.items():
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if verbose_logging:
logger.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if verbose_logging:
logger.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
def make_qid_to_has_ans(dataset):
qid_to_has_ans = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid_to_has_ans[qa['id']] = bool(qa['answers'])
return qid_to_has_ans
def get_raw_scores(dataset, preds):
exact_scores = {}
f1_scores = {}
for article in dataset:
for p in article['paragraphs']:
for qa in p['qas']:
qid = qa['id']
gold_answers = [a['text'] for a in qa['answers']
if normalize_answer(a['text'])]
if not gold_answers:
# For unanswerable questions, only correct answer is empty string
gold_answers = ['']
if qid not in preds:
print('Missing prediction for %s' % qid)
continue
a_pred = preds[qid]
# Take max over all gold answers
exact_scores[qid] = max(compute_exact(a, a_pred) for a in gold_answers)
f1_scores[qid] = max(compute_f1(a, a_pred) for a in gold_answers)
return exact_scores, f1_scores
def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
main_eval['best_exact'] = best_exact
main_eval['best_exact_thresh'] = exact_thresh
main_eval['best_f1'] = best_f1
main_eval['best_f1_thresh'] = f1_thresh
main_eval['has_ans_exact'] = has_ans_exact
main_eval['has_ans_f1'] = has_ans_f1
The provided code snippet includes necessary dependencies for implementing the `write_predictions_extended` function. Write a Python function `def write_predictions_extended(all_examples, all_features, all_results, n_best_size, max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, orig_data_file, start_n_top, end_n_top, version_2_with_negative, tokenizer, verbose_logging)` to solve the following problem:
XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. Requires utils_squad_evaluate.py
Here is the function:
def write_predictions_extended(all_examples, all_features, all_results, n_best_size,
max_answer_length, output_prediction_file,
output_nbest_file,
output_null_log_odds_file, orig_data_file,
start_n_top, end_n_top, version_2_with_negative,
tokenizer, verbose_logging):
""" XLNet write prediction logic (more complex than Bert's).
Write final predictions to the json file and log-odds of null if needed.
Requires utils_squad_evaluate.py
"""
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index",
"start_log_prob", "end_log_prob"])
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_log_prob", "end_log_prob"])
logger.info("Writing predictions to: %s", output_prediction_file)
# logger.info("Writing nbest to: %s" % (output_nbest_file))
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
cur_null_score = result.cls_logits
# if we could have irrelevant answers, get the min score of irrelevant
score_null = min(score_null, cur_null_score)
for i in range(start_n_top):
for j in range(end_n_top):
start_log_prob = result.start_top_log_probs[i]
start_index = result.start_top_index[i]
j_index = i * end_n_top + j
end_log_prob = result.end_top_log_probs[j_index]
end_index = result.end_top_index[j_index]
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= feature.paragraph_len - 1:
continue
if end_index >= feature.paragraph_len - 1:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_log_prob=start_log_prob,
end_log_prob=end_log_prob))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_log_prob + x.end_log_prob),
reverse=True)
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
# XLNet un-tokenizer
# Let's keep it simple for now and see if we need all this later.
#
# tok_start_to_orig_index = feature.tok_start_to_orig_index
# tok_end_to_orig_index = feature.tok_end_to_orig_index
# start_orig_pos = tok_start_to_orig_index[pred.start_index]
# end_orig_pos = tok_end_to_orig_index[pred.end_index]
# paragraph_text = example.paragraph_text
# final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
# Previously used Bert untokenizer
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, tokenizer.do_lower_case,
verbose_logging)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_log_prob=pred.start_log_prob,
end_log_prob=pred.end_log_prob))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="", start_log_prob=-1e6,
end_log_prob=-1e6))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_log_prob + entry.end_log_prob)
if not best_non_null_entry:
best_non_null_entry = entry
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_log_prob"] = entry.start_log_prob
output["end_log_prob"] = entry.end_log_prob
nbest_json.append(output)
assert len(nbest_json) >= 1
assert best_non_null_entry is not None
score_diff = score_null
scores_diff_json[example.qas_id] = score_diff
# note(zhiliny): always predict best_non_null_entry
# and the evaluation script will search for the best threshold
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
with open(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with open(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if version_2_with_negative:
with open(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
with open(orig_data_file, "r", encoding='utf-8') as reader:
orig_data = json.load(reader)["data"]
qid_to_has_ans = make_qid_to_has_ans(orig_data)
has_ans_qids = [k for k, v in qid_to_has_ans.items() if v]
no_ans_qids = [k for k, v in qid_to_has_ans.items() if not v]
exact_raw, f1_raw = get_raw_scores(orig_data, all_predictions)
out_eval = {}
find_all_best_thresh_v2(out_eval, all_predictions, exact_raw, f1_raw, scores_diff_json, qid_to_has_ans)
return out_eval | XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of null if needed. Requires utils_squad_evaluate.py |
184,760 | import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
class MLDocProcessor(XDocProcessor):
def get_labels(self): return ["ECAT", "CCAT", "GCAT", "MCAT"]
class CLSProcessor(XDocProcessor):
def get_labels(self): return ["0", "1"]
class XNLIProcesser(XDocProcessor):
"""data format: a pair: (label, text)"""
def get_labels(self): return ["neutral", "entailment", "contradiction"]
class TriXNLIProcesser(XNLIProcesser):
"""data format: a 3-tuple: (label, text-a, text-b)"""
def _create_examples(self, lines, set_type):
examples = []
for i, line in enumerate(lines):
guid = "%s-%s" % (set_type, i)
label, text_a, text_b = line[0], line[1], line[2]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_processor_class(dataset_name):
if dataset_name == "MLDoc": return MLDocProcessor
elif dataset_name == "CLS": return CLSProcessor
elif dataset_name == "XNLI": return XNLIProcesser
elif dataset_name == "TriXNLI": return TriXNLIProcesser
else: raise ValueError | null |
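A minimal usage sketch for the factory above (it assumes XDocProcessor subclasses can be constructed without arguments):
processor_cls = get_processor_class("XNLI")
processor = processor_cls()
print(processor.get_labels())   # ['neutral', 'entailment', 'contradiction']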
184,761 | import logging
import os
import torch
from transformers.data.processors.utils import (DataProcessor,
InputExample, InputFeatures)
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
logger = logging.getLogger(__name__)
def xdoc_convert_examples_to_features(
processor, examples, tokenizer, max_length, label_list,
pad_token=0, pad_token_segment_id=0, mask_padding_with_zero=True):
if label_list is None: label_list = processor.get_labels()
label_map = {label: i for i, label in enumerate(label_list)}
features = []
for ex_index, example in enumerate(examples):
if ex_index % 10000 == 0:
logger.info("Writing example %d" % ex_index)
inputs = tokenizer.encode_plus(
example.text_a,
example.text_b,
add_special_tokens=True,
max_length=max_length)
input_ids, token_type_ids = inputs["input_ids"], inputs["token_type_ids"]
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
padding_length = max_length - len(input_ids)
input_ids = input_ids + ([pad_token] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
assert len(input_ids) == max_length, "Error with input length {} vs {}".format(len(input_ids), max_length)
assert len(attention_mask) == max_length, "Error with input length {} vs {}".format(len(attention_mask), max_length)
assert len(token_type_ids) == max_length, "Error with input length {} vs {}".format(len(token_type_ids), max_length)
label = label_map[example.label]
if ex_index < 3:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("label: %s (id = %d)" % (example.label, label))
features.append(InputFeatures(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
label=label))
return features
def load_and_cache_examples(args, processor, split, lang, tokenizer, key=""):
cache_filename = os.path.join(
args.data_dir, "cached_%s_%s_%s" % (split, lang, key))
if os.path.exists(cache_filename) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" % cache_filename)
features = torch.load(cache_filename)
else:
logger.info("Creating features from dataset file at %s" % args.data_dir)
label_list = processor.get_labels()
examples = processor.get_examples(args.data_dir, split, lang)
logger.info("%d Examples loaded" % len(examples))
features = xdoc_convert_examples_to_features(
processor, examples, tokenizer, max_length=args.max_seq_length,
label_list=label_list, pad_token_segment_id=0,
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[0])
logger.info("Saving features to cache file %s" % cache_filename)
torch.save(features, cache_filename)
all_input_ids = torch.tensor(
[f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor(
[f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor(
[f.token_type_ids for f in features], dtype=torch.long)
all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset | null |
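The returned TensorDataset is usually wrapped in a DataLoader for training; a minimal sketch, assuming args, processor and tokenizer already exist and that "train"/"en" are valid split and language names here:
from torch.utils.data import DataLoader, RandomSampler
train_dataset = load_and_cache_examples(args, processor, "train", "en", tokenizer)
train_loader = DataLoader(
    train_dataset, sampler=RandomSampler(train_dataset), batch_size=32)
for input_ids, attention_mask, token_type_ids, labels in train_loader:
    pass  # forward/backward step goes here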
184,762 | import logging
import numpy as np
import os
import torch
import random
from torch.autograd import Variable
from torch.utils.data import DataLoader, TensorDataset
from src.pequod.trainer import (Trainer,
XClassificationTrainer, XQATrainer, SelfTrainer)
from transformers import AdamW, ConstantLRSchedule, WarmupLinearSchedule
def get_model_class(proto_train_class=None, is_qa=False):
class ProtoXClassificationTrainer(XClassificationTrainer, proto_train_class):
def __init__(self, args, model, tokenizer):
proto_train_class.__init__(self, args, model, tokenizer)
# _, self.optimizer, self.scheduler = self.init_optimizer(
# model, args.learning_rate)
def train_full_epoch(self, train_ds_keys, epoch_id, algo=None):
proto_train_class.train_full_epoch(
self, train_ds_keys, epoch_id, is_qa=False, algo=algo)
def before_loop(self):
# args = self.args
# if args.labeling_unlabeled_data:
# assert args.semi_split != ""
# for lang in args.test_langs.split(","):
# logger.info("Labeling lang: %s" % lang)
# self.labeling_dataset(self.model, (args.semi_split, lang))
pass
def init_optimizer(self, *args, **kwargs):
return proto_train_class.init_optimizer(self, *args, **kwargs)
class ProtoXQATrainer(XQATrainer, proto_train_class):
def __init__(self, args, model, tokenizer):
proto_train_class.__init__(self, args, model, tokenizer)
# _, self.optimizer, self.scheduler = self.init_optimizer(
# model, args.learning_rate)
self.example_feature_cache = {}
def train_full_epoch(self, train_ds_keys, epoch_id, algo=None):
proto_train_class.train_full_epoch(
self, train_ds_keys, epoch_id, is_qa=True, algo=algo)
def init_optimizer(self, *args, **kwargs):
return proto_train_class.init_optimizer(self, *args, **kwargs)
return ProtoXQATrainer if is_qa else ProtoXClassificationTrainer | null |
184,763 | def _lines_gen_from_single_file(filename):
with open(filename) as fp:
for line in fp: yield line.strip()
def lines_gen(*filenames):
for ret in zip(*map(_lines_gen_from_single_file, filenames)): yield ret | null |
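A tiny usage sketch of lines_gen reading two parallel files line by line (the file names are hypothetical):
for src_line, tgt_line in lines_gen("train.src", "train.tgt"):
    print(src_line, tgt_line)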
184,764 | import logging
import torch
from transformers.modeling_bert import (BertConfig, BertEncoder,
BertIntermediate, BertLayer,
BertModel, BertOutput,
BertSelfAttention,
BertSelfOutput)
from transformers.modeling_roberta import (RobertaEmbeddings,
                                           RobertaForMaskedLM,
                                           RobertaForSequenceClassification,
                                           RobertaModel)
from torch.nn import CrossEntropyLoss
# BertPreTrainedModel, RobertaConfig, RobertaLMHead and ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
# are referenced below but not imported here; they are assumed to come from the
# transformers modeling modules of the version this code targets.
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
)
class RobertaForMaskedLM(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import RobertaTokenizer, RobertaForMaskedLM
import torch
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMaskedLM.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
def convert_cxlm_to_transformers(ckpt_path):
ckpt = torch.load(ckpt_path, map_location="cpu")
args = ckpt["args"]
config = BertConfig(
vocab_size_or_config_json_file=250002,
hidden_size=args.encoder_embed_dim,
num_hidden_layers=args.encoder_layers,
num_attention_heads=args.encoder_attention_heads,
intermediate_size=args.encoder_ffn_embed_dim,
max_position_embeddings=args.max_positions + 2,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
print("Our BERT config:", config)
stat_dict = ckpt["model"]
new_stat_dict = {}
model = RobertaForMaskedLM(config)
model.eval()
sent_enc = "model_fast.decoder.sentence_encoder"
new_stat_dict["roberta.embeddings.word_embeddings.weight"] = stat_dict[sent_enc + ".embed_tokens.weight"]
new_stat_dict["roberta.embeddings.position_embeddings.weight"] = stat_dict[sent_enc + ".embed_positions.weight"]
new_stat_dict["roberta.embeddings.token_type_embeddings.weight"] = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
new_stat_dict["roberta.embeddings.LayerNorm.weight"] = stat_dict[sent_enc +".emb_layer_norm.weight"]
new_stat_dict["roberta.embeddings.LayerNorm.bias"] = stat_dict[sent_enc + ".emb_layer_norm.bias"]
for i in range(config.num_hidden_layers):
# Encoder: start of layer
# layer: BertLayer = model.roberta.encoder.layer[i]
layer = "roberta.encoder.layer.%d" % i
roberta_layer = sent_enc + (".layers.%d" % i)
### self attention
# self_attn: BertSelfAttention = layer.attention.self
self_attn = layer + ".attention.self"
assert(
stat_dict[roberta_layer+".self_attn.k_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.q_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.v_proj.weight"].data.shape == \
torch.Size((config.hidden_size, config.hidden_size))
)
new_stat_dict[self_attn+".query.weight"] = stat_dict[roberta_layer+".self_attn.q_proj.weight"]
new_stat_dict[self_attn+".query.bias"] = stat_dict[roberta_layer+".self_attn.q_proj.bias"]
new_stat_dict[self_attn+".key.weight"] = stat_dict[roberta_layer+".self_attn.k_proj.weight"]
new_stat_dict[self_attn+".key.bias"] = stat_dict[roberta_layer+".self_attn.k_proj.bias"]
new_stat_dict[self_attn+".value.weight"] = stat_dict[roberta_layer+".self_attn.v_proj.weight"]
new_stat_dict[self_attn+".value.bias"] = stat_dict[roberta_layer+".self_attn.v_proj.bias"]
### self-attention output
# self_output: BertSelfOutput = layer.attention.output
self_output = layer + ".attention.output"
assert(
model.roberta.encoder.layer[i].attention.output.dense.weight.shape == stat_dict[roberta_layer+".self_attn.out_proj.weight"].shape
)
new_stat_dict[self_output+".dense.weight"] = stat_dict[roberta_layer+".self_attn.out_proj.weight"]
new_stat_dict[self_output+".dense.bias"] = stat_dict[roberta_layer+".self_attn.out_proj.bias"]
new_stat_dict[self_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".self_attn_layer_norm.weight"]
new_stat_dict[self_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".self_attn_layer_norm.bias"]
### intermediate
# intermediate: BertIntermediate = layer.intermediate
intermediate = layer + ".intermediate"
assert(
model.roberta.encoder.layer[i].intermediate.dense.weight.shape == stat_dict[roberta_layer+".fc1.weight"].shape
)
#TODO
new_stat_dict[intermediate+".dense.weight"] = stat_dict[roberta_layer+".fc1.weight"]
new_stat_dict[intermediate+".dense.bias"] = stat_dict[roberta_layer+".fc1.bias"]
### output
# bert_output: BertOutput = layer.output
bert_output = layer + ".output"
assert(
model.roberta.encoder.layer[i].output.dense.weight.shape == stat_dict[roberta_layer+".fc2.weight"].shape
)
new_stat_dict[bert_output+".dense.weight"] = stat_dict[roberta_layer+".fc2.weight"]
new_stat_dict[bert_output+".dense.bias"] = stat_dict[roberta_layer+".fc2.bias"]
new_stat_dict[bert_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".final_layer_norm.weight"]
new_stat_dict[bert_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".final_layer_norm.bias"]
#### end of layer
new_stat_dict["lm_head.dense.weight"] = stat_dict["model_fast.decoder.lm_head.dense.weight"]
new_stat_dict["lm_head.dense.bias"] = stat_dict["model_fast.decoder.lm_head.dense.bias"]
new_stat_dict["lm_head.layer_norm.weight"] = stat_dict["model_fast.decoder.lm_head.layer_norm.weight"]
new_stat_dict["lm_head.layer_norm.bias"] = stat_dict["model_fast.decoder.lm_head.layer_norm.bias"]
new_stat_dict["lm_head.decoder.weight"] = stat_dict["model_fast.decoder.lm_head.weight"]
new_stat_dict["lm_head.bias"] = stat_dict["model_fast.decoder.lm_head.bias"]
new_stat_dict["roberta.pooler.dense.weight"] = model.roberta.pooler.dense.weight
new_stat_dict["roberta.pooler.dense.bias"] = model.roberta.pooler.dense.bias
if "proj_matrix_fast" in stat_dict:
new_stat_dict["proj_matrix_fast"] = stat_dict["proj_matrix_fast"]
# model.load_state_dict(new_stat_dict)
return new_stat_dict | null |
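A minimal usage sketch for the converter above (the checkpoint path is hypothetical): the returned state dict can be saved and later loaded into a RobertaForMaskedLM built with the same config as inside the converter, with strict=False to tolerate extra keys such as proj_matrix_fast:
new_state_dict = convert_cxlm_to_transformers("checkpoint_best.pt")
torch.save(new_state_dict, "pytorch_model.bin")
# later: model.load_state_dict(torch.load("pytorch_model.bin"), strict=False)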
184,765 | import logging
import torch
from transformers.modeling_bert import (BertConfig, BertEncoder,
BertIntermediate, BertLayer,
BertModel, BertOutput,
BertSelfAttention,
BertSelfOutput)
from transformers.modeling_roberta import (RobertaEmbeddings,
                                           RobertaForMaskedLM,
                                           RobertaForSequenceClassification,
                                           RobertaModel)
from torch.nn import CrossEntropyLoss
# BertPreTrainedModel, RobertaConfig, RobertaLMHead and ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
# are referenced below but not imported here; they are assumed to come from the
# transformers modeling modules of the version this code targets.
"The bare RoBERTa Model transformer outputting raw hidden-states without any specific head on top.",
)
class RobertaForMaskedLM(BertPreTrainedModel):
config_class = RobertaConfig
pretrained_model_archive_map = ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
base_model_prefix = "roberta"
def __init__(self, config):
super().__init__(config)
self.roberta = RobertaModel(config)
self.lm_head = RobertaLMHead(config)
self.init_weights()
def get_output_embeddings(self):
return self.lm_head.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
masked_lm_labels=None,
):
r"""
masked_lm_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Returns:
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.RobertaConfig`) and inputs:
masked_lm_loss (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
prediction_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`)
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):
Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)
of shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):
Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape
:obj:`(batch_size, num_heads, sequence_length, sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
Examples::
from transformers import RobertaTokenizer, RobertaForMaskedLM
import torch
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
model = RobertaForMaskedLM.from_pretrained('roberta-base')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
)
sequence_output = outputs[0]
prediction_scores = self.lm_head(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
def convert_roberta_to_transformers(ckpt_path):
ckpt = torch.load(ckpt_path, map_location="cpu")
args = ckpt["args"]
config = BertConfig(
vocab_size_or_config_json_file=250002,
hidden_size=args.encoder_embed_dim,
num_hidden_layers=args.encoder_layers,
num_attention_heads=args.encoder_attention_heads,
intermediate_size=args.encoder_ffn_embed_dim,
max_position_embeddings=args.max_positions + 2,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
print("Our BERT config:", config)
stat_dict = ckpt["model"]
new_stat_dict = {}
model = RobertaForMaskedLM(config)
model.eval()
sent_enc = "decoder.sentence_encoder"
new_stat_dict["roberta.embeddings.word_embeddings.weight"] = stat_dict[sent_enc + ".embed_tokens.weight"]
new_stat_dict["roberta.embeddings.position_embeddings.weight"] = stat_dict[sent_enc + ".embed_positions.weight"]
new_stat_dict["roberta.embeddings.token_type_embeddings.weight"] = torch.zeros_like(model.roberta.embeddings.token_type_embeddings.weight)
new_stat_dict["roberta.embeddings.LayerNorm.weight"] = stat_dict[sent_enc +".emb_layer_norm.weight"]
new_stat_dict["roberta.embeddings.LayerNorm.bias"] = stat_dict[sent_enc + ".emb_layer_norm.bias"]
for i in range(config.num_hidden_layers):
# Encoder: start of layer
# layer: BertLayer = model.roberta.encoder.layer[i]
layer = "roberta.encoder.layer.%d" % i
roberta_layer = sent_enc + (".layers.%d" % i)
### self attention
# self_attn: BertSelfAttention = layer.attention.self
self_attn = layer + ".attention.self"
assert(
stat_dict[roberta_layer+".self_attn.k_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.q_proj.weight"].data.shape == \
stat_dict[roberta_layer+".self_attn.v_proj.weight"].data.shape == \
torch.Size((config.hidden_size, config.hidden_size))
)
new_stat_dict[self_attn+".query.weight"] = stat_dict[roberta_layer+".self_attn.q_proj.weight"]
new_stat_dict[self_attn+".query.bias"] = stat_dict[roberta_layer+".self_attn.q_proj.bias"]
new_stat_dict[self_attn+".key.weight"] = stat_dict[roberta_layer+".self_attn.k_proj.weight"]
new_stat_dict[self_attn+".key.bias"] = stat_dict[roberta_layer+".self_attn.k_proj.bias"]
new_stat_dict[self_attn+".value.weight"] = stat_dict[roberta_layer+".self_attn.v_proj.weight"]
new_stat_dict[self_attn+".value.bias"] = stat_dict[roberta_layer+".self_attn.v_proj.bias"]
### self-attention output
# self_output: BertSelfOutput = layer.attention.output
self_output = layer + ".attention.output"
assert(
model.roberta.encoder.layer[i].attention.output.dense.weight.shape == stat_dict[roberta_layer+".self_attn.out_proj.weight"].shape
)
new_stat_dict[self_output+".dense.weight"] = stat_dict[roberta_layer+".self_attn.out_proj.weight"]
new_stat_dict[self_output+".dense.bias"] = stat_dict[roberta_layer+".self_attn.out_proj.bias"]
new_stat_dict[self_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".self_attn_layer_norm.weight"]
new_stat_dict[self_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".self_attn_layer_norm.bias"]
### intermediate
# intermediate: BertIntermediate = layer.intermediate
intermediate = layer + ".intermediate"
assert(
model.roberta.encoder.layer[i].intermediate.dense.weight.shape == stat_dict[roberta_layer+".fc1.weight"].shape
)
#TODO
new_stat_dict[intermediate+".dense.weight"] = stat_dict[roberta_layer+".fc1.weight"]
new_stat_dict[intermediate+".dense.bias"] = stat_dict[roberta_layer+".fc1.bias"]
### output
# bert_output: BertOutput = layer.output
bert_output = layer + ".output"
assert(
model.roberta.encoder.layer[i].output.dense.weight.shape == stat_dict[roberta_layer+".fc2.weight"].shape
)
new_stat_dict[bert_output+".dense.weight"] = stat_dict[roberta_layer+".fc2.weight"]
new_stat_dict[bert_output+".dense.bias"] = stat_dict[roberta_layer+".fc2.bias"]
new_stat_dict[bert_output+".LayerNorm.weight"] = stat_dict[roberta_layer+".final_layer_norm.weight"]
new_stat_dict[bert_output+".LayerNorm.bias"] = stat_dict[roberta_layer+".final_layer_norm.bias"]
#### end of layer
new_stat_dict["lm_head.dense.weight"] = stat_dict["decoder.lm_head.dense.weight"]
new_stat_dict["lm_head.dense.bias"] = stat_dict["decoder.lm_head.dense.bias"]
new_stat_dict["lm_head.layer_norm.weight"] = stat_dict["decoder.lm_head.layer_norm.weight"]
new_stat_dict["lm_head.layer_norm.bias"] = stat_dict["decoder.lm_head.layer_norm.bias"]
new_stat_dict["lm_head.decoder.weight"] = stat_dict["decoder.lm_head.weight"]
new_stat_dict["lm_head.bias"] = stat_dict["decoder.lm_head.bias"]
new_stat_dict["roberta.pooler.dense.weight"] = model.roberta.pooler.dense.weight
new_stat_dict["roberta.pooler.dense.bias"] = model.roberta.pooler.dense.bias
return new_stat_dict | null |
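A minimal usage sketch for convert_roberta_to_transformers (the checkpoint and output file names below are hypothetical assumptions, not taken from the source):
# Hypothetical paths: the input is a fairseq RoBERTa checkpoint, the output a Transformers-style weight file.
hf_state_dict = convert_roberta_to_transformers("checkpoint_best.pt")
torch.save(hf_state_dict, "pytorch_model.bin")
# The weights can then be loaded into a RobertaForMaskedLM built with the same BertConfig,
# e.g. model.load_state_dict(hf_state_dict, strict=False).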
184,766 | import os
import sys
import faiss
import tempfile
import numpy as np
def knn(x, y, k, use_gpu, dist='cosine'):
def score(x, y, fwd_mean, bwd_mean, margin, dist='cosine'):
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin, dist='cosine'):
def text_load_unify(fname, encoding, unify=True):
def unique_embeddings(emb, ind):
def shift_embeddings(x, y):
def mine_bitext(x, y, src_text_file, trg_text_file, output_file, mode='mine',
retrieval='max', margin='ratio', threshold=0,
neighborhood=4, use_gpu=False, encoding='utf-8', dist='cosine', use_shift_embeds=False):
src_inds, src_sents = text_load_unify(src_text_file, encoding, True)
trg_inds, trg_sents = text_load_unify(trg_text_file, encoding, True)
x = unique_embeddings(x, src_inds)
y = unique_embeddings(y, trg_inds)
if dist == 'cosine':
faiss.normalize_L2(x)
faiss.normalize_L2(y)
if use_shift_embeds:
x2y, y2x = shift_embeddings(x, y)
# calculate knn in both directions
    if retrieval != 'bwd':
print(' - perform {:d}-nn source against target, dist={}'.format(neighborhood, dist))
if use_shift_embeds:
# project x to y space, and search k-nn ys for each x
x2y_sim, x2y_ind = knn(x2y, y, min(y.shape[0], neighborhood), use_gpu, dist)
x2y_mean = x2y_sim.mean(axis=1)
else:
x2y_sim, x2y_ind = knn(x, y, min(y.shape[0], neighborhood), use_gpu, dist)
x2y_mean = x2y_sim.mean(axis=1)
    if retrieval != 'fwd':
print(' - perform {:d}-nn target against source, dist={}'.format(neighborhood, dist))
if use_shift_embeds:
y2x_sim, y2x_ind = knn(y2x, x, min(x.shape[0], neighborhood), use_gpu, dist)
y2x_mean = y2x_sim.mean(axis=1)
else:
y2x_sim, y2x_ind = knn(y, x, min(x.shape[0], neighborhood), use_gpu, dist)
y2x_mean = y2x_sim.mean(axis=1)
# margin function
if margin == 'absolute':
margin = lambda a, b: a
elif margin == 'distance':
margin = lambda a, b: a - b
else: # margin == 'ratio':
margin = lambda a, b: a / b
fout = open(output_file, mode='w', encoding=encoding, errors='surrogateescape')
if mode == 'search':
print(' - Searching for closest sentences in target')
print(' - writing alignments to {:s}'.format(output_file))
scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
best = x2y_ind[np.arange(x.shape[0]), scores.argmax(axis=1)]
nbex = x.shape[0]
ref = np.linspace(0, nbex-1, nbex).astype(int) # [0, nbex)
err = nbex - np.equal(best.reshape(nbex), ref).astype(int).sum()
print(' - errors: {:d}={:.2f}%'.format(err, 100*err/nbex))
for i in src_inds:
print(trg_sents[best[i]], file=fout)
elif mode == 'score':
for i, j in zip(src_inds, trg_inds):
s = score(x[i], y[j], x2y_mean[i], y2x_mean[j], margin)
print(s, src_sents[i], trg_sents[j], sep='\t', file=fout)
elif mode == 'mine':
print(' - mining for parallel data')
if use_shift_embeds:
fwd_scores = score_candidates(x2y, y, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y2x, x, y2x_ind, y2x_mean, x2y_mean, margin)
else:
fwd_scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
bwd_scores = score_candidates(y, x, y2x_ind, y2x_mean, x2y_mean, margin)
fwd_best = x2y_ind[np.arange(x.shape[0]), fwd_scores.argmax(axis=1)]
bwd_best = y2x_ind[np.arange(y.shape[0]), bwd_scores.argmax(axis=1)]
print(' - writing alignments to {:s}'.format(output_file))
if threshold > 0:
print(' - with threshold of {:f}'.format(threshold))
if retrieval == 'fwd':
for i, j in enumerate(fwd_best):
print(fwd_scores[i].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
if retrieval == 'bwd':
for j, i in enumerate(bwd_best):
print(bwd_scores[j].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
if retrieval == 'intersect':
for i, j in enumerate(fwd_best):
if bwd_best[j] == i:
print(fwd_scores[i].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
if retrieval == 'max':
indices = np.stack((np.concatenate((np.arange(x.shape[0]), bwd_best)),
np.concatenate((fwd_best, np.arange(y.shape[0])))), axis=1)
scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1)))
seen_src, seen_trg = set(), set()
for i in np.argsort(-scores):
src_ind, trg_ind = indices[i]
            if src_ind not in seen_src and trg_ind not in seen_trg:
seen_src.add(src_ind)
seen_trg.add(trg_ind)
if scores[i] > threshold:
print(scores[i], src_sents[src_ind], trg_sents[trg_ind], sep='\t', file=fout)
fout.close() | null |
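A hypothetical invocation of mine_bitext (embedding shapes, file names, and the margin threshold are assumptions; the two text files are expected to contain one sentence per line, matching the embedding rows):
import numpy as np
x = np.random.rand(1000, 768).astype('float32')   # source-side sentence embeddings
y = np.random.rand(1200, 768).astype('float32')   # target-side sentence embeddings
mine_bitext(x, y, 'src.txt', 'trg.txt', 'mined.tsv',
            mode='mine', retrieval='max', margin='ratio',
            threshold=1.04, neighborhood=4, use_gpu=False, dist='cosine')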
184,767 | import os
import sys
import faiss
import tempfile
import numpy as np
def bucc_optimize(candidate2score, gold):
def bucc_extract(cand2score, th, fname):
def read_candidate2score(candidates_file, src_text_file, trg_text_file, src_id_file, trg_id_file, encoding='utf-8'):
def bucc_eval(candidates_file, gold_file, src_file, trg_file, src_id_file, trg_id_file, predict_file, threshold=None, encoding='utf-8'):
candidate2score = read_candidate2score(candidates_file, src_file, trg_file, src_id_file, trg_id_file, encoding)
if threshold is not None and gold_file is None:
print(' - using threshold {}'.format(threshold))
else:
print(' - optimizing threshold on gold alignments {}'.format(gold_file))
gold = {line.strip() for line in open(gold_file)}
threshold = bucc_optimize(candidate2score, gold)
bitexts = bucc_extract(candidate2score, threshold, predict_file)
if gold_file is not None:
ncorrect = len(gold.intersection(bitexts))
if ncorrect > 0:
precision = ncorrect / len(bitexts)
recall = ncorrect / len(gold)
f1 = 2*precision*recall / (precision + recall)
else:
precision = recall = f1 = 0
print(' - best threshold={:f}: precision={:.2f}, recall={:.2f}, F1={:.2f}'
.format(threshold, 100*precision, 100*recall, 100*f1))
return {'best-threshold': threshold, 'precision': 100*precision, 'recall': 100*recall, 'F1': 100*f1}
else:
return None | null |
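A hypothetical call to bucc_eval (all file names are assumptions; when a gold file is given the function optimizes the threshold itself and reports precision/recall/F1):
results = bucc_eval('candidates.tsv', 'gold.tsv', 'src.txt', 'trg.txt',
                    'src.id', 'trg.id', 'predictions.tsv', threshold=None)
print(results)   # {'best-threshold': ..., 'precision': ..., 'recall': ..., 'F1': ...}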
184,768 | import os
import sys
import faiss
import tempfile
import numpy as np
def similarity_search(x, y, dim, normalize=False):
num = x.shape[0]
idx = faiss.IndexFlatL2(dim)
if normalize:
faiss.normalize_L2(x)
faiss.normalize_L2(y)
idx.add(x)
scores, prediction = idx.search(y, 1)
return prediction | null |
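A minimal self-contained sketch of the function above on synthetic data (array sizes are arbitrary assumptions):
import numpy as np
x = np.random.rand(100, 64).astype('float32')   # vectors to index
y = np.random.rand(10, 64).astype('float32')    # query vectors
pred = similarity_search(x, y, dim=64, normalize=True)
print(pred.shape)   # (10, 1): index of the nearest row of x for every row of y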
184,769 | import faiss
import json
import logging
import numpy as np
import os
import torch
from src.pequod.data.xretrieval import load_and_cache_examples
from src.pequod.eval.evaluator import Evaluator
def similarity_search(x, y, dim, normalize=False, dist='L2'):
top_k = 10
num = x.shape[0]
if dist == 'cosine':
idx = faiss.IndexFlatIP(dim)
else:
idx = faiss.IndexFlatL2(dim)
if normalize:
faiss.normalize_L2(x)
faiss.normalize_L2(y)
idx.add(x)
scores, prediction = idx.search(y, top_k)
return prediction, scores | null |
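The same kind of sketch for the top-k variant above (synthetic data again; note that it returns the 10 nearest neighbours per query together with their scores):
import numpy as np
x = np.random.rand(50, 32).astype('float32')
y = np.random.rand(5, 32).astype('float32')
pred, scores = similarity_search(x, y, dim=32, normalize=True, dist='cosine')
print(pred.shape, scores.shape)   # (5, 10) and (5, 10)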
184,770 | import faiss
import json
import logging
import numpy as np
import os
import torch
from src.pequod.data.xretrieval import load_and_cache_examples
from src.pequod.eval.evaluator import Evaluator
from src.pequod.eval.utils_retrieve import mine_bitext, bucc_eval
logger = logging.getLogger(__name__)
def load_embeddings(embed_file, num_sentences=None):
logger.info(' loading from {}'.format(embed_file))
embeds = np.load(embed_file)
return embeds | null |
184,771 | import argparse
import glob
import logging
import os
import random
import json
import copy
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, ConcatDataset, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassificationStable,
XLMRobertaTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import xtreme_convert_examples_to_features as convert_examples_to_features
from transformers import xtreme_compute_metrics as compute_metrics
from transformers import xtreme_output_modes as output_modes
from transformers import xtreme_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
def ConcatDataset(dataset_list):
all_input_ids = torch.cat([dataset.tensors[0] for dataset in dataset_list], dim=0)
all_attention_mask = torch.cat([dataset.tensors[1] for dataset in dataset_list], dim=0)
all_token_type_ids = torch.cat([dataset.tensors[2] for dataset in dataset_list], dim=0)
all_labels = torch.cat([dataset.tensors[3] for dataset in dataset_list], dim=0)
dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
return dataset | null |
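A minimal sketch of the ConcatDataset helper above with two synthetic four-tensor TensorDatasets (shapes are assumptions). Note that the def shadows the torch.utils.data.ConcatDataset imported above, so the call resolves to this helper:
import torch
from torch.utils.data import TensorDataset
d1 = TensorDataset(torch.zeros(4, 8, dtype=torch.long), torch.ones(4, 8, dtype=torch.long),
                   torch.zeros(4, 8, dtype=torch.long), torch.zeros(4, dtype=torch.long))
d2 = TensorDataset(torch.ones(2, 8, dtype=torch.long), torch.ones(2, 8, dtype=torch.long),
                   torch.zeros(2, 8, dtype=torch.long), torch.ones(2, dtype=torch.long))
merged = ConcatDataset([d1, d2])
print(len(merged))   # 6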
184,772 | import argparse
import glob
import logging
import os
import random
import json
import copy
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, ConcatDataset, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassificationStable,
XLMRobertaTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import xtreme_convert_examples_to_features as convert_examples_to_features
from transformers import xtreme_compute_metrics as compute_metrics
from transformers import xtreme_output_modes as output_modes
from transformers import xtreme_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def evaluate(args, model, tokenizer, prefix="", single_gpu=False, verbose=True):
if single_gpu:
args = copy.deepcopy(args)
args.local_rank = -1
args.n_gpu = 1
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
eval_datasets = []
eval_langs = args.language.split(',')
splits = ["valid", "test"] if args.do_train else ["test"]
for split in splits:
for lang in eval_langs:
eval_datasets.append((split, lang))
results = {}
# leave interface for multi-task evaluation
eval_task = eval_task_names[0]
eval_output_dir = eval_outputs_dirs[0]
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
for split, lang in eval_datasets:
task_name = "{0}-{1}".format(split, lang)
eval_dataset, guids = load_and_cache_examples(args, eval_task, tokenizer, lang, split=split)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
guids = np.array(guids)
for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
eval_loss += tmp_eval_loss.mean().item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
eval_loss = eval_loss / nb_eval_steps
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
else:
raise ValueError("No other `output_mode` for XGLUE.")
# print("pred:" + split + str([i for i in preds[:500]]), flush=True)
# print("label:" + split + str([i for i in out_label_ids[:500]]), flush=True)
result = compute_metrics(eval_task, preds, out_label_ids, guids)
results[task_name] = result
if args.do_train:
results["valid_avg"] = average_dic([value for key, value in results.items() if key.startswith("valid")])
results["test_avg"] = average_dic([value for key, value in results.items() if key.startswith("test")])
return results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, noised_data_generator=None)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, noised_data_generator=None):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_writer = SummaryWriter(os.path.join(args.output_dir, "tb-log"))
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if noised_data_generator is not None and noised_data_generator.enable_data_augmentation:
t_total = noised_data_generator.get_train_steps(len(train_dataloader), args)
else:
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Logging steps = %d", args.logging_steps)
global_step = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path) and False:
        # set global_step to the global_step of the last saved checkpoint from the model path
global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
tr_loss, logging_loss, best_avg = 0.0, 0.0, 0.0
tr_original_loss, logging_original_loss = 0.0, 0.0
tr_noised_loss, logging_noised_loss = 0.0, 0.0
tr_r1_loss, logging_r1_loss = 0.0, 0.0
tr_r2_loss, logging_r2_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    set_seed(args) # Added here for reproducibility
def logging(eval=False):
results = None
if args.evaluate_during_training and eval:
results = evaluate(args, model, tokenizer, single_gpu=True)
for task, result in results.items():
for key, value in result.items():
tb_writer.add_scalar("eval_{}_{}".format(task, key), value, global_step)
logger.info("eval_%s_%s: %s" % (task, key, value))
log_writer.write("{0}\t{1}\n".format(global_step, json.dumps(results)))
log_writer.flush()
logger.info(
"global_step: {}, lr: {:.6f}, loss: {:.6f}, original_loss: {:.6f}, noised_loss: {:.6f}, r1_loss: {:.6f}, r2_loss: {:.6f}".format(
global_step, scheduler.get_lr()[0], (tr_loss - logging_loss) / args.logging_steps,
(tr_original_loss - logging_original_loss) / args.logging_steps,
(tr_noised_loss - logging_noised_loss) / args.logging_steps,
(tr_r1_loss - logging_r1_loss) / args.logging_steps,
(tr_r2_loss - logging_r2_loss) / args.logging_steps))
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("original_loss", (tr_original_loss - logging_original_loss) / args.logging_steps,
global_step)
tb_writer.add_scalar("noised_loss", (tr_noised_loss - logging_noised_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r1_loss", (tr_r1_loss - logging_r1_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r2_loss", (tr_r2_loss - logging_r2_loss) / args.logging_steps, global_step)
return results
def save_checkpoint_best(result):
task_metric = "acc"
if args.task_name == "rel":
task_metric = "ndcg"
if result is not None and best_avg < result["valid_avg"][task_metric]:
output_dir = os.path.join(args.output_dir, "checkpoint-best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
return result["valid_avg"][task_metric]
else:
return best_avg
for _ in train_iterator:
if noised_data_generator is not None:
assert noised_data_generator.enable_r1_loss or noised_data_generator.noised_loss or noised_data_generator.enable_data_augmentation
noised_train_dataset = noised_data_generator.get_noised_dataset(train_examples)
train_sampler = RandomSampler(noised_train_dataset) if args.local_rank == -1 else DistributedSampler(
noised_train_dataset)
train_dataloader = DataLoader(noised_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
# if not args.max_steps > 0:
# assert t_total == len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
if first_stage_model is not None:
first_stage_model.eval()
batch = tuple(t.to(args.device) for t in batch)
if len(batch) == 4:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
elif len(batch) == 5:
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
inputs["is_augmented"] = batch[4]
else:
assert len(batch) == 9
inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3],
"is_augmented": batch[4],
"noised_input_ids": batch[5],
"noised_attention_mask": batch[6],
"r1_mask": batch[8]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
inputs["noised_token_type_ids"] = (
batch[7] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
if first_stage_model is not None:
first_stage_model_inputs = {"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"token_type_ids": inputs["token_type_ids"],
"labels": inputs["labels"]}
with torch.no_grad():
inputs["first_stage_model_logits"] = first_stage_model(**first_stage_model_inputs)[1]
outputs = model(**inputs)
loss = outputs[0] # model outputs are always tuple in transformers (see doc)
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if noised_data_generator is not None:
original_loss, noised_loss, r1_loss, r2_loss = outputs[1:5]
if args.n_gpu > 1:
original_loss = original_loss.mean()
noised_loss = noised_loss.mean()
r1_loss = r1_loss.mean()
r2_loss = r2_loss.mean()
if args.gradient_accumulation_steps > 1:
original_loss = original_loss / args.gradient_accumulation_steps
noised_loss = noised_loss / args.gradient_accumulation_steps
r1_loss = r1_loss / args.gradient_accumulation_steps
r2_loss = r2_loss / args.gradient_accumulation_steps
tr_original_loss += original_loss.item()
tr_noised_loss += noised_loss.item()
tr_r1_loss += r1_loss.item()
tr_r2_loss += r2_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
do_eval = args.evaluate_steps > 0 and global_step % args.evaluate_steps == 0
cur_result = logging(eval=do_eval)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
best_avg = save_checkpoint_best(cur_result)
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.local_rank in [-1, 0] and args.logging_each_epoch:
cur_result = logging(eval=True)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
best_avg = save_checkpoint_best(cur_result)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / (global_step + 1) | Train the model |
184,773 | import argparse
import glob
import logging
import os
import random
import json
import copy
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset, ConcatDataset, Subset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
BertConfig,
BertForSequenceClassification,
BertTokenizer,
DistilBertConfig,
DistilBertForSequenceClassification,
DistilBertTokenizer,
XLMConfig,
XLMForSequenceClassification,
XLMTokenizer,
XLMRobertaConfig,
XLMRobertaForSequenceClassificationStable,
XLMRobertaTokenizer,
get_linear_schedule_with_warmup,
)
from transformers import xtreme_convert_examples_to_features as convert_examples_to_features
from transformers import xtreme_compute_metrics as compute_metrics
from transformers import xtreme_output_modes as output_modes
from transformers import xtreme_processors as processors
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def load_and_cache_examples(args, task, tokenizer, language, split="train", return_examples=False):
def predict(args, model, tokenizer, label_list, prefix="", single_gpu=False, verbose=True):
if single_gpu:
args = copy.deepcopy(args)
args.local_rank = -1
args.n_gpu = 1
eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
eval_datasets = []
eval_langs = args.language.split(',')
for split in ["test"]:
for lang in eval_langs:
eval_datasets.append((split, lang))
results = {}
# leave interface for multi-task evaluation
eval_task = eval_task_names[0]
eval_output_dir = eval_outputs_dirs[0]
# multi-gpu eval
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
for split, lang in eval_datasets:
task_name = "{0}-{1}".format(split, lang)
eval_dataset, guids = load_and_cache_examples(args, eval_task, tokenizer, lang, split=split)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
guids = np.array(guids)
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0], "attention_mask": batch[1]}
if args.model_type != "distilbert":
inputs["token_type_ids"] = (
batch[2] if args.model_type in ["bert"] else None
) # XLM and DistilBERT don't use segment_ids
outputs = model(**inputs)
logits = outputs[0]
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
if args.output_mode == "classification":
preds = np.argmax(preds, axis=1)
else:
raise ValueError("No other `output_mode` for XGLUE.")
results[lang] = preds
for lang in results.keys():
output_eval_file = os.path.join(eval_output_dir, prefix, "{}.prediction".format(lang))
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
print("results:", results)
for item in results[lang]:
writer.write(str(label_list[item]) + "\n") | null |
184,774 | import argparse
import glob
import logging
import os
import random
import timeit
import itertools
import json
import copy
import math
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
AlbertConfig,
AlbertForQuestionAnswering,
AlbertTokenizer,
BertConfig,
BertForQuestionAnswering,
BertTokenizer,
XLMRobertaConfig,
XLMRobertaForQuestionAnsweringStable,
XLMRobertaTokenizer,
CamembertConfig,
CamembertForQuestionAnswering,
CamembertTokenizer,
DistilBertConfig,
DistilBertForQuestionAnswering,
DistilBertTokenizer,
RobertaConfig,
RobertaForQuestionAnswering,
RobertaTokenizer,
XLMConfig,
XLMForQuestionAnswering,
XLMTokenizer,
XLNetConfig,
XLNetForQuestionAnswering,
XLNetTokenizer,
get_linear_schedule_with_warmup,
squad_convert_examples_to_features,
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
)
from transformers.data.metrics.evaluate_mlqa import evaluate_with_path as mlqa_evaluate_with_path
from transformers.data.metrics.evaluate_squad import evaluate_with_path as squad_evaluate_with_path
from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor, MLQAProcessor, \
TyDiQAProcessor, XQuADProcessor
from transformers.tokenization_bert import whitespace_tokenize
from transformers.data.processors.squad import _improve_answer_span, _new_check_is_max_context, SquadFeatures
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def evaluate(args, model, tokenizer, prefix=""):
languages = args.language.split(',')
all_languages_results = {}
if args.task_name.lower() == "mlqa" or args.task_name == "mlqa_dev":
processor = MLQAProcessor()
elif args.task_name.lower() == "xquad":
processor = XQuADProcessor()
elif args.task_name.lower() == "tydiqa":
processor = TyDiQAProcessor()
elif args.task_name.lower() == "squad":
processor = SquadV1Processor()
else:
assert False
split_lang_list = []
# split_lang_list.append(("run_dev", "en"))
for lang in languages:
split_lang_list.append(("dev", lang))
if args.task_name.lower() == "mlqa":
for lang in languages:
split_lang_list.append(("test", lang))
for split, lang in split_lang_list:
# for split, lang in itertools.product(["dev", "test"], languages):
print("evaluating on {0} {1}".format(split, lang))
dataset, examples, features = load_and_cache_examples(args, tokenizer, language=lang, split=split,
output_examples=True)
if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
os.makedirs(args.output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(dataset)
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
all_results = []
start_time = timeit.default_timer()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
}
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
example_indices = batch[3]
# XLNet and XLM use more arguments for their predictions
if args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": batch[4], "p_mask": batch[5]})
# for lang_id-sensitive xlm models
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
outputs = model(**inputs)
for i, example_index in enumerate(example_indices):
eval_feature = features[example_index.item()]
unique_id = int(eval_feature.unique_id)
output = [to_list(output[i]) for output in outputs]
# Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
# models only use two.
if len(output) >= 5:
start_logits = output[0]
start_top_index = output[1]
end_logits = output[2]
end_top_index = output[3]
cls_logits = output[4]
result = SquadResult(
unique_id,
start_logits,
end_logits,
start_top_index=start_top_index,
end_top_index=end_top_index,
cls_logits=cls_logits,
)
else:
start_logits, end_logits = output
result = SquadResult(unique_id, start_logits, end_logits)
all_results.append(result)
evalTime = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
# Compute predictions
output_prediction_file = os.path.join(args.output_dir, "{}.prediction".format(lang))
output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}_{}_{}.json".format(prefix, split, lang))
if args.version_2_with_negative:
output_null_log_odds_file = os.path.join(args.output_dir,
"null_odds_{}_{}_{}.json".format(prefix, split, lang))
else:
output_null_log_odds_file = None
# XLNet and XLM use a more complex post-processing procedure
if args.model_type in ["xlnet", "xlm"]:
start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top
end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top
predictions = compute_predictions_log_probs(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
start_n_top,
end_n_top,
args.version_2_with_negative,
tokenizer,
args.verbose_logging,
)
else:
predictions = compute_predictions_logits(
examples,
features,
all_results,
args.n_best_size,
args.max_answer_length,
args.do_lower_case,
output_prediction_file,
output_nbest_file,
output_null_log_odds_file,
args.verbose_logging,
args.version_2_with_negative,
args.null_score_diff_threshold,
tokenizer,
map_to_origin=not (args.model_type == "xlmr" and (lang == 'zh' or lang == "ko")),
# map_to_origin=False,
)
# Compute the F1 and exact scores.
if args.task_name.lower() == "mlqa" or args.task_name.lower() == "mlqa_dev":
results = mlqa_evaluate_with_path(processor.get_dataset_path(args.data_dir, split, lang),
output_prediction_file, lang)
else:
results = squad_evaluate_with_path(processor.get_dataset_path(args.data_dir, split, lang),
output_prediction_file)
# results = squad_evaluate(examples, predictions)
# results = evaluate_with_path(processor.get_dataset_path(args.data_dir, split, lang), output_prediction_file,
# lang)
all_languages_results["{0}_{1}".format(split, lang)] = results
for split in ["dev", "test"]:
all_languages_results["{0}_avg".format(split)] = average_dic(
[value for key, value in all_languages_results.items() if split in key])
return all_languages_results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, noised_data_generator=None)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, noised_data_generator=None):
""" Train the model """
if args.local_rank in [-1, 0]:
tb_log_dir = os.getenv("PHILLY_JOB_DIRECTORY", None)
tb_writer = SummaryWriter(log_dir=tb_log_dir)
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# args.warmup_steps == -1 means 0.1 warmup ratio
if args.warmup_steps == -1:
args.warmup_steps = int(t_total * 0.1)
logger.info("Warmup steps: %d" % args.warmup_steps)
scheduler = get_linear_schedule_with_warmup(
optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
)
# Check if saved optimizer or scheduler states exist
if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
os.path.join(args.model_name_or_path, "scheduler.pt")
):
# Load in optimizer and scheduler states
optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size
* args.gradient_accumulation_steps
* (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
global_step = 1
epochs_trained = 0
steps_trained_in_current_epoch = 0
# Check if continuing training from a checkpoint
if os.path.exists(args.model_name_or_path):
try:
            # set global_step to the global_step of the last saved checkpoint from the model path
checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
global_step = int(checkpoint_suffix)
epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
logger.info(" Continuing training from checkpoint, will skip to saved global_step")
logger.info(" Continuing training from epoch %d", epochs_trained)
logger.info(" Continuing training from global step %d", global_step)
logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
except ValueError:
logger.info(" Starting fine-tuning.")
tr_loss, logging_loss, best_avg_f1 = 0.0, 0.0, 0.0
tr_original_loss, logging_original_loss = 0.0, 0.0
tr_noised_loss, logging_noised_loss = 0.0, 0.0
tr_r1_loss, logging_r1_loss = 0.0, 0.0
tr_r2_loss, logging_r2_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(
epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
)
    # Added here for reproducibility
set_seed(args)
def logging(eval=False):
results = None
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank in [-1, 0] and args.evaluate_during_training and eval:
results = evaluate(args, model, tokenizer)
for key, value in results.items():
logger.info("eval_{}: {}".format(key, value))
# for key, value in results.items():
# tb_writer.add_scalar("eval_{}".format(key), value, global_step)
log_writer.write("{0}\t{1}".format(global_step, json.dumps(results)) + '\n')
log_writer.flush()
logger.info(
"global_step: {}, lr: {:.6f}, loss: {:.6f}, original_loss: {:.6f}, noised_loss: {:.6f}, r1_loss: {:.6f}, r2_loss: {:.6f}".format(
global_step, scheduler.get_lr()[0], (tr_loss - logging_loss) / args.logging_steps,
(tr_original_loss - logging_original_loss) / args.logging_steps,
(tr_noised_loss - logging_noised_loss) / args.logging_steps,
(tr_r1_loss - logging_r1_loss) / args.logging_steps,
(tr_r2_loss - logging_r2_loss) / args.logging_steps))
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("original_loss", (tr_original_loss - logging_original_loss) / args.logging_steps,
global_step)
tb_writer.add_scalar("noised_loss", (tr_noised_loss - logging_noised_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r1_loss", (tr_r1_loss - logging_r1_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r2_loss", (tr_r2_loss - logging_r2_loss) / args.logging_steps, global_step)
if results is not None:
return results["dev_avg"]["f1"]
else:
return None
for _ in train_iterator:
use_noised_ids = False
if noised_data_generator is not None:
assert noised_data_generator.enable_r1_loss or noised_data_generator.noised_loss or noised_data_generator.enable_data_augmentation
noised_train_dataset = noised_data_generator.get_noised_dataset(train_examples)
train_sampler = RandomSampler(noised_train_dataset) if args.local_rank == -1 else DistributedSampler(
noised_train_dataset)
train_dataloader = DataLoader(noised_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
# epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
for step, batch in enumerate(epoch_iterator):
# Skip past any already trained steps if resuming training
if steps_trained_in_current_epoch > 0:
steps_trained_in_current_epoch -= 1
continue
model.train()
if first_stage_model is not None:
first_stage_model.eval()
batch = tuple(t.to(args.device) for t in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"token_type_ids": batch[2],
"start_positions": batch[3],
"end_positions": batch[4],
}
if first_stage_model is not None:
with torch.no_grad():
inputs["first_stage_model_start_logits"], inputs["first_stage_model_end_logits"] = first_stage_model(**inputs)[1:3]
if noised_data_generator is not None:
inputs.update({"noised_input_ids": batch[8], "noised_attention_mask": batch[9],
"noised_token_type_ids": batch[10], "noised_r1_mask": batch[11],
"original_r1_mask": batch[12], "noised_start_positions": batch[13],
"noised_end_positions": batch[14], "is_augmented": batch[15]})
if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if use_noised_ids:
del inputs["noised_token_type_ids"]
if args.model_type in ["xlnet", "xlm"]:
assert False
inputs.update({"cls_index": batch[5], "p_mask": batch[6]})
if args.version_2_with_negative:
inputs.update({"is_impossible": batch[7]})
if hasattr(model, "config") and hasattr(model.config, "lang2id"):
inputs.update(
{"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)}
)
outputs = model(**inputs)
# model outputs are always tuple in transformers (see doc)
loss = outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if True or noised_data_generator is not None:
original_loss, noised_loss, r1_loss, r2_loss = outputs[1:5]
if args.n_gpu > 1:
original_loss = original_loss.mean()
noised_loss = noised_loss.mean()
r1_loss = r1_loss.mean()
r2_loss = r2_loss.mean()
if args.gradient_accumulation_steps > 1:
original_loss = original_loss / args.gradient_accumulation_steps
noised_loss = noised_loss / args.gradient_accumulation_steps
r1_loss = r1_loss / args.gradient_accumulation_steps
r2_loss = r2_loss / args.gradient_accumulation_steps
tr_original_loss += original_loss.item()
tr_noised_loss += noised_loss.item()
tr_r1_loss += r1_loss.item()
tr_r2_loss += r2_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
cur_result = logging(eval=args.evaluate_steps > 0 and global_step % args.evaluate_steps == 0)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.local_rank in [-1, 0] and args.logging_each_epoch:
avg_f1 = logging(eval=True)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
if avg_f1 > best_avg_f1:
best_avg_f1 = avg_f1
output_dir = os.path.join(args.output_dir, "checkpoint-best")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / global_step | Train the model |
184,775 | from __future__ import absolute_import, division, print_function
import logging
import os
import random
from io import open
from transformers import XLMTokenizer
def get_labels(path):
with open(path, "r") as f:
labels = f.read().splitlines()
if "O" not in labels:
labels = ["O"] + labels
return labels | null |
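A minimal usage sketch for get_labels (the file name and its contents are assumptions; the file is expected to hold one tag per line):
labels = get_labels('labels.txt')
print(labels)   # e.g. ['O', 'B-PER', 'I-PER', 'B-LOC', ...]; 'O' is prepended when missing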
184,776 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import copy
import json
import random
import math
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from utils_tag import InputExample
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
RobertaConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForTokenClassificationPoolingStable,
)
def get_root(x, parent):
if x == parent[x]: return x
parent[x] = get_root(parent[x], parent)
return parent[x] | null |
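A small sketch of the path-compressing root lookup above (get_root is the find step of a union-find; a full implementation would pair it with a union step that links two roots):
parent = {0: 0, 1: 0, 2: 1, 3: 3}
print(get_root(2, parent))   # 0; parent[2] is rewritten to 0 along the way
print(get_root(3, parent))   # 3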
184,777 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import copy
import json
import random
import math
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from utils_tag import InputExample
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
RobertaConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForTokenClassificationPoolingStable,
)
logger = logging.getLogger(__name__)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def evaluate(args, model, tokenizer, labels, pad_token_label_id, prefix=""):
# eval_task_names = (args.task_name,)
eval_outputs_dirs = (args.output_dir,)
eval_datasets = []
eval_langs = args.predict_langs.split(',')
splits = ["dev", "test"] if args.do_train else ["test"]
for split in splits:
for lang in eval_langs:
eval_datasets.append((split, lang))
all_languages_results = {}
# leave interface for multi-task evaluation
# eval_task = eval_task_names[0]
eval_output_dir = eval_outputs_dirs[0]
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
for split, lang in eval_datasets:
task_name = "{0}-{1}".format(split, lang)
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=split, lang=lang)
if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
os.makedirs(eval_output_dir)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
for batch in eval_dataloader:
model.eval()
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"pooling_ids": batch[4]}
if args.model_type != "distilbert":
# XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[5]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if nb_eval_steps == 0:
results = {k: 0 for k in ["precision", "recall", "f1"]}
continue
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
all_languages_results["{0}_{1}".format(split, lang)] = results
for split in splits:
all_languages_results["{0}_avg".format(split)] = average_dic(
[value for key, value in all_languages_results.items() if split in key])
return all_languages_results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, labels, pad_token_label_id, noised_data_generator=None)` to solve the following problem:
Train the model.
Here is the function:
def train(args, train_examples, train_dataset, model, first_stage_model, tokenizer, labels, pad_token_label_id,
noised_data_generator=None):
"""Train the model."""
if args.local_rank in [-1, 0]:
tb_log_dir = os.getenv("PHILLY_JOB_DIRECTORY", None)
tb_writer = SummaryWriter(log_dir=tb_log_dir)
log_writer = open(os.path.join(args.output_dir, "evaluate_logs.txt"), 'w')
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
if noised_data_generator is not None and noised_data_generator.enable_data_augmentation:
t_total = noised_data_generator.get_train_steps(train_examples, args)
else:
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# Prepare optimizer and schedule (linear warmup and decay)
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay},
{"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
# args.warmup_steps == -1 means 0.1 warmup ratio
if args.warmup_steps == -1:
args.warmup_steps = int(t_total * 0.1)
logger.info("Warmup steps: %d" % args.warmup_steps)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps,
num_training_steps=t_total)
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
# multi-gpu training (should be after apex fp16 initialization)
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Distributed training (should be after apex fp16 initialization)
if args.local_rank != -1:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
output_device=args.local_rank,
find_unused_parameters=True)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
best_score = 0.0
best_checkpoint = None
patience = 0
global_step = 0
tr_loss, logging_loss, best_avg = 0.0, 0.0, 0.0
tr_original_loss, logging_original_loss = 0.0, 0.0
tr_noised_loss, logging_noised_loss = 0.0, 0.0
tr_r1_loss, logging_r1_loss = 0.0, 0.0
tr_r2_loss, logging_r2_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0])
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
def logging(eval=False):
results = None
# Only evaluate when single GPU otherwise metrics may not average well
if args.local_rank in [-1, 0] and args.evaluate_during_training and eval:
results = evaluate(args, model, tokenizer, labels, pad_token_label_id)
for key, value in results.items():
logger.info("eval_{}: {}".format(key, value))
# for key, value in results.items():
# tb_writer.add_scalar("eval_{}".format(key), value, global_step)
log_writer.write("{0}\t{1}".format(global_step, json.dumps(results)) + '\n')
log_writer.flush()
logger.info(
"global_step: {}, lr: {:.6f}, loss: {:.6f}, original_loss: {:.6f}, noised_loss: {:.6f}, r1_loss: {:.6f}, r2_loss: {:.6f}".format(
global_step, scheduler.get_lr()[0], (tr_loss - logging_loss) / args.logging_steps,
(tr_original_loss - logging_original_loss) / args.logging_steps,
(tr_noised_loss - logging_noised_loss) / args.logging_steps,
(tr_r1_loss - logging_r1_loss) / args.logging_steps,
(tr_r2_loss - logging_r2_loss) / args.logging_steps))
tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("original_loss", (tr_original_loss - logging_original_loss) / args.logging_steps,
global_step)
tb_writer.add_scalar("noised_loss", (tr_noised_loss - logging_noised_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r1_loss", (tr_r1_loss - logging_r1_loss) / args.logging_steps, global_step)
tb_writer.add_scalar("r2_loss", (tr_r2_loss - logging_r2_loss) / args.logging_steps, global_step)
return results
for _ in train_iterator:
if noised_data_generator is not None:
assert noised_data_generator.enable_r1_loss or noised_data_generator.noised_loss or noised_data_generator.enable_data_augmentation
noised_train_dataset, all_align_pooling_ids = noised_data_generator.get_noised_dataset(train_examples)
train_sampler = RandomSampler(noised_train_dataset) if args.local_rank == -1 else DistributedSampler(
noised_train_dataset)
train_dataloader = DataLoader(noised_train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
# if not args.max_steps > 0:
# assert t_total == len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
# epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=True)
for step, batch in enumerate(epoch_iterator):
model.train()
if first_stage_model is not None:
first_stage_model.eval()
batch = tuple(t.to(args.device) for t in batch if t is not None)
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"pooling_ids": batch[4]}
if args.model_type != "distilbert":
# XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
# if args.model_type == "xlm":
# inputs["langs"] = batch[5]
if first_stage_model is not None:
with torch.no_grad():
inputs["first_stage_model_logits"] = first_stage_model(**inputs)[1]
# if noised_data_generator is not None and noised_data_generator.enable_r1_loss and \
# noised_data_generator.enable_translate_data and noised_data_generator.use_align_label_probs:
# inputs.update({"src_pooling_ids": batch[-2],
# "tgt_pooling_ids": batch[-1]})
# batch = batch[:-2]
if noised_data_generator is not None:
inputs.update({"noised_input_ids": batch[5],
"noised_attention_mask": batch[6],
"noised_token_type_ids": None,
"noised_labels": batch[8],
"noised_pooling_ids": batch[9],
"noised_r1_mask": batch[10],
"original_r1_mask": batch[11],
"is_augmented": batch[12]})
outputs = model(**inputs)
loss = outputs[0]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel training
loss = loss.mean()
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
tr_loss += loss.item()
if noised_data_generator is not None:
original_loss, noised_loss, r1_loss, r2_loss = outputs[1:5]
if args.n_gpu > 1:
original_loss = original_loss.mean()
noised_loss = noised_loss.mean()
r1_loss = r1_loss.mean()
r2_loss = r2_loss.mean()
if args.gradient_accumulation_steps > 1:
original_loss = original_loss / args.gradient_accumulation_steps
noised_loss = noised_loss / args.gradient_accumulation_steps
r1_loss = r1_loss / args.gradient_accumulation_steps
r2_loss = r2_loss / args.gradient_accumulation_steps
tr_original_loss += original_loss.item()
tr_noised_loss += noised_loss.item()
tr_r1_loss += r1_loss.item()
tr_r2_loss += r2_loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()  # Update learning rate schedule
model.zero_grad()
global_step += 1
if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
do_eval = args.evaluate_steps > 0 and global_step % args.evaluate_steps == 0
cur_result = logging(do_eval)
logging_loss = tr_loss
logging_original_loss = tr_original_loss
logging_noised_loss = tr_noised_loss
logging_r1_loss = tr_r1_loss
logging_r2_loss = tr_r2_loss
if do_eval:
print(cur_result)
if cur_result["dev_avg"]["f1"] > best_score:
logger.info(
"result['f1']={} > best_score={}".format(cur_result["dev_avg"]["f1"], best_score))
best_score = cur_result["dev_avg"]["f1"]
# Save the best model checkpoint
output_dir = os.path.join(args.output_dir, "checkpoint-best")
best_checkpoint = output_dir
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Take care of distributed/parallel training
model_to_save = model.module if hasattr(model, "module") else model
model_to_save.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir, "training_args.bin"))
logger.info("Saving the best model checkpoint to %s", output_dir)
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
logger.info("Saving optimizer and scheduler states to %s", output_dir)
logger.info("Reset patience to 0")
patience = 0
else:
patience += 1
logger.info("Hit patience={}".format(patience))
if args.eval_patience > 0 and patience > args.eval_patience:
logger.info("early stop! patience={}".format(patience))
epoch_iterator.close()
train_iterator.close()
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / global_step
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
if args.local_rank in [-1, 0]:
tb_writer.close()
log_writer.close()
return global_step, tr_loss / global_step | Train the model. |
184,778 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import copy
import json
import random
import math
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from utils_tag import InputExample
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
RobertaConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForTokenClassificationPoolingStable,
)
logger = logging.getLogger(__name__)
def load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode, lang, few_shot=-1,
return_examples=False):
# Make sure only the first process in distributed training processes
# the dataset; the others will use the cache
if args.local_rank not in [-1, 0] and mode == "train":
torch.distributed.barrier()
# Load data features from cache or dataset file
model_name = "xlm-roberta-base"
if args.word_dropout_rate > 0:
assert mode != "train"
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}_wdr{}".format(mode, lang,
model_name,
str(args.max_seq_length),
str(
args.word_dropout_rate)))
else:
cached_features_file = os.path.join(args.data_dir, "cached_{}_{}_{}_{}".format(mode, lang,
model_name,
str(args.max_seq_length)))
cached_features_file += "_pooling"
if args.languages_without_spaces is not None and lang in args.languages_without_spaces.split(','):
cached_features_file += "_lws"
data_file = os.path.join(args.data_dir, lang, "{}.{}".format(mode, model_name))
logger.info("Creating features from dataset file at {} in language {}".format(data_file, lang))
examples = read_examples_from_file(data_file, lang)
if os.path.exists(cached_features_file) and not args.overwrite_cache:
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
logger.info("all languages = {}".format(lang))
features = convert_examples_to_features(examples, labels, args.max_seq_length, tokenizer,
cls_token_at_end=bool(args.model_type in ["xlnet"]),
cls_token=tokenizer.cls_token,
cls_token_segment_id=2 if args.model_type in ["xlnet"] else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=bool(args.model_type in ["roberta", "xlmr"]),
pad_on_left=bool(args.model_type in ["xlnet"]),
pad_token=tokenizer.convert_tokens_to_ids([tokenizer.pad_token])[
0],
pad_token_segment_id=4 if args.model_type in ["xlnet"] else 0,
pad_token_label_id=pad_token_label_id,
lang=lang,
languages_without_spaces=args.languages_without_spaces.split(
',') if args.languages_without_spaces is not None else None,
word_dropout_rate=args.word_dropout_rate,
)
if args.local_rank in [-1, 0]:
logger.info(
"Saving features into cached file {}, len(features)={}".format(cached_features_file, len(features)))
torch.save(features, cached_features_file)
# Make sure only the first process in distributed training processes
# the dataset; the others will use the cache
if args.local_rank == 0 and mode == "train":
torch.distributed.barrier()
if few_shot > 0 and mode == 'train':
logger.info("Original no. of examples = {}".format(len(features)))
features = features[: few_shot]
logger.info('Using few-shot learning on {} examples'.format(len(features)))
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in features], dtype=torch.long)
all_pooling_ids = torch.tensor([f.pooling_ids for f in features], dtype=torch.long)
all_label_ids = torch.tensor([f.label_ids for f in features], dtype=torch.long)
if args.model_type == 'xlm' and features[0].langs is not None:
all_langs = torch.tensor([f.langs for f in features], dtype=torch.long)
logger.info('all_langs[0] = {}'.format(all_langs[0]))
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids,
all_langs)
else:
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids)
if return_examples:
return dataset, examples
else:
return dataset
def predict(args, model, tokenizer, labels, pad_token_label_id, mode, prefix="", lang="en", print_result=True):
eval_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode=mode, lang=lang)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
# Note that DistributedSampler samples randomly
eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset)
eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
# multi-gpu evaluate
if args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# Eval!
logger.info("***** Running evaluation %s in %s *****" % (prefix, lang))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
preds = None
out_label_ids = None
model.eval()
for batch in tqdm(eval_dataloader, desc="Evaluating"):
batch = tuple(t.to(args.device) for t in batch)
with torch.no_grad():
inputs = {"input_ids": batch[0],
"attention_mask": batch[1],
"labels": batch[3],
"pooling_ids": batch[4]}
if args.model_type != "distilbert":
# XLM and RoBERTa don't use segment_ids
inputs["token_type_ids"] = batch[2] if args.model_type in ["bert", "xlnet"] else None
if args.model_type == 'xlm':
inputs["langs"] = batch[5]
outputs = model(**inputs)
tmp_eval_loss, logits = outputs[:2]
if args.n_gpu > 1:
# mean() to average on multi-gpu parallel evaluating
tmp_eval_loss = tmp_eval_loss.mean()
eval_loss += tmp_eval_loss.item()
nb_eval_steps += 1
if preds is None:
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["labels"].detach().cpu().numpy()
else:
preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
if nb_eval_steps == 0:
results = {k: 0 for k in ["loss", "precision", "recall", "f1"]}
else:
eval_loss = eval_loss / nb_eval_steps
preds = np.argmax(preds, axis=2)
label_map = {i: label for i, label in enumerate(labels)}
out_label_list = [[] for _ in range(out_label_ids.shape[0])]
preds_list = [[] for _ in range(out_label_ids.shape[0])]
for i in range(out_label_ids.shape[0]):
for j in range(out_label_ids.shape[1]):
if out_label_ids[i, j] != pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]])
preds_list[i].append(label_map[preds[i][j]])
results = {
"loss": eval_loss,
"precision": precision_score(out_label_list, preds_list),
"recall": recall_score(out_label_list, preds_list),
"f1": f1_score(out_label_list, preds_list)
}
if print_result:
logger.info("***** Evaluation result %s in %s *****" % (prefix, lang))
for key in sorted(results.keys()):
logger.info(" %s = %s", key, str(results[key]))
return results, preds_list | null |
184,779 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import copy
import json
import random
import math
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from utils_tag import InputExample
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
RobertaConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForTokenClassificationPoolingStable,
)
def ConcatDataset(dataset_list):
all_input_ids = torch.cat([dataset.tensors[0] for dataset in dataset_list], dim=0)
all_input_mask = torch.cat([dataset.tensors[1] for dataset in dataset_list], dim=0)
all_segment_ids = torch.cat([dataset.tensors[2] for dataset in dataset_list], dim=0)
all_label_ids = torch.cat([dataset.tensors[3] for dataset in dataset_list], dim=0)
all_pooling_ids = torch.cat([dataset.tensors[4] for dataset in dataset_list], dim=0)
dataset = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids, all_pooling_ids)
return dataset | null |
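A minimal usage sketch of the ConcatDataset helper above, assuming the load_and_cache_examples helper from the earlier tagging snippet and hypothetical language codes; it only illustrates the expected five-tensor TensorDataset layout.
# Hypothetical example: merge cached English and German training sets into a single TensorDataset.
en_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train", lang="en")
de_dataset = load_and_cache_examples(args, tokenizer, labels, pad_token_label_id, mode="train", lang="de")
train_dataset = ConcatDataset([en_dataset, de_dataset])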
184,780 | from __future__ import absolute_import, division, print_function
import argparse
import glob
import logging
import os
import copy
import json
import random
import math
import numpy as np
import torch
from seqeval.metrics import precision_score, recall_score, f1_score
from tensorboardX import SummaryWriter
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from utils_tag import convert_examples_to_features
from utils_tag import get_labels
from utils_tag import read_examples_from_file
from utils_tag import InputExample
from transformers import (
AdamW,
get_linear_schedule_with_warmup,
WEIGHTS_NAME,
RobertaConfig,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForTokenClassificationPoolingStable,
)
def save_predictions(args, predictions, output_file, text_file, idx_file, output_word_prediction=False):
# Save predictions
with open(text_file, "r") as text_reader, open(idx_file, "r") as idx_reader:
text = text_reader.readlines()
index = idx_reader.readlines()
assert len(text) == len(index)
# Sanity check on the predictions
with open(output_file, "w") as writer:
example_id = 0
prev_id = int(index[0])
for line, idx in zip(text, index):
if line == "" or line == "\n":
example_id += 1
else:
cur_id = int(idx)
output_line = '\n' if cur_id != prev_id else ''
if output_word_prediction:
output_line += line.split()[0] + '\t'
output_line += predictions[example_id].pop(0) + '\n'
writer.write(output_line)
prev_id = cur_id | null |
184,781 | import logging
import torch
from collections import OrderedDict
from transformers.modeling_bert import (BertConfig, BertEncoder,
BertIntermediate, BertLayer,
BertModel, BertOutput,
BertSelfAttention,
BertSelfOutput)
from transformers.modeling_roberta import (RobertaEmbeddings,
RobertaForMaskedLM,
RobertaForSequenceClassification,
RobertaModel)
def convert_cxlm_to_transformers(ckpt_path):
ckpt = torch.load(ckpt_path, map_location="cpu")
args = ckpt["args"]
config = BertConfig(
# vocab_size_or_config_json_file=250002,
vocab_size=250002,
hidden_size=args.encoder_embed_dim,
num_hidden_layers=args.encoder_layers,
num_attention_heads=args.encoder_attention_heads,
intermediate_size=args.encoder_ffn_embed_dim,
max_position_embeddings=args.max_positions + 2,
type_vocab_size=1,
layer_norm_eps=1e-5, # PyTorch default used in fairseq
)
print("Our BERT config:", config)
stat_dict = ckpt["model"]
new_stat_dict = {}
model = RobertaForMaskedLM(config)
model.eval()
sent_enc = "decoder.sentence_encoder"
new_stat_dict["roberta.embeddings.word_embeddings.weight"] = stat_dict[sent_enc + ".embed_tokens.weight"]
new_stat_dict["roberta.embeddings.position_embeddings.weight"] = stat_dict[sent_enc + ".embed_positions.weight"]
new_stat_dict["roberta.embeddings.token_type_embeddings.weight"] = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight)
new_stat_dict["roberta.embeddings.LayerNorm.weight"] = stat_dict[sent_enc + ".emb_layer_norm.weight"]
new_stat_dict["roberta.embeddings.LayerNorm.bias"] = stat_dict[sent_enc + ".emb_layer_norm.bias"]
for i in range(config.num_hidden_layers):
# Encoder: start of layer
# layer: BertLayer = model.roberta.encoder.layer[i]
layer = "roberta.encoder.layer.%d" % i
roberta_layer = sent_enc + (".layers.%d" % i)
### self attention
# self_attn: BertSelfAttention = layer.attention.self
self_attn = layer + ".attention.self"
assert (
stat_dict[roberta_layer + ".self_attn.k_proj.weight"].data.shape == \
stat_dict[roberta_layer + ".self_attn.q_proj.weight"].data.shape == \
stat_dict[roberta_layer + ".self_attn.v_proj.weight"].data.shape == \
torch.Size((config.hidden_size, config.hidden_size))
)
new_stat_dict[self_attn + ".query.weight"] = stat_dict[roberta_layer + ".self_attn.q_proj.weight"]
new_stat_dict[self_attn + ".query.bias"] = stat_dict[roberta_layer + ".self_attn.q_proj.bias"]
new_stat_dict[self_attn + ".key.weight"] = stat_dict[roberta_layer + ".self_attn.k_proj.weight"]
new_stat_dict[self_attn + ".key.bias"] = stat_dict[roberta_layer + ".self_attn.k_proj.bias"]
new_stat_dict[self_attn + ".value.weight"] = stat_dict[roberta_layer + ".self_attn.v_proj.weight"]
new_stat_dict[self_attn + ".value.bias"] = stat_dict[roberta_layer + ".self_attn.v_proj.bias"]
### self-attention output
# self_output: BertSelfOutput = layer.attention.output
self_output = layer + ".attention.output"
assert (
model.roberta.encoder.layer[i].attention.output.dense.weight.shape == stat_dict[
roberta_layer + ".self_attn.out_proj.weight"].shape
)
new_stat_dict[self_output + ".dense.weight"] = stat_dict[roberta_layer + ".self_attn.out_proj.weight"]
new_stat_dict[self_output + ".dense.bias"] = stat_dict[roberta_layer + ".self_attn.out_proj.bias"]
new_stat_dict[self_output + ".LayerNorm.weight"] = stat_dict[roberta_layer + ".self_attn_layer_norm.weight"]
new_stat_dict[self_output + ".LayerNorm.bias"] = stat_dict[roberta_layer + ".self_attn_layer_norm.bias"]
### intermediate
# intermediate: BertIntermediate = layer.intermediate
intermediate = layer + ".intermediate"
assert (
model.roberta.encoder.layer[i].intermediate.dense.weight.shape == stat_dict[
roberta_layer + ".fc1.weight"].shape
)
# TODO
new_stat_dict[intermediate + ".dense.weight"] = stat_dict[roberta_layer + ".fc1.weight"]
new_stat_dict[intermediate + ".dense.bias"] = stat_dict[roberta_layer + ".fc1.bias"]
### output
# bert_output: BertOutput = layer.output
bert_output = layer + ".output"
assert (
model.roberta.encoder.layer[i].output.dense.weight.shape == stat_dict[
roberta_layer + ".fc2.weight"].shape
)
new_stat_dict[bert_output + ".dense.weight"] = stat_dict[roberta_layer + ".fc2.weight"]
new_stat_dict[bert_output + ".dense.bias"] = stat_dict[roberta_layer + ".fc2.bias"]
new_stat_dict[bert_output + ".LayerNorm.weight"] = stat_dict[roberta_layer + ".final_layer_norm.weight"]
new_stat_dict[bert_output + ".LayerNorm.bias"] = stat_dict[roberta_layer + ".final_layer_norm.bias"]
#### end of layer
new_stat_dict["lm_head.dense.weight"] = stat_dict["decoder.lm_head.dense.weight"]
new_stat_dict["lm_head.dense.bias"] = stat_dict["decoder.lm_head.dense.bias"]
new_stat_dict["lm_head.layer_norm.weight"] = stat_dict["decoder.lm_head.layer_norm.weight"]
new_stat_dict["lm_head.layer_norm.bias"] = stat_dict["decoder.lm_head.layer_norm.bias"]
new_stat_dict["lm_head.decoder.weight"] = stat_dict["decoder.lm_head.weight"]
new_stat_dict["lm_head.bias"] = stat_dict["decoder.lm_head.bias"]
new_stat_dict["lm_head.decoder.bias"] = stat_dict["decoder.lm_head.bias"]
new_stat_dict["roberta.pooler.dense.weight"] = model.roberta.pooler.dense.weight
new_stat_dict["roberta.pooler.dense.bias"] = model.roberta.pooler.dense.bias
return new_stat_dict
def update_hf_sd(old_sd, xlmr_path):
x = torch.load(xlmr_path, map_location="cpu")
m = old_sd
d = OrderedDict()
for k, v in m.items():
if k == 'roberta.pooler.dense.weight':
d[k] = x[k].half().clone()
elif k not in ('proj_matrix_fast', 'lm_head.decoder.bias', 'roberta.pooler.dense.weight'):
d[k] = v.data.half().clone()
assert set(d.keys()) == set(x.keys())
for k in d.keys():
assert d[k].size() == x[k].size()
for k in d.keys():
if k != 'roberta.pooler.dense.weight':
assert (d[k].float() - m[k].float()).abs().max().item() <= 1e-4
return d
def convert_pt_to_hf(xlmr_path, inf, logger=None):
if logger:
logger.info("converting pt file at {} to hf file.".format(inf))
sd = convert_cxlm_to_transformers(inf)
return update_hf_sd(sd, xlmr_path) | null |
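A hedged usage sketch of the conversion helpers above; the checkpoint paths are placeholders, not files shipped with the code.
# Hypothetical paths: "checkpoint_best.pt" is a fairseq-style checkpoint, and "xlm-roberta-base/pytorch_model.bin"
# is a reference Hugging Face XLM-R state dict used only for key/shape validation inside update_hf_sd.
hf_state_dict = convert_pt_to_hf("xlm-roberta-base/pytorch_model.bin", "checkpoint_best.pt")
torch.save(hf_state_dict, "converted/pytorch_model.bin")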
184,782 | import logging
import os
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def _create_next_token_logits_penalties(input_ids, logits, repetition_penalty):
# create logit penalties for already seen input_ids
token_penalties = np.ones(shape_list(logits))
prev_input_ids = [np.unique(input_id) for input_id in input_ids.numpy()]
for i, prev_input_id in enumerate(prev_input_ids):
logit_penalized = logits[i].numpy()[prev_input_id]
# if previous logit score is < 0 then multiply repetition penalty else divide
logit_penalized[logit_penalized < 0] = repetition_penalty
logit_penalized[logit_penalized > 0] = 1 / repetition_penalty
np.put(token_penalties[i], prev_input_id, logit_penalized)
return tf.convert_to_tensor(token_penalties, dtype=tf.float32) | null |
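A toy sketch of how the penalty tensor above could be applied before sampling; the tensors and the multiply step are illustrative, not taken verbatim from the surrounding generation code.
# Toy example: down-weight tokens that already occur in input_ids (repetition penalty > 1).
input_ids = tf.constant([[5, 7, 7]])
logits = tf.random.normal([1, 10])
penalties = _create_next_token_logits_penalties(input_ids, logits, 1.2)
logits = tf.math.multiply(logits, penalties)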
184,783 | import logging
import os
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
def scatter_values_on_batch_indices(values, batch_indices):
shape = shape_list(batch_indices)
# broadcast batch dim to shape
broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1])
# transform batch_indices to pair_indices
pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0))
# scatter values to pair indices
return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape)
def set_tensor_by_indices_to_value(tensor, indices, value):
# create value_tensor since tensor value assignment is not possible in TF
value_tensor = tf.zeros_like(tensor) + value
return tf.where(indices, value_tensor, tensor)
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
The provided code snippet includes necessary dependencies for implementing the `tf_top_k_top_p_filtering` function. Write a Python function `def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1)` to solve the following problem:
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
Here is the function:
def tf_top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
""" Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
Make sure we keep at least min_tokens_to_keep per batch example in the output
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
logits_shape = shape_list(logits)
if top_k > 0:
top_k = min(max(top_k, min_tokens_to_keep), logits_shape[-1]) # Safety check
# Remove all tokens with a probability less than the last token of the top-k
indices_to_remove = logits < tf.math.top_k(logits, k=top_k)[0][..., -1, None]
logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value)
if top_p < 1.0:
sorted_indices = tf.argsort(logits, direction="DESCENDING")
sorted_logits = tf.gather(
logits, sorted_indices, axis=-1, batch_dims=1
) # expects logits to be of dim (batch_size, vocab_size)
cumulative_probs = tf.math.cumsum(tf.nn.softmax(sorted_logits, axis=-1), axis=-1)
# Remove tokens with cumulative probability above the threshold (token with 0 are kept)
sorted_indices_to_remove = cumulative_probs > top_p
if min_tokens_to_keep > 1:
# Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
sorted_indices_to_remove = tf.concat(
[
tf.zeros_like(sorted_indices_to_remove[:, :min_tokens_to_keep]),
sorted_indices_to_remove[:, min_tokens_to_keep:],
],
-1,
)
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove = tf.roll(sorted_indices_to_remove, 1, axis=-1)
sorted_indices_to_remove = tf.concat(
[tf.zeros_like(sorted_indices_to_remove[:, :1]), sorted_indices_to_remove[:, 1:]], -1,
)
# scatter sorted tensors to original indexing
indices_to_remove = scatter_values_on_batch_indices(sorted_indices_to_remove, sorted_indices)
logits = set_tensor_by_indices_to_value(logits, indices_to_remove, filter_value)
return logits | Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 |
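An illustrative sketch (toy logits, assumed sampling step) of how the filtered logits are typically consumed:
# Toy example: keep the 50 most likely tokens and the 0.95 nucleus, then sample one token per batch row.
logits = tf.random.normal([2, 32000])
filtered_logits = tf_top_k_top_p_filtering(logits, top_k=50, top_p=0.95)
next_tokens = tf.squeeze(tf.random.categorical(filtered_logits, num_samples=1, dtype=tf.int32), axis=1)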
184,784 | import logging
import os
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import DUMMY_INPUTS, TF2_WEIGHTS_NAME, WEIGHTS_NAME, cached_path, hf_bucket_url, is_remote_url
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
The provided code snippet includes necessary dependencies for implementing the `get_initializer` function. Write a Python function `def get_initializer(initializer_range=0.02)` to solve the following problem:
Creates a `tf.initializers.truncated_normal` with the given range. Args: initializer_range: float, initializer range for stddev. Returns: TruncatedNormal initializer with stddev = `initializer_range`.
Here is the function:
def get_initializer(initializer_range=0.02):
"""Creates a `tf.initializers.truncated_normal` with the given range.
Args:
initializer_range: float, initializer range for stddev.
Returns:
TruncatedNormal initializer with stddev = `initializer_range`.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range) | Creates a `tf.initializers.truncated_normal` with the given range. Args: initializer_range: float, initializer range for stddev. Returns: TruncatedNormal initializer with stddev = `initializer_range`. |
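A one-line illustration of the initializer helper above (the layer size and name are arbitrary):
classifier = tf.keras.layers.Dense(768, kernel_initializer=get_initializer(0.02), name="classifier")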
184,785 | import logging
import numpy as np
import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list
def angle_defn(pos, i, d_model_size):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(d_model_size))
return pos * angle_rates
def positional_encoding(position, d_model_size):
# create the sinusoidal pattern for the positional encoding
angle_rads = angle_defn(np.arange(position)[:, np.newaxis], np.arange(d_model_size)[np.newaxis, :], d_model_size)
sines = np.sin(angle_rads[:, 0::2])
cosines = np.cos(angle_rads[:, 1::2])
# pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1)[np.newaxis, ...], dtype=tf.float32)
pos_encoding = tf.cast(np.concatenate([sines, cosines], axis=-1), dtype=tf.float32)
return pos_encoding | null |
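A quick illustration of the helpers above with toy sizes; the resulting table has one row per position, with sines in the first half of the columns and cosines in the second half.
pos_encoding = positional_encoding(128, 64)
assert shape_list(pos_encoding) == [128, 64]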
184,786 | import logging
import numpy as np
import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list
def shape_list(x):
"""Deal with dynamic shape in tensorflow cleanly."""
static = x.shape.as_list()
dynamic = tf.shape(x)
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
# calculate attention
matmul_qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(shape_list(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
if mask is not None:
scaled_attention_logits += mask * -1e4
if attention_mask is not None:
# Apply the attention mask
scaled_attention_logits = scaled_attention_logits + attention_mask
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
# Mask heads if we want to
if head_mask is not None:
attention_weights = attention_weights * head_mask
output = tf.matmul(attention_weights, v)
return output, attention_weights | null |
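A toy call of the attention helper above, with made-up (batch, heads, seq_len, head_dim) shapes:
q = tf.random.normal([2, 8, 10, 64])
k = tf.random.normal([2, 8, 10, 64])
v = tf.random.normal([2, 8, 10, 64])
context, attention_weights = scaled_dot_product_attention(q, k, v, mask=None)  # (2, 8, 10, 64), (2, 8, 10, 10)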
184,787 | import logging
import numpy as np
import tensorflow as tf
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import TFPreTrainedModel, TFSharedEmbeddings, shape_list
def point_wise_feed_forward_network(d_model_size, dff, name=""):
return tf.keras.Sequential(
[tf.keras.layers.Dense(dff, activation="relu", name="0"), tf.keras.layers.Dense(d_model_size, name="2")],
name="ffn",
) | null |
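A toy instantiation of the feed-forward helper above (the hidden sizes are illustrative):
ffn = point_wise_feed_forward_network(d_model_size=768, dff=3072)
hidden_states = ffn(tf.random.normal([2, 10, 768]))  # output keeps shape (2, 10, 768)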
184,788 | import os
from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers import SingleSentenceClassificationProcessor as Processor
from transformers import TextClassificationPipeline, is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
class TrainCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
train_parser = parser.add_parser("train", help="CLI tool to train a model on a task.")
train_parser.add_argument(
"--train_data",
type=str,
required=True,
help="path to train (and optionally evaluation) dataset as a csv with "
"tab separated labels and sentences.",
)
train_parser.add_argument(
"--column_label", type=int, default=0, help="Column of the dataset csv file with example labels."
)
train_parser.add_argument(
"--column_text", type=int, default=1, help="Column of the dataset csv file with example texts."
)
train_parser.add_argument(
"--column_id", type=int, default=2, help="Column of the dataset csv file with example ids."
)
train_parser.add_argument(
"--skip_first_row", action="store_true", help="Skip the first row of the csv file (headers)."
)
train_parser.add_argument("--validation_data", type=str, default="", help="path to validation dataset.")
train_parser.add_argument(
"--validation_split",
type=float,
default=0.1,
help="if validation dataset is not provided, fraction of train dataset " "to use as validation dataset.",
)
train_parser.add_argument("--output", type=str, default="./", help="path to saved the trained model.")
train_parser.add_argument(
"--task", type=str, default="text_classification", help="Task to train the model on."
)
train_parser.add_argument(
"--model", type=str, default="bert-base-uncased", help="Model's name or path to stored model."
)
train_parser.add_argument("--train_batch_size", type=int, default=32, help="Batch size for training.")
train_parser.add_argument("--valid_batch_size", type=int, default=64, help="Batch size for validation.")
train_parser.add_argument("--learning_rate", type=float, default=3e-5, help="Learning rate.")
train_parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon for Adam optimizer.")
train_parser.set_defaults(func=train_command_factory)
def __init__(self, args: Namespace):
self.logger = getLogger("transformers-cli/training")
self.framework = "tf" if is_tf_available() else "torch"
os.makedirs(args.output, exist_ok=True)
assert os.path.isdir(args.output)
self.output = args.output
self.column_label = args.column_label
self.column_text = args.column_text
self.column_id = args.column_id
self.logger.info("Loading {} pipeline for {}".format(args.task, args.model))
if args.task == "text_classification":
self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info("Loading dataset from {}".format(args.train_data))
self.train_dataset = Processor.create_from_csv(
args.train_data,
column_label=args.column_label,
column_text=args.column_text,
column_id=args.column_id,
skip_first_row=args.skip_first_row,
)
self.valid_dataset = None
if args.validation_data:
self.logger.info("Loading validation dataset from {}".format(args.validation_data))
self.valid_dataset = Processor.create_from_csv(
args.validation_data,
column_label=args.column_label,
column_text=args.column_text,
column_id=args.column_id,
skip_first_row=args.skip_first_row,
)
self.validation_split = args.validation_split
self.train_batch_size = args.train_batch_size
self.valid_batch_size = args.valid_batch_size
self.learning_rate = args.learning_rate
self.adam_epsilon = args.adam_epsilon
def run(self):
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def run_torch(self):
raise NotImplementedError
def run_tf(self):
self.pipeline.fit(
self.train_dataset,
validation_data=self.valid_dataset,
validation_split=self.validation_split,
learning_rate=self.learning_rate,
adam_epsilon=self.adam_epsilon,
train_batch_size=self.train_batch_size,
valid_batch_size=self.valid_batch_size,
)
# Save trained pipeline
self.pipeline.save_pretrained(self.output)
The provided code snippet includes necessary dependencies for implementing the `train_command_factory` function. Write a Python function `def train_command_factory(args: Namespace)` to solve the following problem:
Factory function used to instantiate serving server from provided command line arguments. :return: ServeCommand
Here is the function:
def train_command_factory(args: Namespace):
"""
Factory function used to instantiate serving server from provided command line arguments.
:return: ServeCommand
"""
return TrainCommand(args) | Factory function used to instantiate serving server from provided command line arguments. :return: ServeCommand |
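A hedged sketch of how the factory above gets wired into a command-line entry point; the root-parser setup and argument values are illustrative, and actually running it would still require a real model and CSV file.
parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
TrainCommand.register_subcommand(commands_parser)
args = parser.parse_args(["train", "--train_data", "train.csv", "--output", "./model_out"])
command = args.func(args)  # train_command_factory returns a TrainCommand
command.run()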
184,789 | from argparse import ArgumentParser, Namespace
from logging import getLogger
from transformers.commands import BaseTransformersCLICommand
class ConvertCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformer-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
train_parser = parser.add_parser(
"convert",
help="CLI tool to run convert model from original "
"author checkpoints to Transformers PyTorch checkpoints.",
)
train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
train_parser.add_argument(
"--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
)
train_parser.add_argument(
"--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch savd model output."
)
train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
train_parser.add_argument(
"--finetuning_task_name",
type=str,
default=None,
help="Optional fine-tuning task name if the TF model was a finetuned model.",
)
train_parser.set_defaults(func=convert_command_factory)
def __init__(
self,
model_type: str,
tf_checkpoint: str,
pytorch_dump_output: str,
config: str,
finetuning_task_name: str,
*args
):
self._logger = getLogger("transformers-cli/converting")
self._logger.info("Loading model {}".format(model_type))
self._model_type = model_type
self._tf_checkpoint = tf_checkpoint
self._pytorch_dump_output = pytorch_dump_output
self._config = config
self._finetuning_task_name = finetuning_task_name
def run(self):
if self._model_type == "bert":
try:
from transformers.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "gpt":
from transformers.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "transfo_xl":
try:
from transformers.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
if "ckpt" in self._tf_checkpoint.lower():
TF_CHECKPOINT = self._tf_checkpoint
TF_DATASET_FILE = ""
else:
TF_DATASET_FILE = self._tf_checkpoint
TF_CHECKPOINT = ""
convert_transfo_xl_checkpoint_to_pytorch(
TF_CHECKPOINT, self._config, self._pytorch_dump_output, TF_DATASET_FILE
)
elif self._model_type == "gpt2":
try:
from transformers.convert_gpt2_original_tf_checkpoint_to_pytorch import (
convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
elif self._model_type == "xlnet":
try:
from transformers.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
msg = (
"transformers can only be used from the commandline to convert TensorFlow models in PyTorch, "
"In that case, it requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise ImportError(msg)
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
)
elif self._model_type == "xlm":
from transformers.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
else:
raise ValueError("--model_type should be selected in the list [bert, gpt, gpt2, transfo_xl, xlnet, xlm]")
The provided code snippet includes necessary dependencies for implementing the `convert_command_factory` function. Write a Python function `def convert_command_factory(args: Namespace)` to solve the following problem:
Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. :return: ServeCommand
Here is the function:
def convert_command_factory(args: Namespace):
"""
Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.
:return: ServeCommand
"""
return ConvertCommand(
args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
) | Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint. :return: ServeCommand |
184,790 | import logging
from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, Pipeline, PipelineDataFormat, pipeline
def try_infer_format_from_ext(path: str):
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(ext):
return ext
raise Exception(
"Unable to determine file format from file extension {}. "
"Please provide the format through --format {}".format(path, PipelineDataFormat.SUPPORTED_FORMATS)
)
class RunCommand(BaseTransformersCLICommand):
def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
self._nlp = nlp
self._reader = reader
def register_subcommand(parser: ArgumentParser):
run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
run_parser.add_argument("--task", choices=SUPPORTED_TASKS.keys(), help="Task to run")
run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
run_parser.add_argument(
"--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
)
run_parser.add_argument(
"--column",
type=str,
help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
)
run_parser.add_argument(
"--format",
type=str,
default="infer",
choices=PipelineDataFormat.SUPPORTED_FORMATS,
help="Input format to read from",
)
run_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
run_parser.set_defaults(func=run_command_factory)
def run(self):
nlp, outputs = self._nlp, []
for entry in self._reader:
output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
if isinstance(output, dict):
outputs.append(output)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
binary_path = self._reader.save_binary(outputs)
logger.warning("Current pipeline requires output to be in binary format, saving at {}".format(binary_path))
else:
self._reader.save(outputs)
class PipelineDataFormat:
"""
Base class for all the pipeline supported data format both for reading and writing.
Supported data formats currently includes:
- JSON
- CSV
- stdin/stdout (pipe)
PipelineDataFormat also includes some utilities to work with multi-columns like mapping from datasets columns
to pipelines keyword arguments through the `dataset_kwarg_1=dataset_column_1` format.
"""
SUPPORTED_FORMATS = ["json", "csv", "pipe"]
def __init__(self, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False):
self.output_path = output_path
self.input_path = input_path
self.column = column.split(",") if column is not None else [""]
self.is_multi_columns = len(self.column) > 1
if self.is_multi_columns:
self.column = [tuple(c.split("=")) if "=" in c else (c, c) for c in self.column]
if output_path is not None and not overwrite:
if exists(abspath(self.output_path)):
raise OSError("{} already exists on disk".format(self.output_path))
if input_path is not None:
if not exists(abspath(self.input_path)):
raise OSError("{} doesnt exist on disk".format(self.input_path))
def __iter__(self):
raise NotImplementedError()
def save(self, data: dict):
"""
Save the provided data object with the representation for the current `DataFormat`.
:param data: data to store
:return:
"""
raise NotImplementedError()
def save_binary(self, data: Union[dict, List[dict]]) -> str:
"""
Save the provided data object as a pickle-formatted binary data on the disk.
:param data: data to store
:return: (str) Path where the data has been saved
"""
path, _ = os.path.splitext(self.output_path)
binary_path = os.path.extsep.join((path, "pickle"))
with open(binary_path, "wb+") as f_output:
pickle.dump(data, f_output)
return binary_path
def from_str(
format: str, output_path: Optional[str], input_path: Optional[str], column: Optional[str], overwrite=False
):
if format == "json":
return JsonPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "csv":
return CsvPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
elif format == "pipe":
return PipedPipelineDataFormat(output_path, input_path, column, overwrite=overwrite)
else:
raise KeyError("Unknown reader {} (Available reader are json/csv/pipe)".format(format))
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a pipeline.
Pipeline are made of:
- A Tokenizer instance in charge of mapping raw textual input to tokens
- A Model instance
- Some (optional) post processing for enhancing model's output
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- "feature-extraction": will return a :class:`~transformers.FeatureExtractionPipeline`
- "sentiment-analysis": will return a :class:`~transformers.TextClassificationPipeline`
- "ner": will return a :class:`~transformers.NerPipeline`
- "question-answering": will return a :class:`~transformers.QuestionAnsweringPipeline`
- "fill-mask": will return a :class:`~transformers.FillMaskPipeline`
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`, defaults to :obj:`None`):
The configuration that will be used by the pipeline to instantiate the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
Returns:
:class:`~transformers.Pipeline`: Class inheriting from :class:`~transformers.Pipeline`, according to
the task.
Examples::
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
# Sentiment analysis pipeline
pipeline('sentiment-analysis')
# Question answering pipeline, specifying the checkpoint identifier
pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
# Named entity recognition pipeline, passing in a specific model and tokenizer
model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
pipeline('ner', model=model, tokenizer=tokenizer)
# Named entity recognition pipeline, passing a model and configuration with a HTTPS URL.
model_url = "https://s3.amazonaws.com/models.huggingface.co/bert/dbmdz/bert-large-cased-finetuned-conll03-english/pytorch_model.bin"
config_url = "https://s3.amazonaws.com/models.huggingface.co/bert/dbmdz/bert-large-cased-finetuned-conll03-english/config.json"
pipeline('ner', model=model_url, config=config_url, tokenizer='bert-base-cased')
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
framework = framework or get_framework(model)
targeted_task = SUPPORTED_TASKS[task]
task, model_class = targeted_task["impl"], targeted_task[framework]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
models, config, tokenizer = tuple(targeted_task["default"].values())
model = models[framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str) and model in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
tokenizer = model
elif isinstance(config, str) and config in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
tokenizer = config
else:
# Impossible to guess which tokenizer is right here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/url/shortcut name to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, **kwargs)
def run_command_factory(args):
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
reader = PipelineDataFormat.from_str(
format=format,
output_path=args.output,
input_path=args.input,
column=args.column if args.column else nlp.default_input_names,
overwrite=args.overwrite,
)
return RunCommand(nlp, reader) | null |
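A minimal usage sketch of the multi-column mapping described above. The file names and column names are hypothetical, and the snippet assumes the Csv/Json/Piped subclasses referenced by `from_str` plus the `pipeline` factory defined in this entry.

# Hypothetical example: map the "question" and "context" CSV columns onto the
# question-answering pipeline's keyword arguments via the kwarg=column syntax.
nlp = pipeline("question-answering")
reader = PipelineDataFormat.from_str(
    format="csv",
    output_path="answers.csv",      # hypothetical output file
    input_path="questions.csv",     # hypothetical input file
    column="question=question,context=context",
    overwrite=True,
)
outputs = [nlp(**entry) for entry in reader]  # each entry is a dict built from the mapped columns
reader.save(outputs)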
184,791 | import logging
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from transformers import Pipeline
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, pipeline
def Body(*x, **y):
pass | null |
184,792 | import logging
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from transformers import Pipeline
from transformers.commands import BaseTransformersCLICommand
from transformers.pipelines import SUPPORTED_TASKS, pipeline
class ServeCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
"""
Register this command to argparse so it's available for the transformers-cli
:param parser: Root parser to register command-specific arguments
:return:
"""
serve_parser = parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
)
serve_parser.add_argument(
"--task", type=str, choices=SUPPORTED_TASKS.keys(), help="The task to run the pipeline on"
)
serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
serve_parser.add_argument(
"--device",
type=int,
default=-1,
help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
)
serve_parser.set_defaults(func=serve_command_factory)
def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
self._pipeline = pipeline
self.host = host
self.port = port
self.workers = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and unicorn. "
'Please install transformers with [serving]: pip install "transformers[serving]".'
"Or install FastAPI and unicorn separately."
)
else:
logger.info("Serving model over {}:{}".format(host, port))
self._app = FastAPI(
routes=[
APIRoute(
"/",
self.model_info,
response_model=ServeModelInfoResult,
response_class=JSONResponse,
methods=["GET"],
),
APIRoute(
"/tokenize",
self.tokenize,
response_model=ServeTokenizeResult,
response_class=JSONResponse,
methods=["POST"],
),
APIRoute(
"/detokenize",
self.detokenize,
response_model=ServeDeTokenizeResult,
response_class=JSONResponse,
methods=["POST"],
),
APIRoute(
"/forward",
self.forward,
response_model=ServeForwardResult,
response_class=JSONResponse,
methods=["POST"],
),
],
timeout=600,
)
def run(self):
run(self._app, host=self.host, port=self.port, workers=self.workers)
def model_info(self):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config))
def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
"""
Tokenize the provided input and optionally return the corresponding token ids:
- **text_input**: String to tokenize
- **return_ids**: Boolean flag indicating whether the tokens should be converted to their integer mapping.
"""
try:
tokens_txt = self._pipeline.tokenizer.tokenize(text_input)
if return_ids:
tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
else:
return ServeTokenizeResult(tokens=tokens_txt)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
def detokenize(
self,
tokens_ids: List[int] = Body(None, embed=True),
skip_special_tokens: bool = Body(False, embed=True),
cleanup_tokenization_spaces: bool = Body(True, embed=True),
):
"""
Detokenize the provided token ids to readable text:
- **tokens_ids**: List of token ids
- **skip_special_tokens**: Flag indicating whether to skip special tokens when decoding
- **cleanup_tokenization_spaces**: Flag indicating whether to remove all leading/trailing spaces and intermediate ones.
"""
try:
decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
return ServeDeTokenizeResult(model="", text=decoded_str)
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})
async def forward(self, inputs=Body(None, embed=True)):
"""
**inputs**:
**attention_mask**:
**tokens_type_ids**:
"""
# Check we don't have empty string
if len(inputs) == 0:
return ServeForwardResult(output=[], attention=[])
try:
# Forward through the model
output = self._pipeline(inputs)
return ServeForwardResult(output=output)
except Exception as e:
raise HTTPException(500, {"error": str(e)})
def pipeline(
task: str,
model: Optional = None,
config: Optional[Union[str, PretrainedConfig]] = None,
tokenizer: Optional[Union[str, PreTrainedTokenizer]] = None,
framework: Optional[str] = None,
**kwargs
) -> Pipeline:
"""
Utility factory method to build a pipeline.
Pipelines are made of:
- A Tokenizer instance in charge of mapping raw textual input to tokens
- A Model instance
- Some (optional) post-processing for enhancing the model's output
Args:
task (:obj:`str`):
The task defining which pipeline will be returned. Currently accepted tasks are:
- "feature-extraction": will return a :class:`~transformers.FeatureExtractionPipeline`
- "sentiment-analysis": will return a :class:`~transformers.TextClassificationPipeline`
- "ner": will return a :class:`~transformers.NerPipeline`
- "question-answering": will return a :class:`~transformers.QuestionAnsweringPipeline`
- "fill-mask": will return a :class:`~transformers.FillMaskPipeline`
model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`, defaults to :obj:`None`):
The model that will be used by the pipeline to make predictions. This can be :obj:`None`, a string
checkpoint identifier or an actual pre-trained model inheriting from
:class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
TensorFlow.
If :obj:`None`, the default of the pipeline will be loaded.
config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`, defaults to :obj:`None`):
The configuration that will be used by the pipeline to instantiate the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained model configuration inheriting from
:class:`~transformers.PretrainedConfig`.
If :obj:`None`, the default of the pipeline will be loaded.
tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`, defaults to :obj:`None`):
The tokenizer that will be used by the pipeline to encode data for the model. This can be :obj:`None`,
a string checkpoint identifier or an actual pre-trained tokenizer inheriting from
:class:`~transformers.PreTrainedTokenizer`.
If :obj:`None`, the default of the pipeline will be loaded.
framework (:obj:`str`, `optional`, defaults to :obj:`None`):
The framework to use, either "pt" for PyTorch or "tf" for TensorFlow. The specified framework must be
installed.
If no framework is specified, will default to the one currently installed. If no framework is specified
and both frameworks are installed, will default to PyTorch.
Returns:
:class:`~transformers.Pipeline`: Class inheriting from :class:`~transformers.Pipeline`, according to
the task.
Examples::
from transformers import pipeline, AutoModelForTokenClassification, AutoTokenizer
# Sentiment analysis pipeline
pipeline('sentiment-analysis')
# Question answering pipeline, specifying the checkpoint identifier
pipeline('question-answering', model='distilbert-base-cased-distilled-squad', tokenizer='bert-base-cased')
# Named entity recognition pipeline, passing in a specific model and tokenizer
model = AutoModelForTokenClassification.from_pretrained("dbmdz/bert-large-cased-finetuned-conll03-english")
tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
pipeline('ner', model=model, tokenizer=tokenizer)
# Named entity recognition pipeline, passing a model and configuration with a HTTPS URL.
model_url = "https://s3.amazonaws.com/models.huggingface.co/bert/dbmdz/bert-large-cased-finetuned-conll03-english/pytorch_model.bin"
config_url = "https://s3.amazonaws.com/models.huggingface.co/bert/dbmdz/bert-large-cased-finetuned-conll03-english/config.json"
pipeline('ner', model=model_url, config=config_url, tokenizer='bert-base-cased')
"""
# Retrieve the task
if task not in SUPPORTED_TASKS:
raise KeyError("Unknown task {}, available tasks are {}".format(task, list(SUPPORTED_TASKS.keys())))
framework = framework or get_framework(model)
targeted_task = SUPPORTED_TASKS[task]
task, model_class = targeted_task["impl"], targeted_task[framework]
# Use default model/config/tokenizer for the task if no model is provided
if model is None:
models, config, tokenizer = tuple(targeted_task["default"].values())
model = models[framework]
# Try to infer tokenizer from model or config name (if provided as str)
if tokenizer is None:
if isinstance(model, str) and model in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
tokenizer = model
elif isinstance(config, str) and config in ALL_PRETRAINED_CONFIG_ARCHIVE_MAP:
tokenizer = config
else:
# Impossible to guess which tokenizer is right here
raise Exception(
"Impossible to guess which tokenizer to use. "
"Please provided a PretrainedTokenizer class or a path/url/shortcut name to a pretrained tokenizer."
)
modelcard = None
# Try to infer modelcard from model or config name (if provided as str)
if isinstance(model, str):
modelcard = model
elif isinstance(config, str):
modelcard = config
# Instantiate tokenizer if needed
if isinstance(tokenizer, (str, tuple)):
if isinstance(tokenizer, tuple):
# For tuple we have (tokenizer name, {kwargs})
tokenizer = AutoTokenizer.from_pretrained(tokenizer[0], **tokenizer[1])
else:
tokenizer = AutoTokenizer.from_pretrained(tokenizer)
# Instantiate config if needed
if isinstance(config, str):
config = AutoConfig.from_pretrained(config)
# Instantiate modelcard if needed
if isinstance(modelcard, str):
modelcard = ModelCard.from_pretrained(modelcard)
# Instantiate model if needed
if isinstance(model, str):
# Handle transparent TF/PT model conversion
model_kwargs = {}
if framework == "pt" and model.endswith(".h5"):
model_kwargs["from_tf"] = True
logger.warning(
"Model might be a TensorFlow model (ending with `.h5`) but TensorFlow is not available. "
"Trying to load the model with PyTorch."
)
elif framework == "tf" and model.endswith(".bin"):
model_kwargs["from_pt"] = True
logger.warning(
"Model might be a PyTorch model (ending with `.bin`) but PyTorch is not available. "
"Trying to load the model with Tensorflow."
)
model = model_class.from_pretrained(model, config=config, **model_kwargs)
return task(model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, **kwargs)
The provided code snippet includes necessary dependencies for implementing the `serve_command_factory` function. Write a Python function `def serve_command_factory(args: Namespace)` to solve the following problem:
Factory function used to instantiate serving server from provided command line arguments. :return: ServeCommand
Here is the function:
def serve_command_factory(args: Namespace):
"""
Factory function used to instantiate serving server from provided command line arguments.
:return: ServeCommand
"""
nlp = pipeline(
task=args.task,
model=args.model if args.model else None,
config=args.config,
tokenizer=args.tokenizer,
device=args.device,
)
return ServeCommand(nlp, args.host, args.port, args.workers) | Factory function used to instantiate serving server from provided command line arguments. :return: ServeCommand |
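A hedged illustration of the factory above. The Namespace fields mirror the arguments registered by ServeCommand.register_subcommand; the concrete values are hypothetical defaults.

from argparse import Namespace

args = Namespace(
    task="sentiment-analysis",
    model=None, config=None, tokenizer=None, device=-1,
    host="localhost", port=8888, workers=1,
)
serve_cmd = serve_command_factory(args)
# serve_cmd.run() would start the FastAPI app with uvicorn on localhost:8888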
184,793 | from argparse import ArgumentParser
from transformers.commands import BaseTransformersCLICommand
class DownloadCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("download")
download_parser.add_argument(
"--cache-dir", type=str, default=None, help="Path to location to store the models"
)
download_parser.add_argument(
"--force", action="store_true", help="Force the model to be download even if already in cache-dir"
)
download_parser.add_argument("model", type=str, help="Name of the model to download")
download_parser.set_defaults(func=download_command_factory)
def __init__(self, model: str, cache: str, force: bool):
self._model = model
self._cache = cache
self._force = force
def run(self):
from transformers import AutoModel, AutoTokenizer
AutoModel.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
AutoTokenizer.from_pretrained(self._model, cache_dir=self._cache, force_download=self._force)
def download_command_factory(args):
return DownloadCommand(args.model, args.cache_dir, args.force) | null |
184,794 | import platform
from argparse import ArgumentParser
from transformers import __version__ as version
from transformers import is_tf_available, is_torch_available
from transformers.commands import BaseTransformersCLICommand
class EnvironmentCommand(BaseTransformersCLICommand):
def register_subcommand(parser: ArgumentParser):
download_parser = parser.add_parser("env")
download_parser.set_defaults(func=info_command_factory)
def run(self):
pt_version = "not installed"
pt_cuda_available = "NA"
if is_torch_available():
import torch
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
tf_version = "not installed"
tf_cuda_available = "NA"
if is_tf_available():
import tensorflow as tf
tf_version = tf.__version__
try:
# deprecated in v2.1
tf_cuda_available = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))
info = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": "{} ({})".format(pt_version, pt_cuda_available),
"Tensorflow version (GPU?)": "{} ({})".format(tf_version, tf_cuda_available),
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
print(self.format_dict(info))
return info
def format_dict(d):
return "\n".join(["- {}: {}".format(prop, val) for prop, val in d.items()]) + "\n"
def info_command_factory(_):
return EnvironmentCommand() | null |
184,795 | import argparse
import logging
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = BertConfig.from_json_file(bert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = BertForPreTraining(config)
# Load weights from tf checkpoint
load_tf_weights_in_bert(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path) | null |
184,796 | import logging
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel
def angle_defn(pos, i, d_model_size):
angle_rates = 1 / torch.pow(10000, (2 * (i // 2)) / d_model_size)
return pos * angle_rates
def positional_encoding(position, d_model_size, dtype):
# create the sinusoidal pattern for the positional encoding
angle_rads = angle_defn(
torch.arange(position, dtype=dtype).unsqueeze(1),
torch.arange(d_model_size, dtype=dtype).unsqueeze(0),
d_model_size,
)
sines = torch.sin(angle_rads[:, 0::2])
cosines = torch.cos(angle_rads[:, 1::2])
pos_encoding = torch.cat([sines, cosines], dim=-1)
return pos_encoding | null |
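A quick sanity-check sketch of the helper above: for 10 positions and a model size of 16 the table has shape (10, 16), with sines in the first half of the last dimension and cosines in the second half.

import torch

pos_encoding = positional_encoding(position=10, d_model_size=16, dtype=torch.float32)
print(pos_encoding.shape)  # torch.Size([10, 16])
print(pos_encoding[0])     # position 0: the sine half is 0.0, the cosine half is 1.0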
184,797 | import logging
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel
def scaled_dot_product_attention(q, k, v, mask, attention_mask=None, head_mask=None):
# calculate attention
matmul_qk = torch.matmul(q, k.permute(0, 1, 3, 2))
dk = k.shape[-1]
scaled_attention_logits = matmul_qk / np.sqrt(dk)
if mask is not None:
nd, ns = scaled_attention_logits.size(-2), scaled_attention_logits.size(-1)
scaled_attention_logits += mask[ns - nd : ns, :ns] * -1e4
if attention_mask is not None:
# Apply the attention mask
scaled_attention_logits = scaled_attention_logits + attention_mask
attention_weights = torch.softmax(scaled_attention_logits, dim=-1)
# Mask heads if we want to
if head_mask is not None:
attention_weights = attention_weights * head_mask
output = torch.matmul(attention_weights, v)
return output, attention_weights | null |
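A small usage sketch under the CTRL convention where `mask` is an upper-triangular matrix of ones marking future positions; the shapes below are illustrative.

import torch

bsz, num_heads, seq_len, depth = 1, 2, 4, 8
q = torch.randn(bsz, num_heads, seq_len, depth)
k = torch.randn(bsz, num_heads, seq_len, depth)
v = torch.randn(bsz, num_heads, seq_len, depth)
causal_mask = torch.triu(torch.ones(seq_len, seq_len), diagonal=1)  # 1s above the diagonal get a -1e4 penalty

output, attention_weights = scaled_dot_product_attention(q, k, v, causal_mask)
print(output.shape)             # torch.Size([1, 2, 4, 8])
print(attention_weights.shape)  # torch.Size([1, 2, 4, 4])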
184,798 | import logging
import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss
from .configuration_ctrl import CTRLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import Conv1D, PreTrainedModel
def point_wise_feed_forward_network(d_model_size, dff):
return torch.nn.Sequential(torch.nn.Linear(d_model_size, dff), torch.nn.ReLU(), torch.nn.Linear(dff, d_model_size)) | null |
184,799 | import logging
import math
import random
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import BeamHypotheses, PreTrainedModel, create_position_ids_from_input_ids
def _combine_masks(key_padding_mask, attn_mask, targ_size):
# targ_size = (bsz, tgt_len, src_len)
a = torch.zeros(targ_size)
b = torch.zeros(targ_size)
if key_padding_mask is not None: # (bsz, tgt_len) -> targ_size
_check_shapes(key_padding_mask.shape, targ_size[:2])
reshaped = key_padding_mask.unsqueeze(2).expand(*targ_size)
a[reshaped] = 1e-8
if attn_mask is not None: # (tgt_len, src_len) -> targ_size
_check_shapes(attn_mask.shape, targ_size[-2:])
b = attn_mask.unsqueeze(0).expand(*targ_size)
return (a + b).unsqueeze(1).clamp(LARGE_NEGATIVE,)
def shift_tokens_right(input_ids, pad_token_id):
"""Shift input ids one token to the right, and wrap the last non pad token (usually <eos>)."""
prev_output_tokens = input_ids.clone()
index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
prev_output_tokens[:, 1:] = input_ids[:, :-1]
return prev_output_tokens
def make_padding_mask(input_ids, padding_idx=1):
"""True for pad tokens"""
padding_mask = input_ids.eq(padding_idx)
if not padding_mask.any():
padding_mask = None
return padding_mask
def fill_with_neg_inf(t):
"""FP16-compatible function that fills a input_ids with -inf."""
return t.float().fill_(float("-inf")).type_as(t)
The provided code snippet includes necessary dependencies for implementing the `_prepare_bart_decoder_inputs` function. Write a Python function `def _prepare_bart_decoder_inputs( config, input_ids, decoder_input_ids=None, decoder_attn_mask=None, )` to solve the following problem:
Prepare masks that ignore padding tokens decoder and a causal lm mask for the decoder if none are provided. This mimics the default behavior in fairseq. To override it pass in masks.
Here is the function:
def _prepare_bart_decoder_inputs(
config, input_ids, decoder_input_ids=None, decoder_attn_mask=None,
):
"""Prepare masks that ignore padding tokens decoder and a causal lm mask for the decoder if
none are provided. This mimics the default behavior in fairseq. To override it pass in masks.
"""
pad_token_id = config.pad_token_id
need_causal_mask = not config.output_past
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
bsz, tgt_len = decoder_input_ids.size()[:2]
if decoder_attn_mask is None:
decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
if need_causal_mask:
causal_lm_mask = torch.triu(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len)), 1)
else:
causal_lm_mask = None
new_shape = (bsz, tgt_len, tgt_len)
# make it broadcastable so can just be added to the attention coefficients
decoder_attn_mask = _combine_masks(decoder_padding_mask, causal_lm_mask, new_shape).to(device=input_ids.device)
assert decoder_attn_mask is None or decoder_attn_mask.shape == (bsz, 1, tgt_len, tgt_len)
return decoder_input_ids, decoder_attn_mask | Prepare masks that ignore padding tokens decoder and a causal lm mask for the decoder if none are provided. This mimics the default behavior in fairseq. To override it pass in masks. |
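A worked example of `shift_tokens_right` from the snippet above, assuming pad_token_id=1 and an <eos> id of 2: the <eos> token is wrapped around to position 0 and everything else shifts one step to the right.

import torch

input_ids = torch.tensor([[0, 4, 5, 2, 1, 1]])  # <s> w1 w2 </s> <pad> <pad>
print(shift_tokens_right(input_ids, pad_token_id=1))
# tensor([[2, 0, 4, 5, 2, 1]])  i.e. </s> <s> w1 w2 </s> <pad>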
184,800 | import logging
import math
import random
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import BeamHypotheses, PreTrainedModel, create_position_ids_from_input_ids
def _make_linear_from_emb(emb):
vocab_size, emb_size = emb.weight.shape
lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
lin_layer.weight.data = emb.weight.data # .T
return lin_layer | null |
184,801 | import logging
import math
import random
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import BeamHypotheses, PreTrainedModel, create_position_ids_from_input_ids
The provided code snippet includes necessary dependencies for implementing the `reorder_attn_buffer` function. Write a Python function `def reorder_attn_buffer(input_buffer, new_order)` to solve the following problem:
Reorder buffered internal state (for incremental generation).
Here is the function:
def reorder_attn_buffer(input_buffer, new_order):
"""Reorder buffered internal state (for incremental generation)."""
# input_buffer = self._get_input_buffer(incremental_state)
for k in input_buffer.keys():
input_buffer_k = input_buffer[k]
if input_buffer_k is not None:
input_buffer[k] = input_buffer_k.index_select(0, new_order)
# incremental_state = self._set_input_buffer(incremental_state, input_buffer)
return input_buffer | Reorder buffered internal state (for incremental generation). |
184,802 | import logging
import math
import random
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import BeamHypotheses, PreTrainedModel, create_position_ids_from_input_ids
def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True):
if torch.cuda.is_available():
try:
from apex.normalization import FusedLayerNorm
return FusedLayerNorm(normalized_shape, eps, elementwise_affine)
except ImportError:
pass
return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) | null |
184,803 | import logging
import math
import random
from typing import Dict, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from .configuration_bart import BartConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import BeamHypotheses, PreTrainedModel, create_position_ids_from_input_ids
The provided code snippet includes necessary dependencies for implementing the `_filter_out_falsey_values` function. Write a Python function `def _filter_out_falsey_values(tup) -> Tuple` to solve the following problem:
Remove entries that are None or [] from an iterable.
Here is the function:
def _filter_out_falsey_values(tup) -> Tuple:
"""Remove entries that are None or [] from an iterable."""
return tuple(x for x in tup if isinstance(x, torch.Tensor) or x) | Remove entries that are None or [] from an iterable. |
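A tiny sketch of the filter above: tensors are always kept (even all-zero ones), while None, empty lists and other falsey non-tensor entries are dropped.

import torch

outputs = (torch.zeros(2, 2), None, [], "attentions", 0)
print(_filter_out_falsey_values(outputs))
# -> the zero tensor and 'attentions' survive; None, [] and 0 are dropped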
184,804 | import argparse
import logging
import torch
from transformers import AlbertConfig, AlbertForMaskedLM, load_tf_weights_in_albert
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
# Initialise PyTorch model
config = AlbertConfig.from_json_file(albert_config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = AlbertForMaskedLM(config)
# Load weights from tf checkpoint
load_tf_weights_in_albert(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path) | null |
184,805 | import re
import tensorflow as tf
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
"""Applys a warmup schedule on a given learning rate decay schedule."""
def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
super().__init__()
self.initial_learning_rate = initial_learning_rate
self.warmup_steps = warmup_steps
self.power = power
self.decay_schedule_fn = decay_schedule_fn
self.name = name
def __call__(self, step):
with tf.name_scope(self.name or "WarmUp") as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
global_step_float = tf.cast(step, tf.float32)
warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
warmup_percent_done = global_step_float / warmup_steps_float
warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
return tf.cond(
global_step_float < warmup_steps_float,
lambda: warmup_learning_rate,
lambda: self.decay_schedule_fn(step),
name=name,
)
def get_config(self):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
class AdamWeightDecay(tf.keras.optimizers.Adam):
"""Adam enables L2 weight decay and clip_by_global_norm on gradients.
Just adding the square of the weights to the loss function is *not* the
correct way of using L2 regularization/weight decay with Adam, since that will
interact with the m and v parameters in strange ways.
Instead we want to decay the weights in a manner that doesn't interact with
the m/v parameters. This is equivalent to adding the square of the weights to
the loss with plain (non-momentum) SGD.
"""
def __init__(
self,
learning_rate=0.001,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-7,
amsgrad=False,
weight_decay_rate=0.0,
include_in_weight_decay=None,
exclude_from_weight_decay=None,
name="AdamWeightDecay",
**kwargs
):
super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
self.weight_decay_rate = weight_decay_rate
self._include_in_weight_decay = include_in_weight_decay
self._exclude_from_weight_decay = exclude_from_weight_decay
def from_config(cls, config):
"""Creates an optimizer from its config with WarmUp custom object."""
custom_objects = {"WarmUp": WarmUp}
return super().from_config(config, custom_objects=custom_objects)
def _prepare_local(self, var_device, var_dtype, apply_state):
super()._prepare_local(var_device, var_dtype, apply_state)
apply_state["weight_decay_rate"] = tf.constant(self.weight_decay_rate, name="adam_weight_decay_rate")
def _decay_weights_op(self, var, learning_rate, apply_state):
do_decay = self._do_use_weight_decay(var.name)
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state["weight_decay_rate"], use_locking=self._use_locking
)
return tf.no_op()
def apply_gradients(self, grads_and_vars, clip_norm, name=None):
grads, tvars = list(zip(*grads_and_vars))
(grads, _) = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return super().apply_gradients(zip(grads, tvars))
def _get_lr(self, var_device, var_dtype, apply_state):
"""Retrieves the learning rate with the given state."""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
apply_state = apply_state or {}
coefficients = apply_state.get((var_device, var_dtype))
if coefficients is None:
coefficients = self._fallback_apply_state(var_device, var_dtype)
apply_state[(var_device, var_dtype)] = coefficients
return coefficients["lr_t"], dict(apply_state=apply_state)
def _resource_apply_dense(self, grad, var, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super()._resource_apply_dense(grad, var, **kwargs)
def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
decay = self._decay_weights_op(var, lr_t, apply_state)
with tf.control_dependencies([decay]):
return super()._resource_apply_sparse(grad, var, indices, **kwargs)
def get_config(self):
config = super().get_config()
config.update({"weight_decay_rate": self.weight_decay_rate})
return config
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(r, param_name) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
The provided code snippet includes necessary dependencies for implementing the `create_optimizer` function. Write a Python function `def create_optimizer(init_lr, num_train_steps, num_warmup_steps)` to solve the following problem:
Creates an optimizer with learning rate schedule.
Here is the function:
def create_optimizer(init_lr, num_train_steps, num_warmup_steps):
"""Creates an optimizer with learning rate schedule."""
# Implements linear decay of the learning rate.
learning_rate_fn = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=init_lr, decay_steps=num_train_steps, end_learning_rate=0.0
)
if num_warmup_steps:
learning_rate_fn = WarmUp(
initial_learning_rate=init_lr, decay_schedule_fn=learning_rate_fn, warmup_steps=num_warmup_steps
)
optimizer = AdamWeightDecay(
learning_rate=learning_rate_fn,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=["layer_norm", "bias"],
)
return optimizer | Creates an optimizer with learning rate schedule. |
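A minimal sketch of the schedule composition used by `create_optimizer` above: during warmup the learning rate ramps linearly from 0 to init_lr, after which the polynomial decay takes over. The step values are illustrative.

import tensorflow as tf

decay_fn = tf.keras.optimizers.schedules.PolynomialDecay(
    initial_learning_rate=5e-5, decay_steps=1000, end_learning_rate=0.0
)
schedule = WarmUp(initial_learning_rate=5e-5, decay_schedule_fn=decay_fn, warmup_steps=100)
print(float(schedule(50)))   # ~2.5e-05, halfway through warmup
print(float(schedule(550)))  # value from the polynomial decay after warmup

optimizer = create_optimizer(init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100)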
184,806 | import json
import logging
import os
import re
from typing import List, Optional, Union
from tokenizers import Tokenizer
from tokenizers.decoders import BPEDecoder
from tokenizers.implementations import BaseTokenizer
from tokenizers.models import BPE
from tokenizers.normalizers import BertNormalizer, Sequence, unicode_normalizer_from_str
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.trainers import BpeTrainer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings)
Here is the function:
def get_pairs(word):
"""
Return set of symbol pairs in a word.
word is represented as tuple of symbols (symbols being variable-length strings)
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs | Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) |
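A quick illustration of `get_pairs` above on a word split into single-character symbols; a BPE merge loop would then pick the best-ranked pair to merge.

word = ("l", "o", "w", "e", "r")
print(get_pairs(word))
# {('l', 'o'), ('o', 'w'), ('w', 'e'), ('e', 'r')}  (a set, so order may vary)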
184,807 | import json
import logging
import os
import re
from typing import List, Optional, Union
from tokenizers import Tokenizer
from tokenizers.decoders import BPEDecoder
from tokenizers.implementations import BaseTokenizer
from tokenizers.models import BPE
from tokenizers.normalizers import BertNormalizer, Sequence, unicode_normalizer_from_str
from tokenizers.pre_tokenizers import BertPreTokenizer
from tokenizers.trainers import BpeTrainer
from .tokenization_bert import BasicTokenizer
from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast
The provided code snippet includes necessary dependencies for implementing the `text_standardize` function. Write a Python function `def text_standardize(text)` to solve the following problem:
fixes some issues the spacy tokenizer had on books corpus also does some whitespace standardization
Here is the function:
def text_standardize(text):
"""
Fixes some issues the spaCy tokenizer had on the BooksCorpus
and also does some whitespace standardization.
"""
text = text.replace("—", "-")
text = text.replace("–", "-")
text = text.replace("―", "-")
text = text.replace("…", "...")
text = text.replace("´", "'")
text = re.sub(r"""(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)""", r" \1 ", text)
text = re.sub(r"\s*\n\s*", " \n ", text)
text = re.sub(r"[^\S\n]+", " ", text)
return text.strip() | fixes some issues the spacy tokenizer had on books corpus also does some whitespace standardization |
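A short example of the normalization above: fancy dashes and ellipses are mapped to ASCII, the listed punctuation runs get surrounding spaces, and non-newline whitespace is collapsed.

print(text_standardize("He said—wait…   really?!"))
# roughly: 'He said - wait... really ? !'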
184,808 | import argparse
import logging
import torch
from transformers import T5Config, T5Model, load_tf_weights_in_t5
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
# Initialise PyTorch model
config = T5Config.from_json_file(config_file)
print("Building PyTorch model from configuration: {}".format(str(config)))
model = T5Model(config)
# Load weights from tf checkpoint
load_tf_weights_in_t5(model, config, tf_checkpoint_path)
# Save pytorch-model
print("Save PyTorch model to {}".format(pytorch_dump_path))
torch.save(model.state_dict(), pytorch_dump_path) | null |
184,809 | import logging
import numpy as np
import tensorflow as tf
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFConv1D,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
shape_list,
)
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied.
Here is the function:
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh((np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf | Gaussian Error Linear Unit. This is a smoother version of the RELU. Original paper: https://arxiv.org/abs/1606.08415 Args: x: float Tensor to perform activation. Returns: `x` with the GELU activation applied. |
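A brief numeric check of the tanh approximation above; the exact erf-based GELU gives about -0.159, 0.0 and 0.841 for these inputs, and the approximation lands very close.

import tensorflow as tf

x = tf.constant([-1.0, 0.0, 1.0])
print(gelu(x).numpy())  # approximately [-0.159, 0.0, 0.841]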
184,810 | import logging
import numpy as np
import tensorflow as tf
from .configuration_openai import OpenAIGPTConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_tf_utils import (
TFConv1D,
TFPreTrainedModel,
TFSequenceSummary,
TFSharedEmbeddings,
get_initializer,
shape_list,
)
def swish(x):
return x * tf.math.sigmoid(x) | null |
184,811 | import argparse
import logging
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, GPT2Config, GPT2Model, load_tf_weights_in_gpt2
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
# Construct model
if gpt2_config_file == "":
config = GPT2Config()
else:
config = GPT2Config.from_json_file(gpt2_config_file)
model = GPT2Model(config)
# Load weights from numpy
load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string()) | null |
184,812 | import argparse
import logging
import torch
from transformers import CONFIG_NAME, WEIGHTS_NAME, OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
# Construct model
if openai_config_file == "":
config = OpenAIGPTConfig()
else:
config = OpenAIGPTConfig.from_json_file(openai_config_file)
model = OpenAIGPTModel(config)
# Load weights from numpy
load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
print("Save PyTorch model to {}".format(pytorch_weights_dump_path))
torch.save(model.state_dict(), pytorch_weights_dump_path)
print("Save configuration file to {}".format(pytorch_config_dump_path))
with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
f.write(config.to_json_string()) | null |
184,813 | import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
from .configuration_transfo_xl import TransfoXLConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_transfo_xl_utilities import LogUniformSampler, ProjectedAdaptiveLogSoftmax, sample_logits
from .modeling_utils import PreTrainedModel
logger = logging.getLogger(__name__)
def build_tf_to_pytorch_map(model, config):
""" A map of modules from TF to PyTorch.
This time I use a map to keep the PyTorch model as identical to the original PyTorch model as possible.
"""
tf_to_pt_map = {}
if hasattr(model, "transformer"):
# We are loading in a TransfoXLLMHeadModel => we will load also the Adaptive Softmax
tf_to_pt_map.update(
{
"transformer/adaptive_softmax/cutoff_0/cluster_W": model.crit.cluster_weight,
"transformer/adaptive_softmax/cutoff_0/cluster_b": model.crit.cluster_bias,
}
)
for i, (out_l, proj_l, tie_proj) in enumerate(
zip(model.crit.out_layers, model.crit.out_projs, config.tie_projs)
):
layer_str = "transformer/adaptive_softmax/cutoff_%d/" % i
if config.tie_weight:
tf_to_pt_map.update({layer_str + "b": out_l.bias})
else:
raise NotImplementedError
# I don't think this is implemented in the TF code
tf_to_pt_map.update({layer_str + "lookup_table": out_l.weight, layer_str + "b": out_l.bias})
if not tie_proj:
tf_to_pt_map.update({layer_str + "proj": proj_l})
# Now load the rest of the transformer
model = model.transformer
# Embeddings
for i, (embed_l, proj_l) in enumerate(zip(model.word_emb.emb_layers, model.word_emb.emb_projs)):
layer_str = "transformer/adaptive_embed/cutoff_%d/" % i
tf_to_pt_map.update({layer_str + "lookup_table": embed_l.weight, layer_str + "proj_W": proj_l})
# Transformer blocks
for i, b in enumerate(model.layers):
layer_str = "transformer/layer_%d/" % i
tf_to_pt_map.update(
{
layer_str + "rel_attn/LayerNorm/gamma": b.dec_attn.layer_norm.weight,
layer_str + "rel_attn/LayerNorm/beta": b.dec_attn.layer_norm.bias,
layer_str + "rel_attn/o/kernel": b.dec_attn.o_net.weight,
layer_str + "rel_attn/qkv/kernel": b.dec_attn.qkv_net.weight,
layer_str + "rel_attn/r/kernel": b.dec_attn.r_net.weight,
layer_str + "ff/LayerNorm/gamma": b.pos_ff.layer_norm.weight,
layer_str + "ff/LayerNorm/beta": b.pos_ff.layer_norm.bias,
layer_str + "ff/layer_1/kernel": b.pos_ff.CoreNet[0].weight,
layer_str + "ff/layer_1/bias": b.pos_ff.CoreNet[0].bias,
layer_str + "ff/layer_2/kernel": b.pos_ff.CoreNet[3].weight,
layer_str + "ff/layer_2/bias": b.pos_ff.CoreNet[3].bias,
}
)
# Relative positioning biases
if config.untie_r:
r_r_list = []
r_w_list = []
for b in model.layers:
r_r_list.append(b.dec_attn.r_r_bias)
r_w_list.append(b.dec_attn.r_w_bias)
else:
r_r_list = [model.r_r_bias]
r_w_list = [model.r_w_bias]
tf_to_pt_map.update({"transformer/r_r_bias": r_r_list, "transformer/r_w_bias": r_w_list})
return tf_to_pt_map
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_transfo_xl` function. Write a Python function `def load_tf_weights_in_transfo_xl(model, config, tf_path)` to solve the following problem:
Load tf checkpoints in a pytorch model
Here is the function:
def load_tf_weights_in_transfo_xl(model, config, tf_path):
""" Load tf checkpoints in a pytorch model
"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions."
)
raise
# Build TF to PyTorch weights loading map
tf_to_pt_map = build_tf_to_pytorch_map(model, config)
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
tf_weights = {}
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
tf_weights[name] = array
for name, pointer in tf_to_pt_map.items():
assert name in tf_weights
array = tf_weights[name]
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
# which are not required for using the pretrained model
if "kernel" in name or "proj" in name:
array = np.transpose(array)
if ("r_r_bias" in name or "r_w_bias" in name) and len(pointer) > 1:
# Here we will split the TF weights
assert len(pointer) == array.shape[0]
for i, p_i in enumerate(pointer):
arr_i = array[i, ...]
try:
assert p_i.shape == arr_i.shape
except AssertionError as e:
e.args += (p_i.shape, arr_i.shape)
raise
logger.info("Initialize PyTorch weight {} for layer {}".format(name, i))
p_i.data = torch.from_numpy(arr_i)
else:
try:
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
tf_weights.pop(name, None)
tf_weights.pop(name + "/Adam", None)
tf_weights.pop(name + "/Adam_1", None)
logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys())))
return model | Load tf checkpoints in a pytorch model |
184,814 | import json
import logging
import os
import regex as re
from .tokenization_utils import PreTrainedTokenizer
The provided code snippet includes necessary dependencies for implementing the `get_pairs` function. Write a Python function `def get_pairs(word)` to solve the following problem:
Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings).
Here is the function:
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
pairs = set(pairs)
return pairs | Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). |
184,815 | import glob
import logging
import os
import pickle
import re
from collections import Counter, OrderedDict
from typing import List, Optional, Tuple, Union
import numpy as np
from tokenizers import Encoding, Tokenizer
from tokenizers.implementations import BaseTokenizer
from tokenizers.models import WordLevel
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from tokenizers.pre_tokenizers import CharDelimiterSplit, WhitespaceSplit
from tokenizers.processors import BertProcessing
from .file_utils import cached_path, is_torch_available
from .tokenization_utils import PreTrainedTokenizer, PreTrainedTokenizerFast
logger = logging.getLogger(__name__)
class TransfoXLCorpus(object):
def from_pretrained(cls, pretrained_model_name_or_path, cache_dir=None, *inputs, **kwargs):
"""
Instantiate a pre-processed corpus.
"""
vocab = TransfoXLTokenizer.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
if pretrained_model_name_or_path in PRETRAINED_CORPUS_ARCHIVE_MAP:
corpus_file = PRETRAINED_CORPUS_ARCHIVE_MAP[pretrained_model_name_or_path]
else:
corpus_file = os.path.join(pretrained_model_name_or_path, CORPUS_NAME)
# redirect to the cache, if necessary
try:
resolved_corpus_file = cached_path(corpus_file, cache_dir=cache_dir)
except EnvironmentError:
logger.error(
"Corpus '{}' was not found in corpus list ({}). "
"We assumed '{}' was a path or url but couldn't find files {} "
"at this path or url.".format(
pretrained_model_name_or_path,
", ".join(PRETRAINED_CORPUS_ARCHIVE_MAP.keys()),
pretrained_model_name_or_path,
corpus_file,
)
)
return None
if resolved_corpus_file == corpus_file:
logger.info("loading corpus file {}".format(corpus_file))
else:
logger.info("loading corpus file {} from cache at {}".format(corpus_file, resolved_corpus_file))
# Instantiate tokenizer.
corpus = cls(*inputs, **kwargs)
corpus_dict = torch.load(resolved_corpus_file)
for key, value in corpus_dict.items():
corpus.__dict__[key] = value
corpus.vocab = vocab
if corpus.train is not None:
corpus.train = torch.tensor(corpus.train, dtype=torch.long)
if corpus.valid is not None:
corpus.valid = torch.tensor(corpus.valid, dtype=torch.long)
if corpus.test is not None:
corpus.test = torch.tensor(corpus.test, dtype=torch.long)
return corpus
def __init__(self, *args, **kwargs):
self.vocab = TransfoXLTokenizer(*args, **kwargs)
self.dataset = None
self.train = None
self.valid = None
self.test = None
def build_corpus(self, path, dataset):
self.dataset = dataset
if self.dataset in ["ptb", "wt2", "enwik8", "text8"]:
self.vocab.count_file(os.path.join(path, "train.txt"))
self.vocab.count_file(os.path.join(path, "valid.txt"))
self.vocab.count_file(os.path.join(path, "test.txt"))
elif self.dataset == "wt103":
self.vocab.count_file(os.path.join(path, "train.txt"))
elif self.dataset == "lm1b":
train_path_pattern = os.path.join(
path,
"1-billion-word-language-modeling-benchmark-r13output",
"training-monolingual.tokenized.shuffled",
"news.en-*",
)
train_paths = glob.glob(train_path_pattern)
# the vocab will load from file when build_vocab() is called
self.vocab.build_vocab()
if self.dataset in ["ptb", "wt2", "wt103"]:
self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True)
self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True)
self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True)
elif self.dataset in ["enwik8", "text8"]:
self.train = self.vocab.encode_file(os.path.join(path, "train.txt"), ordered=True, add_eos=False)
self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=True, add_eos=False)
self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=True, add_eos=False)
elif self.dataset == "lm1b":
self.train = train_paths
self.valid = self.vocab.encode_file(os.path.join(path, "valid.txt"), ordered=False, add_double_eos=True)
self.test = self.vocab.encode_file(os.path.join(path, "test.txt"), ordered=False, add_double_eos=True)
def get_iterator(self, split, *args, **kwargs):
if split == "train":
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(self.train, *args, **kwargs)
elif self.dataset == "lm1b":
kwargs["shuffle"] = True
data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs)
elif split in ["valid", "test"]:
data = self.valid if split == "valid" else self.test
if self.dataset in ["ptb", "wt2", "wt103", "enwik8", "text8"]:
data_iter = LMOrderedIterator(data, *args, **kwargs)
elif self.dataset == "lm1b":
data_iter = LMShuffledIterator(data, *args, **kwargs)
return data_iter
def get_lm_corpus(datadir, dataset):
fn = os.path.join(datadir, "cache.pt")
fn_pickle = os.path.join(datadir, "cache.pkl")
if os.path.exists(fn):
logger.info("Loading cached dataset...")
corpus = torch.load(fn)
elif os.path.exists(fn_pickle):
logger.info("Loading cached dataset from pickle...")
with open(fn_pickle, "rb") as fp:
corpus = pickle.load(fp)
else:
logger.info("Producing dataset {}...".format(dataset))
kwargs = {}
if dataset in ["wt103", "wt2"]:
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = False
elif dataset == "ptb":
kwargs["special"] = ["<eos>"]
kwargs["lower_case"] = True
elif dataset == "lm1b":
kwargs["special"] = []
kwargs["lower_case"] = False
kwargs["vocab_file"] = os.path.join(datadir, "1b_word_vocab.txt")
elif dataset in ["enwik8", "text8"]:
pass
corpus = TransfoXLCorpus(datadir, dataset, **kwargs)
torch.save(corpus, fn)
return corpus | null |
184,816 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator | null |
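A minimal sketch of the decorator above: the shared prefix is prepended to whatever docstring the wrapped callable already has.

@add_start_docstrings("Shared introduction. ")
def example_fn(x):
    """Function-specific details."""
    return x

print(example_fn.__doc__)
# 'Shared introduction. Function-specific details.'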
184,817 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
def add_start_docstrings_to_callable(*docstr):
def docstring_decorator(fn):
class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0])
intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name)
note = r"""
.. note::
Although the recipe for forward pass needs to be defined within
this function, one should call the :class:`Module` instance afterwards
instead of this since the former takes care of running the
pre and post processing steps while the latter silently ignores them.
"""
fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator | null |
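The class name in the generated intro comes from the method's __qualname__, so this decorator is intended for methods; a minimal illustration with a made-up class:
class DummyModel:  # hypothetical class, used only to show the generated intro
    @add_start_docstrings_to_callable("Args: hidden_states: input tensor.")
    def forward(self, hidden_states):
        return hidden_states
# DummyModel.forward.__doc__ now begins with
# " The :class:`~transformers.DummyModel` forward method, overrides the :func:`__call__` special method."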
184,818 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = fn.__doc__ + "".join(docstr)
return fn
return docstring_decorator | null |
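Note that this variant concatenates onto fn.__doc__ directly, so the decorated function must already have a docstring (None + str would raise TypeError). A short illustration with made-up text:
@add_end_docstrings(" Example:: >>> identity(1)")
def identity(x):
    """Returns x unchanged."""
    return x
assert identity.__doc__ == "Returns x unchanged. Example:: >>> identity(1)"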
184,819 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
CLOUDFRONT_DISTRIB_PREFIX = "https://d2ws9o8vfrpkyk.cloudfront.net"
def hf_bucket_url(identifier, postfix=None, cdn=False) -> str:
endpoint = CLOUDFRONT_DISTRIB_PREFIX if cdn else S3_BUCKET_PREFIX
if postfix is None:
return "/".join((endpoint, identifier))
else:
return "/".join((endpoint, identifier, postfix)) | null |
184,820 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from pathlib import Path
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE
The provided code snippet includes necessary dependencies for implementing the `filename_to_url` function. Write a Python function `def filename_to_url(filename, cache_dir=None)` to solve the following problem:
Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
Here is the function:
def filename_to_url(filename, cache_dir=None):
"""
Return the url and etag (which may be ``None``) stored for `filename`.
Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
cache_path = os.path.join(cache_dir, filename)
if not os.path.exists(cache_path):
raise EnvironmentError("file {} not found".format(cache_path))
meta_path = cache_path + ".json"
if not os.path.exists(meta_path):
raise EnvironmentError("file {} not found".format(meta_path))
with open(meta_path, encoding="utf-8") as meta_file:
metadata = json.load(meta_file)
url = metadata["url"]
etag = metadata["etag"]
return url, etag | Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. |
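A hedged usage sketch: the function reads the "<filename>.json" sidecar written by the caching helpers in the same module, so it only works for files that went through the cache. The filename and cache directory below are placeholders:
# assumes the cache already holds "my_cached_file" and its "my_cached_file.json" metadata sidecar
url, etag = filename_to_url("my_cached_file", cache_dir="/tmp/transformers_cache")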
184,821 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from pathlib import Path
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
logger = logging.getLogger(__name__)  # module-level logger used by get_from_cache below
TRANSFORMERS_CACHE = PYTORCH_PRETRAINED_BERT_CACHE
def is_remote_url(url_or_filename):
parsed = urlparse(url_or_filename)
return parsed.scheme in ("http", "https", "s3")
def get_from_cache(
url,
cache_dir=None,
force_download=False,
proxies=None,
etag_timeout=10,
resume_download=False,
user_agent=None,
local_files_only=False,
) -> Optional[str]:
"""
Given a URL, look for the corresponding file in the local cache.
If it's not there, download it. Then return the path to the cached file.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
os.makedirs(cache_dir, exist_ok=True)
etag = None
if not local_files_only:
# Get eTag to add to filename, if it exists.
if url.startswith("s3://"):
etag = s3_etag(url, proxies=proxies)
else:
try:
response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
if response.status_code == 200:
etag = response.headers.get("ETag")
except (EnvironmentError, requests.exceptions.Timeout):
# etag is already None
pass
filename = url_to_filename(url, etag)
# get cache path to put the file
cache_path = os.path.join(cache_dir, filename)
# etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
# try to get the last downloaded one
if etag is None:
if os.path.exists(cache_path):
return cache_path
else:
matching_files = [
file
for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
if not file.endswith(".json") and not file.endswith(".lock")
]
if len(matching_files) > 0:
return os.path.join(cache_dir, matching_files[-1])
else:
# If files cannot be found and local_files_only=True,
# the models might've been found if local_files_only=False
# Notify the user about that
if local_files_only:
raise ValueError(
"Cannot find the requested files in the cached path and outgoing traffic has been"
" disabled. To enable model look-ups and downloads online, set 'local_files_only'"
" to False."
)
return None
# From now on, etag is not None.
if os.path.exists(cache_path) and not force_download:
return cache_path
# Prevent parallel downloads of the same file with a lock.
lock_path = cache_path + ".lock"
with FileLock(lock_path):
if resume_download:
incomplete_path = cache_path + ".incomplete"
@contextmanager
def _resumable_file_manager():
with open(incomplete_path, "a+b") as f:
yield f
temp_file_manager = _resumable_file_manager
if os.path.exists(incomplete_path):
resume_size = os.stat(incomplete_path).st_size
else:
resume_size = 0
else:
temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
resume_size = 0
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
with temp_file_manager() as temp_file:
logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name)
# GET file object
if url.startswith("s3://"):
if resume_download:
logger.warning('Warning: resumable downloads are not implemented for "s3://" urls')
s3_get(url, temp_file, proxies=proxies)
else:
http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
logger.info("storing %s in cache at %s", url, cache_path)
os.rename(temp_file.name, cache_path)
logger.info("creating metadata file for %s", cache_path)
meta = {"url": url, "etag": etag}
meta_path = cache_path + ".json"
with open(meta_path, "w") as meta_file:
json.dump(meta, meta_file)
return cache_path
The provided code snippet includes necessary dependencies for implementing the `cached_path` function. Write a Python function `def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ) -> Optional[str]` to solve the following problem:
Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-dowload the file even if it's already cached in the cache dir. resume_download: if True, resume the download if incompletly recieved file is found. user_agent: Optional string or dict that will be appended to the user-agent on remote requests. extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract the archive and overide the folder where it was extracted. Return: None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk). Local path (string) otherwise
Here is the function:
def cached_path(
url_or_filename,
cache_dir=None,
force_download=False,
proxies=None,
resume_download=False,
user_agent=None,
extract_compressed_file=False,
force_extract=False,
local_files_only=False,
) -> Optional[str]:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
Args:
cache_dir: specify a cache directory to save the file to (override the default cache dir).
force_download: if True, re-download the file even if it's already cached in the cache dir.
resume_download: if True, resume the download if an incompletely received file is found.
user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
extract_compressed_file: if True and the path points to a zip or tar file, extract the compressed
file in a folder along the archive.
force_extract: if True when extract_compressed_file is True and the archive was already extracted,
re-extract the archive and override the folder where it was extracted.
Return:
None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
Local path (string) otherwise
"""
if cache_dir is None:
cache_dir = TRANSFORMERS_CACHE
if isinstance(url_or_filename, Path):
url_or_filename = str(url_or_filename)
if isinstance(cache_dir, Path):
cache_dir = str(cache_dir)
if is_remote_url(url_or_filename):
# URL, so get it from the cache (downloading if necessary)
output_path = get_from_cache(
url_or_filename,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
user_agent=user_agent,
local_files_only=local_files_only,
)
elif os.path.exists(url_or_filename):
# File, and it exists.
output_path = url_or_filename
elif urlparse(url_or_filename).scheme == "":
# File, but it doesn't exist.
raise EnvironmentError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
if extract_compressed_file:
if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
return output_path
# Path where we extract compressed archives
# We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
output_dir, output_file = os.path.split(output_path)
output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
return output_path_extracted
# Prevent parallel extractions
lock_path = output_path + ".lock"
with FileLock(lock_path):
shutil.rmtree(output_path_extracted, ignore_errors=True)
os.makedirs(output_path_extracted)
if is_zipfile(output_path):
with ZipFile(output_path, "r") as zip_file:
zip_file.extractall(output_path_extracted)
zip_file.close()
elif tarfile.is_tarfile(output_path):
tar_file = tarfile.open(output_path)
tar_file.extractall(output_path_extracted)
tar_file.close()
else:
raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
return output_path_extracted
return output_path | Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-dowload the file even if it's already cached in the cache dir. resume_download: if True, resume the download if incompletly recieved file is found. user_agent: Optional string or dict that will be appended to the user-agent on remote requests. extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract the archive and overide the folder where it was extracted. Return: None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk). Local path (string) otherwise |
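A hedged usage sketch; the URL and cache directory are illustrative only:
config_path = cached_path(
    "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased/config.json",  # illustrative URL
    cache_dir="/tmp/transformers_cache",
)
# an existing local path is returned unchanged after an existence check:
assert cached_path(config_path) == config_path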
184,822 | import fnmatch
import json
import logging
import os
import shutil
import sys
import tarfile
import tempfile
from contextlib import contextmanager
from functools import partial, wraps
from hashlib import sha256
from typing import Optional
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import boto3
import requests
from botocore.config import Config
from botocore.exceptions import ClientError
from filelock import FileLock
from tqdm.auto import tqdm
from . import __version__
The provided code snippet includes necessary dependencies for implementing the `s3_request` function. Write a Python function `def s3_request(func)` to solve the following problem:
Wrapper function for s3 requests in order to create more helpful error messages.
Here is the function:
def s3_request(func):
"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""
@wraps(func)
def wrapper(url, *args, **kwargs):
try:
return func(url, *args, **kwargs)
except ClientError as exc:
if int(exc.response["Error"]["Code"]) == 404:
raise EnvironmentError("file {} not found".format(url))
else:
raise
return wrapper | Wrapper function for s3 requests in order to create more helpful error messages. |
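A sketch of how the decorator is typically applied; split_s3_path is assumed to be a sibling helper that splits "s3://bucket/key" into (bucket, key):
@s3_request
def s3_etag(url, proxies=None):
    """Return the ETag of an S3 object; a missing key surfaces as EnvironmentError via the wrapper."""
    s3_resource = boto3.resource("s3", config=Config(proxies=proxies))
    bucket_name, s3_path = split_s3_path(url)  # assumed helper from the same module
    return s3_resource.Object(bucket_name, s3_path).e_tag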
184,823 | import torch
import torch.nn as nn
import torch.nn.functional as F
The provided code snippet includes necessary dependencies for implementing the `sample_logits` function. Write a Python function `def sample_logits(embedding, bias, labels, inputs, sampler)` to solve the following problem:
embedding: an nn.Embedding layer bias: [n_vocab] labels: [b1, b2] inputs: [b1, b2, n_emb] sampler: you may use a LogUniformSampler Return logits: [b1, b2, 1 + n_sample]
Here is the function:
def sample_logits(embedding, bias, labels, inputs, sampler):
"""
embedding: an nn.Embedding layer
bias: [n_vocab]
labels: [b1, b2]
inputs: [b1, b2, n_emb]
sampler: you may use a LogUniformSampler
Return
logits: [b1, b2, 1 + n_sample]
"""
true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels)
n_sample = neg_samples.size(0)
b1, b2 = labels.size(0), labels.size(1)
all_ids = torch.cat([labels.view(-1), neg_samples])
all_w = embedding(all_ids)
true_w = all_w[:-n_sample].view(b1, b2, -1)
sample_w = all_w[-n_sample:].view(n_sample, -1)
all_b = bias[all_ids]
true_b = all_b[:-n_sample].view(b1, b2)
sample_b = all_b[-n_sample:]
hit = (labels[:, :, None] == neg_samples).detach()
true_logits = torch.einsum("ijk,ijk->ij", [true_w, inputs]) + true_b - true_log_probs
sample_logits = torch.einsum("lk,ijk->ijl", [sample_w, inputs]) + sample_b - samp_log_probs
sample_logits.masked_fill_(hit, -1e30)
logits = torch.cat([true_logits[:, :, None], sample_logits], -1)
return logits | embedding: an nn.Embedding layer bias: [n_vocab] labels: [b1, b2] inputs: [b1, b2, n_emb] sampler: you may use a LogUniformSampler Return logits: [b1, b2, 1 + n_sample] |
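A minimal shape check using a stub sampler; a real LogUniformSampler would provide meaningful log-probabilities, the stub below only matches the expected return shapes:
class _StubSampler:
    def __init__(self, n_vocab, n_sample):
        self.n_vocab, self.n_sample = n_vocab, n_sample
    def sample(self, labels):
        neg_samples = torch.randint(self.n_vocab, (self.n_sample,))
        # true log-probs per label position, sample log-probs per negative sample
        return torch.zeros_like(labels, dtype=torch.float), torch.zeros(self.n_sample), neg_samples
n_vocab, n_emb, b1, b2, n_sample = 100, 16, 2, 3, 5
emb = nn.Embedding(n_vocab, n_emb)
logits = sample_logits(emb, torch.zeros(n_vocab), torch.randint(n_vocab, (b1, b2)),
                       torch.randn(b1, b2, n_emb), _StubSampler(n_vocab, n_sample))
assert logits.shape == (b1, b2, 1 + n_sample)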
184,824 | import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
The provided code snippet includes necessary dependencies for implementing the `get_constant_schedule` function. Write a Python function `def get_constant_schedule(optimizer, last_epoch=-1)` to solve the following problem:
Create a schedule with a constant learning rate.
Here is the function:
def get_constant_schedule(optimizer, last_epoch=-1):
""" Create a schedule with a constant learning rate.
"""
return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) | Create a schedule with a constant learning rate. |
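A hedged usage sketch with a throwaway model and optimizer:
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
scheduler = get_constant_schedule(optimizer)
for _ in range(3):
    optimizer.step()
    scheduler.step()  # learning rate stays at 5e-5 throughout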
184,825 | import logging
import math
import torch
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
The provided code snippet includes necessary dependencies for implementing the `get_constant_schedule_with_warmup` function. Write a Python function `def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1)` to solve the following problem:
Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and 1.
Here is the function:
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
""" Create a schedule with a constant learning rate preceded by a warmup
period during which the learning rate increases linearly between 0 and 1.
"""
def lr_lambda(current_step):
if current_step < num_warmup_steps:
return float(current_step) / float(max(1.0, num_warmup_steps))
return 1.0
return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) | Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and 1. |
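The warmup variant ramps the multiplier linearly from 0 to 1 over num_warmup_steps and then holds it at 1; a short sketch with illustrative values:
optimizer = torch.optim.AdamW(torch.nn.Linear(4, 2).parameters(), lr=5e-5)
scheduler = get_constant_schedule_with_warmup(optimizer, num_warmup_steps=100)
# after about 50 scheduler.step() calls the effective lr is roughly 2.5e-5; from step 100 onward it is 5e-5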