id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
19,268 | import logging
import logging
import math
import os
import random
import sys
from typing import Optional

import numpy as np
import torch

from fairseq import (
    checkpoint_utils,
    distributed_utils,
    options,
    quantization_utils,
    tasks,
    utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.trainer import Trainer
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
def tpu_data_loader(args, itr):
    """Wrap a data iterator for TPU execution and return a CountingIterator."""
    import torch_xla.core.xla_model as xm
    import torch_xla.distributed.parallel_loader as pl

    # Synchronize all TPU workers before touching the input pipeline.
    xm.rendezvous('tpu_data_loader')
    xm.mark_step()

    tpu_device = utils.get_tpu_device(args)
    per_device_itr = pl.ParallelLoader(itr, [tpu_device]).per_device_loader(tpu_device)
    # Preserve the wrapped iterator's progress counter (if any) and length.
    return iterators.CountingIterator(
        per_device_itr,
        start=getattr(itr, 'n', 0),
        total=len(itr),
    )
def validate_and_save(args, trainer, task, epoch_itr, valid_subsets, end_of_epoch):
    """Run validation and/or checkpointing as configured.

    Returns:
        (valid_losses, should_stop): the validation losses (``[None]`` when no
        validation ran) and whether training should stop now.
    """
    num_updates = trainer.get_num_updates()

    # Save either every --save-interval-updates steps, or at an epoch
    # boundary every --save-interval epochs.
    save_by_updates = (
        args.save_interval_updates > 0
        and num_updates > 0
        and num_updates % args.save_interval_updates == 0
    )
    save_by_epoch = end_of_epoch and epoch_itr.epoch % args.save_interval == 0
    do_save = save_by_updates or save_by_epoch

    # Validate during mid-epoch saves, and at epoch boundaries every
    # --validate-interval epochs, unless validation is disabled entirely.
    validate_mid_epoch = do_save and not end_of_epoch
    validate_at_epoch_end = (
        end_of_epoch and epoch_itr.epoch % args.validate_interval == 0
    )
    do_validate = (
        (validate_mid_epoch or validate_at_epoch_end)
        and not args.disable_validation
    )

    valid_losses = [None]
    if do_validate:
        valid_losses = validate(args, trainer, task, epoch_itr, valid_subsets)

    # Stop when early stopping triggers or the update budget is exhausted.
    max_update = args.max_update or math.inf
    should_stop = (
        should_stop_early(args, valid_losses[0])
        or trainer.get_num_updates() >= max_update
    )

    if do_save or should_stop:
        checkpoint_utils.save_checkpoint(args, trainer, epoch_itr, valid_losses[0])

    return valid_losses, should_stop
def get_training_stats(stats):
    """Add elapsed wall-clock time (seconds, rounded) to *stats* and return it."""
    stats['wall'] = round(metrics.get_meter('default', 'wall').elapsed_time, 0)
    return stats
def progress_bar(
    iterator,
    log_format: Optional[str] = None,
    log_interval: int = 100,
    epoch: Optional[int] = None,
    prefix: Optional[str] = None,
    tensorboard_logdir: Optional[str] = None,
    default_log_format: str = 'tqdm',
):
    """Build a progress-bar wrapper around *iterator*.

    Fix: the annotations use ``Optional`` but the surrounding file never
    imported it, so evaluating this ``def`` raised ``NameError``; the import
    is added to the file's import block.

    Args:
        iterator: iterable of batches to wrap.
        log_format: one of 'json', 'none', 'simple', 'tqdm'; defaults to
            *default_log_format* when None.
        log_interval: log every this many steps (json/simple formats).
        epoch: epoch number to display, if any.
        prefix: text prepended to each log line.
        tensorboard_logdir: if set, additionally log to TensorBoard.
        default_log_format: fallback format when *log_format* is None.

    Raises:
        ValueError: if *log_format* is not one of the recognized formats.
    """
    if log_format is None:
        log_format = default_log_format
    if log_format == 'tqdm' and not sys.stderr.isatty():
        # tqdm renders poorly when stderr is redirected (e.g. into a file).
        log_format = 'simple'

    if log_format == 'json':
        bar = JsonProgressBar(iterator, epoch, prefix, log_interval)
    elif log_format == 'none':
        bar = NoopProgressBar(iterator, epoch, prefix)
    elif log_format == 'simple':
        bar = SimpleProgressBar(iterator, epoch, prefix, log_interval)
    elif log_format == 'tqdm':
        bar = TqdmProgressBar(iterator, epoch, prefix)
    else:
        raise ValueError('Unknown log format: {}'.format(log_format))

    if tensorboard_logdir:
        try:
            # [FB only] custom wrapper for TensorBoard
            import palaas  # noqa
            from .fb_tbmf_wrapper import FbTbmfWrapper
            bar = FbTbmfWrapper(bar, log_interval)
        except ImportError:
            bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)

    return bar
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, trainer, task, epoch_itr)` to solve the following problem:
Train the model for one epoch and return validation losses.
Here is the function:
def train(args, trainer, task, epoch_itr):
    """Train the model for one epoch and return validation losses."""
    # Initialize data iterator
    itr = epoch_itr.next_epoch_itr(
        fix_batches_to_gpus=args.fix_batches_to_gpus,
        # curriculum: keep batches in order for the first --curriculum epochs
        shuffle=(epoch_itr.next_epoch_idx > args.curriculum),
    )
    # --update-freq may be specified per epoch; reuse the last entry once
    # the schedule runs out.
    update_freq = (
        args.update_freq[epoch_itr.epoch - 1]
        if epoch_itr.epoch <= len(args.update_freq)
        else args.update_freq[-1]
    )
    itr = iterators.GroupedIterator(itr, update_freq)
    if getattr(args, 'tpu', False):
        itr = tpu_data_loader(args, itr)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        epoch=epoch_itr.epoch,
        # only the master worker writes TensorBoard logs
        tensorboard_logdir=(
            args.tensorboard_logdir if distributed_utils.is_master(args) else None
        ),
        default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
    )

    trainer.begin_epoch(epoch_itr.epoch)

    valid_subsets = args.valid_subset.split(',')
    should_stop = False
    for samples in progress:
        with metrics.aggregate('train_inner'):
            log_output = trainer.train_step(samples)
            if log_output is None:  # OOM, overflow, ...
                continue

        # log mid-epoch stats
        num_updates = trainer.get_num_updates()
        if num_updates % args.log_interval == 0:
            stats = get_training_stats(metrics.get_smoothed_values('train_inner'))
            progress.log(stats, tag='train_inner', step=num_updates)

            # reset mid-epoch stats after each log interval
            # the end-of-epoch stats will still be preserved
            metrics.reset_meters('train_inner')

        end_of_epoch = not itr.has_next()
        valid_losses, should_stop = validate_and_save(
            args, trainer, task, epoch_itr, valid_subsets, end_of_epoch
        )
        if should_stop:
            break

    # log end-of-epoch stats
    stats = get_training_stats(metrics.get_smoothed_values('train'))
    progress.print(stats, tag='train', step=num_updates)

    # reset epoch-level meters
    metrics.reset_meters('train')
    return valid_losses, should_stop | Train the model for one epoch and return validation losses. |
19,269 | import logging
import math
import os
import random
import sys
import numpy as np
import torch
from fairseq import (
checkpoint_utils,
distributed_utils,
options,
quantization_utils,
tasks,
utils,
)
from fairseq.data import iterators
from fairseq.logging import meters, metrics, progress_bar
from fairseq.trainer import Trainer
from fairseq.model_parallel.megatron_trainer import MegatronTrainer
def main(args, init_distributed=False):
    """Top-level training driver: set up task, model, and trainer, then run epochs.

    Args:
        args: parsed fairseq training arguments.
        init_distributed: if True, initialize torch.distributed for this worker.
    """
    utils.import_user_module(args)

    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'
    metrics.reset()

    # Initialize CUDA and distributed training
    if torch.cuda.is_available() and not args.cpu and not getattr(args, 'tpu', False):
        torch.cuda.set_device(args.device_id)
    np.random.seed(args.seed)
    utils.set_torch_seed(args.seed)
    if init_distributed:
        args.distributed_rank = distributed_utils.distributed_init(args)

    # only the master worker needs to verify the checkpoint directory
    if distributed_utils.is_master(args):
        checkpoint_utils.verify_checkpoint_directory(args.save_dir)

    # Print args
    logger.info(args)

    # Setup task, e.g., translation, language modeling, etc.
    task = tasks.setup_task(args)

    # Load valid dataset (we load training data below, based on the latest checkpoint)
    for valid_sub_split in args.valid_subset.split(','):
        task.load_dataset(valid_sub_split, combine=False, epoch=1)

    # Build model and criterion
    model = task.build_model(args)
    criterion = task.build_criterion(args)
    logger.info(model)
    logger.info('model {}, criterion {}'.format(args.arch, criterion.__class__.__name__))
    logger.info('num. model params: {} (num. trained: {})'.format(
        sum(p.numel() for p in model.parameters()),
        sum(p.numel() for p in model.parameters() if p.requires_grad),
    ))

    # (optionally) Configure quantization
    if args.quantization_config_path is not None:
        quantizer = quantization_utils.Quantizer(
            config_path=args.quantization_config_path,
            max_epoch=args.max_epoch,
            max_update=args.max_update,
        )
    else:
        quantizer = None

    # Build trainer (Megatron trainer when model parallelism is enabled)
    if args.model_parallel_size == 1:
        trainer = Trainer(args, task, model, criterion, quantizer)
    else:
        trainer = MegatronTrainer(args, task, model, criterion)

    logger.info('training on {} devices (GPUs/TPUs)'.format(args.distributed_world_size))
    logger.info('max tokens per GPU = {} and max sentences per GPU = {}'.format(
        args.max_tokens,
        args.max_sentences,
    ))

    # Load the latest checkpoint if one is available and restore the
    # corresponding train iterator
    extra_state, epoch_itr = checkpoint_utils.load_checkpoint(args, trainer)
    if args.tpu:
        import torch_xla.core.xla_model as xm
        xm.rendezvous('load_checkpoint')  # wait for all workers
        xm.mark_step()

    # Train until the learning rate gets too small
    max_epoch = args.max_epoch or math.inf
    lr = trainer.get_lr()
    train_meter = meters.StopwatchMeter()
    train_meter.start()
    while (
        lr > args.min_lr
        and epoch_itr.next_epoch_idx <= max_epoch
    ):
        # train for one epoch
        valid_losses, should_stop = train(args, trainer, task, epoch_itr)
        if should_stop:
            break

        # only use first validation loss to update the learning rate
        lr = trainer.lr_step(epoch_itr.epoch, valid_losses[0])

        epoch_itr = trainer.get_train_iterator(
            epoch_itr.next_epoch_idx,
            # sharded data: get train iterator for next epoch
            load_dataset=(os.pathsep in getattr(args, 'data', '')),
        )
    train_meter.stop()
    logger.info('done training in {:.1f} seconds'.format(train_meter.sum))
def distributed_main(i, args, start_rank=0):
    """Per-worker entry point; *i* is the local device index on this node."""
    args.device_id = i
    if args.distributed_rank is None:  # torch.multiprocessing.spawn
        args.distributed_rank = start_rank + i
    main(args, init_distributed=True)
def cli_main(modify_parser=None):
    """Parse command-line args and dispatch to single- or multi-process training."""
    parser = options.get_training_parser()
    args = options.parse_args_and_arch(parser, modify_parser=modify_parser)
    if args.distributed_init_method is None:
        distributed_utils.infer_init_method(args)

    if args.distributed_init_method is not None:
        # distributed training
        if torch.cuda.device_count() > 1 and not args.distributed_no_spawn:
            start_rank = args.distributed_rank
            args.distributed_rank = None  # assign automatically
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, start_rank),
                nprocs=torch.cuda.device_count(),
            )
        else:
            distributed_main(args.device_id, args)
    elif args.distributed_world_size > 1:
        if not getattr(args, 'tpu', False):
            # fallback for single node with multiple GPUs
            assert args.distributed_world_size <= torch.cuda.device_count()
            port = random.randint(10000, 20000)
            args.distributed_init_method = 'tcp://localhost:{port}'.format(port=port)
            args.distributed_rank = None  # set based on device id
            torch.multiprocessing.spawn(
                fn=distributed_main,
                args=(args, ),
                nprocs=args.distributed_world_size,
            )
        else:
            import torch_xla.distributed.xla_multiprocessing as xmp
            torch.multiprocessing.set_sharing_strategy('file_system')
            xmp.spawn(
                fn=distributed_main,
                args=(args, ),
                nprocs=8,  # use all 8 TPU cores
            )
    else:
        # single GPU training
        main(args) | null |
19,270 | from collections import namedtuple
import fileinput
import logging
import math
import sys
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import encoders
def buffered_read(input, buffer_size):
    """Read *input* line by line, yielding lists of at most *buffer_size* stripped lines."""
    pending = []
    with fileinput.input(files=[input], openhook=fileinput.hook_encoded("utf-8")) as handle:
        for raw_line in handle:
            pending.append(raw_line.strip())
            if len(pending) >= buffer_size:
                yield pending
                pending = []
    # flush any trailing partial buffer
    if pending:
        yield pending
19,271 | from collections import namedtuple
import fileinput
import logging
import math
import sys
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import encoders
Batch = namedtuple('Batch', 'ids src_tokens src_lengths')
def make_batches(lines, args, task, max_positions, encode_fn):
    """Encode raw input *lines* and yield Batch namedtuples of model-ready tensors."""
    # Encode each line with the task's source dictionary (no new symbols added).
    encoded = [
        task.source_dictionary.encode_line(
            encode_fn(line), add_if_not_exist=False
        ).long()
        for line in lines
    ]
    sizes = [t.numel() for t in encoded]
    batch_itr = task.get_batch_iterator(
        dataset=task.build_dataset_for_inference(encoded, sizes),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=max_positions,
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
    ).next_epoch_itr(shuffle=False)
    for sample in batch_itr:
        net_input = sample['net_input']
        yield Batch(
            ids=sample['id'],
            src_tokens=net_input['src_tokens'],
            src_lengths=net_input['src_lengths'],
        )
19,272 | from collections import namedtuple
import fileinput
import logging
import math
import sys
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import encoders
def main(args):
    """Interactive generation loop: read sentences, translate, print hypotheses."""
    utils.import_user_module(args)

    if args.buffer_size < 1:
        args.buffer_size = 1
    if args.max_tokens is None and args.max_sentences is None:
        args.max_sentences = 1

    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert not args.max_sentences or args.max_sentences <= args.buffer_size, \
        '--max-sentences/--batch-size cannot be larger than --buffer-size'

    logger.info(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Setup task, e.g., translation
    task = tasks.setup_task(args)

    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    models, _model_args = checkpoint_utils.load_model_ensemble(
        args.path.split(os.pathsep),
        arg_overrides=eval(args.model_overrides),
        task=task,
    )

    # Set dictionaries
    src_dict = task.source_dictionary
    tgt_dict = task.target_dictionary

    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()

    # Initialize generator
    generator = task.build_generator(models, args)

    # Handle tokenization and BPE
    tokenizer = encoders.build_tokenizer(args)
    bpe = encoders.build_bpe(args)

    def encode_fn(x):
        # raw text -> tokenized -> BPE
        if tokenizer is not None:
            x = tokenizer.encode(x)
        if bpe is not None:
            x = bpe.encode(x)
        return x

    def decode_fn(x):
        # inverse of encode_fn: undo BPE first, then detokenize
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)

    max_positions = utils.resolve_max_positions(
        task.max_positions(),
        *[model.max_positions() for model in models]
    )

    if args.buffer_size > 1:
        logger.info('Sentence buffer size: %s', args.buffer_size)
    logger.info('NOTE: hypothesis and token scores are output in base 2')
    logger.info('Type the input sentence and press return:')
    start_id = 0
    for inputs in buffered_read(args.input, args.buffer_size):
        results = []
        for batch in make_batches(inputs, args, task, max_positions, encode_fn):
            src_tokens = batch.src_tokens
            src_lengths = batch.src_lengths
            if use_cuda:
                src_tokens = src_tokens.cuda()
                src_lengths = src_lengths.cuda()

            sample = {
                'net_input': {
                    'src_tokens': src_tokens,
                    'src_lengths': src_lengths,
                },
            }
            translations = task.inference_step(generator, models, sample)
            for i, (id, hypos) in enumerate(zip(batch.ids.tolist(), translations)):
                src_tokens_i = utils.strip_pad(src_tokens[i], tgt_dict.pad())
                results.append((start_id + id, src_tokens_i, hypos))

        # sort output to match input order
        for id, src_tokens, hypos in sorted(results, key=lambda x: x[0]):
            if src_dict is not None:
                src_str = src_dict.string(src_tokens, args.remove_bpe)
                print('S-{}\t{}'.format(id, src_str))

            # Process top predictions
            for hypo in hypos[:min(len(hypos), args.nbest)]:
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo['tokens'].int().cpu(),
                    src_str=src_str,
                    alignment=hypo['alignment'],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=args.remove_bpe,
                )
                detok_hypo_str = decode_fn(hypo_str)
                score = hypo['score'] / math.log(2)  # convert to base 2
                # original hypothesis (after tokenization and BPE)
                print('H-{}\t{}\t{}'.format(id, score, hypo_str))
                # detokenized hypothesis
                print('D-{}\t{}\t{}'.format(id, score, detok_hypo_str))
                print('P-{}\t{}'.format(
                    id,
                    ' '.join(map(
                        lambda x: '{:.4f}'.format(x),
                        # convert from base e to base 2
                        hypo['positional_scores'].div_(math.log(2)).tolist(),
                    ))
                ))
                if args.print_alignment:
                    alignment_str = " ".join(["{}-{}".format(src, tgt) for src, tgt in alignment])
                    print('A-{}\t{}'.format(
                        id,
                        alignment_str
                    ))

        # update running id counter
        start_id += len(inputs)
def cli_main():
    """Command-line entry point: parse interactive-generation args and run main()."""
    parser = options.get_generation_parser(interactive=True)
    args = options.parse_args_and_arch(parser)
    main(args) | null |
19,273 | from itertools import chain
import logging
import sys
import torch
from fairseq import checkpoint_utils, distributed_utils, options, utils
from fairseq.logging import metrics, progress_bar
def main(args, override_args=None):
    """Evaluate a trained model on the subsets listed in --valid-subset.

    Args:
        args: parsed validation arguments.
        override_args: optional namespace of explicitly-given CLI args that
            take precedence over the values stored in the checkpoint.
    """
    utils.import_user_module(args)

    assert args.max_tokens is not None or args.max_sentences is not None, \
        'Must specify batch size either with --max-tokens or --max-sentences'

    use_fp16 = args.fp16
    use_cuda = torch.cuda.is_available() and not args.cpu

    if use_cuda:
        torch.cuda.set_device(args.device_id)

    if override_args is not None:
        overrides = vars(override_args)
        overrides.update(eval(getattr(override_args, 'model_overrides', '{}')))
    else:
        overrides = None

    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    models, model_args, task = checkpoint_utils.load_model_ensemble_and_task(
        [args.path],
        arg_overrides=overrides,
        suffix=getattr(args, "checkpoint_suffix", ""),
    )
    model = models[0]

    # Move models to GPU
    for model in models:
        if use_fp16:
            model.half()
        if use_cuda:
            model.cuda()

    # Print args
    logger.info(model_args)

    # Build criterion
    criterion = task.build_criterion(model_args)
    criterion.eval()

    for subset in args.valid_subset.split(','):
        try:
            task.load_dataset(subset, combine=False, epoch=1)
            dataset = task.dataset(subset)
        except KeyError:
            raise Exception('Cannot find dataset: ' + subset)

        # Initialize data iterator
        itr = task.get_batch_iterator(
            dataset=dataset,
            max_tokens=args.max_tokens,
            max_sentences=args.max_sentences,
            max_positions=utils.resolve_max_positions(
                task.max_positions(),
                *[m.max_positions() for m in models],
            ),
            ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
            required_batch_size_multiple=args.required_batch_size_multiple,
            seed=args.seed,
            num_shards=args.distributed_world_size,
            shard_id=args.distributed_rank,
            num_workers=args.num_workers,
        ).next_epoch_itr(shuffle=False)
        progress = progress_bar.progress_bar(
            itr,
            log_format=args.log_format,
            log_interval=args.log_interval,
            prefix=f"valid on '{subset}' subset",
            default_log_format=('tqdm' if not args.no_progress_bar else 'simple'),
        )

        log_outputs = []
        for i, sample in enumerate(progress):
            sample = utils.move_to_cuda(sample) if use_cuda else sample
            _loss, _sample_size, log_output = task.valid_step(sample, model, criterion)
            progress.log(log_output, step=i)
            log_outputs.append(log_output)

        # gather per-worker outputs before reducing metrics
        if args.distributed_world_size > 1:
            log_outputs = distributed_utils.all_gather_list(
                log_outputs,
                max_size=getattr(args, 'all_gather_list_size', 16384),
            )
            log_outputs = list(chain.from_iterable(log_outputs))

        with metrics.aggregate() as agg:
            task.reduce_metrics(log_outputs, criterion)
            log_output = agg.get_smoothed_values()

        progress.print(log_output, tag=subset, step=i)
def cli_main():
    """Command-line entry point for standalone validation."""
    parser = options.get_validation_parser()
    args = options.parse_args_and_arch(parser)

    # only override args that are explicitly given on the command line
    override_parser = options.get_validation_parser()
    override_args = options.parse_args_and_arch(override_parser, suppress_defaults=True)

    distributed_utils.call_main(args, main, override_args=override_args) | null |
19,274 | from collections import Counter
from itertools import zip_longest
import logging
from multiprocessing import Pool
import os
import shutil
import sys
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
def dataset_dest_file(args, output_prefix, lang, extension):
    """Return the destination path '<prefix>.<extension>' for a binarized dataset file."""
    base_path = dataset_dest_prefix(args, output_prefix, lang)
    return f"{base_path}.{extension}"
class Binarizer:
    """Helpers for converting a text file into a stream of token-id tensors.

    Fix: the methods are invoked through the class (``Binarizer.binarize(...)``,
    ``Binarizer.find_offsets(...)``) but were missing ``@staticmethod``; they
    only worked because Python 3 treats unbound methods as plain functions.
    The decorators make the intent explicit and keep instance calls safe too.
    """

    @staticmethod
    def binarize(
        filename,
        dict,
        consumer,
        tokenize=tokenize_line,
        append_eos=True,
        reverse_order=False,
        offset=0,
        end=-1,
        already_numberized=False,
    ):
        """Encode the lines of *filename* between byte *offset* and *end*.

        Each encoded tensor is passed to *consumer*; returns a dict with the
        number of sequences and tokens plus a Counter of words replaced by
        the unknown symbol.
        """
        nseq, ntok = 0, 0
        replaced = Counter()

        def replaced_consumer(word, idx):
            # track real words that mapped to the unknown index
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with open(filename, "r", encoding="utf-8") as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                # end=-1 means read to EOF; otherwise stop once past *end*
                if end > 0 and f.tell() > end:
                    break
                if already_numberized:
                    # input lines are already token ids: parse, optionally
                    # reverse, optionally append EOS
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    ids = dict.encode_line(
                        line=line,
                        line_tokenizer=tokenize,
                        add_if_not_exist=False,
                        consumer=replaced_consumer,
                        append_eos=append_eos,
                        reverse_order=reverse_order,
                    )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {
            "nseq": nseq,
            "nunk": sum(replaced.values()),
            "ntok": ntok,
            "replaced": replaced,
        }

    @staticmethod
    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
        """Parse alignment lines of *filename* between byte *offset* and *end*.

        Each parsed result is passed to *consumer*; returns {"nseq": count}.
        """
        nseq = 0
        with open(filename, "r") as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {"nseq": nseq}

    @staticmethod
    def find_offsets(filename, num_chunks):
        """Return num_chunks + 1 byte offsets splitting *filename* at line boundaries."""
        with open(filename, "r", encoding="utf-8") as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                # advance past a partial line so each chunk starts on a boundary
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
def binarize(args, filename, vocab, output_prefix, lang, offset, end, append_eos=True):
    """Binarize one shard of *filename* into the dataset destination for *lang*."""
    builder = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, lang, "bin"),
        impl=args.dataset_impl,
        vocab_size=len(vocab),
    )

    def consumer(tensor):
        builder.add_item(tensor)

    stats = Binarizer.binarize(
        filename, vocab, consumer,
        append_eos=append_eos, offset=offset, end=end,
    )
    builder.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
    return stats
19,275 | from collections import Counter
from itertools import zip_longest
import logging
from multiprocessing import Pool
import os
import shutil
import sys
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
def dataset_dest_file(args, output_prefix, lang, extension):
    """Return the destination path '<prefix>.<extension>' for a binarized dataset file."""
    base = dataset_dest_prefix(args, output_prefix, lang)
    return "{}.{}".format(base, extension)
class Binarizer:
    """Helpers for converting a text file into a stream of token-id tensors.

    NOTE(review): the methods are called through the class (e.g.
    ``Binarizer.binarize_alignments(...)``) and behave as static methods even
    though the ``@staticmethod`` decorator is absent.
    """

    def binarize(
        filename,
        dict,
        consumer,
        tokenize=tokenize_line,
        append_eos=True,
        reverse_order=False,
        offset=0,
        end=-1,
        already_numberized=False,
    ):
        """Encode lines of *filename* between byte *offset* and *end*.

        Each encoded tensor is handed to *consumer*; returns a dict with
        sequence/token counts and the Counter of words replaced by <unk>.
        """
        nseq, ntok = 0, 0
        replaced = Counter()

        def replaced_consumer(word, idx):
            # count real words that mapped to the unknown index
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with open(filename, "r", encoding="utf-8") as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                # end=-1 means read to EOF; otherwise stop once past *end*
                if end > 0 and f.tell() > end:
                    break
                if already_numberized:
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    ids = dict.encode_line(
                        line=line,
                        line_tokenizer=tokenize,
                        add_if_not_exist=False,
                        consumer=replaced_consumer,
                        append_eos=append_eos,
                        reverse_order=reverse_order,
                    )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {
            "nseq": nseq,
            "nunk": sum(replaced.values()),
            "ntok": ntok,
            "replaced": replaced,
        }

    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
        """Parse alignment lines of *filename* between byte *offset* and *end*."""
        nseq = 0
        with open(filename, "r") as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {"nseq": nseq}

    def find_offsets(filename, num_chunks):
        """Return num_chunks + 1 byte offsets splitting *filename* at line boundaries."""
        with open(filename, "r", encoding="utf-8") as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                # advance past a partial line so each chunk starts on a boundary
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
def binarize_alignments(args, filename, parse_alignment, output_prefix, offset, end):
    """Binarize one shard of an alignment file into the dataset destination."""
    builder = indexed_dataset.make_builder(
        dataset_dest_file(args, output_prefix, None, "bin"),
        impl=args.dataset_impl,
        vocab_size=None,
    )

    def consumer(tensor):
        builder.add_item(tensor)

    stats = Binarizer.binarize_alignments(
        filename, parse_alignment, consumer,
        offset=offset, end=end,
    )
    builder.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
    return stats
19,276 | from collections import Counter
from itertools import zip_longest
import logging
from multiprocessing import Pool
import os
import shutil
import sys
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
class Binarizer:
    """Helpers for converting a text file into a stream of token-id tensors.

    NOTE(review): the methods are called through the class (e.g.
    ``Binarizer.find_offsets(...)`` in ``get_offsets`` below) and behave as
    static methods even though the ``@staticmethod`` decorator is absent.
    """

    def binarize(
        filename,
        dict,
        consumer,
        tokenize=tokenize_line,
        append_eos=True,
        reverse_order=False,
        offset=0,
        end=-1,
        already_numberized=False,
    ):
        """Encode lines of *filename* between byte *offset* and *end*.

        Each encoded tensor is handed to *consumer*; returns a dict with
        sequence/token counts and the Counter of words replaced by <unk>.
        """
        nseq, ntok = 0, 0
        replaced = Counter()

        def replaced_consumer(word, idx):
            # count real words that mapped to the unknown index
            if idx == dict.unk_index and word != dict.unk_word:
                replaced.update([word])

        with open(filename, "r", encoding="utf-8") as f:
            f.seek(offset)
            # next(f) breaks f.tell(), hence readline() must be used
            line = safe_readline(f)
            while line:
                # end=-1 means read to EOF; otherwise stop once past *end*
                if end > 0 and f.tell() > end:
                    break
                if already_numberized:
                    id_strings = line.strip().split()
                    id_list = [int(id_string) for id_string in id_strings]
                    if reverse_order:
                        id_list.reverse()
                    if append_eos:
                        id_list.append(dict.eos())
                    ids = torch.IntTensor(id_list)
                else:
                    ids = dict.encode_line(
                        line=line,
                        line_tokenizer=tokenize,
                        add_if_not_exist=False,
                        consumer=replaced_consumer,
                        append_eos=append_eos,
                        reverse_order=reverse_order,
                    )
                nseq += 1
                ntok += len(ids)
                consumer(ids)
                line = f.readline()
        return {
            "nseq": nseq,
            "nunk": sum(replaced.values()),
            "ntok": ntok,
            "replaced": replaced,
        }

    def binarize_alignments(filename, alignment_parser, consumer, offset=0, end=-1):
        """Parse alignment lines of *filename* between byte *offset* and *end*."""
        nseq = 0
        with open(filename, "r") as f:
            f.seek(offset)
            line = safe_readline(f)
            while line:
                if end > 0 and f.tell() > end:
                    break
                ids = alignment_parser(line)
                nseq += 1
                consumer(ids)
                line = f.readline()
        return {"nseq": nseq}

    def find_offsets(filename, num_chunks):
        """Return num_chunks + 1 byte offsets splitting *filename* at line boundaries."""
        with open(filename, "r", encoding="utf-8") as f:
            size = os.fstat(f.fileno()).st_size
            chunk_size = size // num_chunks
            offsets = [0 for _ in range(num_chunks + 1)]
            for i in range(1, num_chunks):
                f.seek(chunk_size * i)
                # advance past a partial line so each chunk starts on a boundary
                safe_readline(f)
                offsets[i] = f.tell()
            return offsets
def get_offsets(input_file, num_workers):
    """Return byte offsets that split *input_file* into *num_workers* chunks."""
    return Binarizer.find_offsets(input_file, num_workers)
19,277 | from collections import Counter
from itertools import zip_longest
import logging
from multiprocessing import Pool
import os
import shutil
import sys
from fairseq import options, tasks, utils
from fairseq.data import indexed_dataset
from fairseq.binarizer import Binarizer
def main(args):
utils.import_user_module(args)
os.makedirs(args.destdir, exist_ok=True)
logger.addHandler(logging.FileHandler(
filename=os.path.join(args.destdir, 'preprocess.log'),
))
logger.info(args)
task = tasks.get_task(args.task)
def train_path(lang):
return "{}{}".format(args.trainpref, ("." + lang) if lang else "")
def file_name(prefix, lang):
fname = prefix
if lang is not None:
fname += ".{lang}".format(lang=lang)
return fname
def dest_path(prefix, lang):
return os.path.join(args.destdir, file_name(prefix, lang))
def dict_path(lang):
return dest_path("dict", lang) + ".txt"
def build_dictionary(filenames, src=False, tgt=False):
assert src ^ tgt
return task.build_dictionary(
filenames,
workers=args.workers,
threshold=args.thresholdsrc if src else args.thresholdtgt,
nwords=args.nwordssrc if src else args.nwordstgt,
padding_factor=args.padding_factor,
)
target = not args.only_source
if not args.srcdict and os.path.exists(dict_path(args.source_lang)):
raise FileExistsError(dict_path(args.source_lang))
if target and not args.tgtdict and os.path.exists(dict_path(args.target_lang)):
raise FileExistsError(dict_path(args.target_lang))
if args.joined_dictionary:
assert not args.srcdict or not args.tgtdict, \
"cannot use both --srcdict and --tgtdict with --joined-dictionary"
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
elif args.tgtdict:
src_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary(
{train_path(lang) for lang in [args.source_lang, args.target_lang]}, src=True
)
tgt_dict = src_dict
else:
if args.srcdict:
src_dict = task.load_dictionary(args.srcdict)
else:
assert args.trainpref, "--trainpref must be set if --srcdict is not specified"
src_dict = build_dictionary([train_path(args.source_lang)], src=True)
if target:
if args.tgtdict:
tgt_dict = task.load_dictionary(args.tgtdict)
else:
assert args.trainpref, "--trainpref must be set if --tgtdict is not specified"
tgt_dict = build_dictionary([train_path(args.target_lang)], tgt=True)
else:
tgt_dict = None
src_dict.save(dict_path(args.source_lang))
if target and tgt_dict is not None:
tgt_dict.save(dict_path(args.target_lang))
def make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers):
logger.info("[{}] Dictionary: {} types".format(lang, len(vocab) - 1))
n_seq_tok = [0, 0]
replaced = Counter()
def merge_result(worker_result):
replaced.update(worker_result["replaced"])
n_seq_tok[0] += worker_result["nseq"]
n_seq_tok[1] += worker_result["ntok"]
input_file = "{}{}".format(
input_prefix, ("." + lang) if lang is not None else ""
)
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize,
(
args,
input_file,
vocab,
prefix,
lang,
offsets[worker_id],
offsets[worker_id + 1]
),
callback=merge_result
)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, lang, "bin"),
impl=args.dataset_impl, vocab_size=len(vocab))
merge_result(
Binarizer.binarize(
input_file, vocab, lambda t: ds.add_item(t),
offset=0, end=offsets[1]
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, lang)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, lang, "idx"))
logger.info(
"[{}] {}: {} sents, {} tokens, {:.3}% replaced by {}".format(
lang,
input_file,
n_seq_tok[0],
n_seq_tok[1],
100 * sum(replaced.values()) / n_seq_tok[1],
vocab.unk_word,
)
)
def make_binary_alignment_dataset(input_prefix, output_prefix, num_workers):
nseq = [0]
def merge_result(worker_result):
nseq[0] += worker_result['nseq']
input_file = input_prefix
offsets = Binarizer.find_offsets(input_file, num_workers)
pool = None
if num_workers > 1:
pool = Pool(processes=num_workers - 1)
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
pool.apply_async(
binarize_alignments,
(
args,
input_file,
utils.parse_alignment,
prefix,
offsets[worker_id],
offsets[worker_id + 1]
),
callback=merge_result
)
pool.close()
ds = indexed_dataset.make_builder(dataset_dest_file(args, output_prefix, None, "bin"),
impl=args.dataset_impl)
merge_result(
Binarizer.binarize_alignments(
input_file, utils.parse_alignment, lambda t: ds.add_item(t),
offset=0, end=offsets[1]
)
)
if num_workers > 1:
pool.join()
for worker_id in range(1, num_workers):
prefix = "{}{}".format(output_prefix, worker_id)
temp_file_path = dataset_dest_prefix(args, prefix, None)
ds.merge_file_(temp_file_path)
os.remove(indexed_dataset.data_file_path(temp_file_path))
os.remove(indexed_dataset.index_file_path(temp_file_path))
ds.finalize(dataset_dest_file(args, output_prefix, None, "idx"))
logger.info(
"[alignments] {}: parsed {} alignments".format(
input_file,
nseq[0]
)
)
def make_dataset(vocab, input_prefix, output_prefix, lang, num_workers=1):
if args.dataset_impl == "raw":
# Copy original text file to destination folder
output_text_file = dest_path(
output_prefix + ".{}-{}".format(args.source_lang, args.target_lang),
lang,
)
shutil.copyfile(file_name(input_prefix, lang), output_text_file)
else:
make_binary_dataset(vocab, input_prefix, output_prefix, lang, num_workers)
def make_all(lang, vocab):
if args.trainpref:
make_dataset(vocab, args.trainpref, "train", lang, num_workers=args.workers)
if args.validpref:
for k, validpref in enumerate(args.validpref.split(",")):
outprefix = "valid{}".format(k) if k > 0 else "valid"
make_dataset(vocab, validpref, outprefix, lang, num_workers=args.workers)
if args.testpref:
for k, testpref in enumerate(args.testpref.split(",")):
outprefix = "test{}".format(k) if k > 0 else "test"
make_dataset(vocab, testpref, outprefix, lang, num_workers=args.workers)
def make_all_alignments():
    """Binarize word-alignment files for every split whose
    `<pref>.<align_suffix>` file exists on disk."""
    split_specs = (
        (args.trainpref, "train.align"),
        (args.validpref, "valid.align"),
        (args.testpref, "test.align"),
    )
    for pref, outprefix in split_specs:
        if pref and os.path.exists(pref + "." + args.align_suffix):
            make_binary_alignment_dataset(
                pref + "." + args.align_suffix, outprefix, num_workers=args.workers
            )
# Binarize source (and, for supervised setups, target) splits, then any
# word-alignment files.  NOTE(review): `target`, `src_dict`, `tgt_dict` and
# `train_path` are defined in the enclosing function, outside this excerpt.
make_all(args.source_lang, src_dict)
if target:
    make_all(args.target_lang, tgt_dict)
if args.align_suffix:
    make_all_alignments()

logger.info("Wrote preprocessed data to {}".format(args.destdir))

if args.alignfile:
    # Build a word-level translation dictionary from an alignment file:
    # for each source token keep the target token it is most often aligned to.
    assert args.trainpref, "--trainpref must be set if --alignfile is specified"
    src_file_name = train_path(args.source_lang)
    tgt_file_name = train_path(args.target_lang)
    freq_map = {}
    with open(args.alignfile, "r", encoding='utf-8') as align_file:
        with open(src_file_name, "r", encoding='utf-8') as src_file:
            with open(tgt_file_name, "r", encoding='utf-8') as tgt_file:
                for a, s, t in zip_longest(align_file, src_file, tgt_file):
                    si = src_dict.encode_line(s, add_if_not_exist=False)
                    ti = tgt_dict.encode_line(t, add_if_not_exist=False)
                    # each alignment entry is of the form "srcpos-tgtpos"
                    ai = list(map(lambda x: tuple(x.split("-")), a.split()))
                    for sai, tai in ai:
                        srcidx = si[int(sai)]
                        tgtidx = ti[int(tai)]
                        # only count alignments between known (non-unk) words
                        if srcidx != src_dict.unk() and tgtidx != tgt_dict.unk():
                            assert srcidx != src_dict.pad()
                            assert srcidx != src_dict.eos()
                            assert tgtidx != tgt_dict.pad()
                            assert tgtidx != tgt_dict.eos()
                            if srcidx not in freq_map:
                                freq_map[srcidx] = {}
                            if tgtidx not in freq_map[srcidx]:
                                freq_map[srcidx][tgtidx] = 1
                            else:
                                freq_map[srcidx][tgtidx] += 1
    align_dict = {}
    for srcidx in freq_map.keys():
        # most frequently aligned target index wins
        align_dict[srcidx] = max(freq_map[srcidx], key=freq_map[srcidx].get)
    with open(
        os.path.join(
            args.destdir,
            "alignment.{}-{}.txt".format(args.source_lang, args.target_lang),
        ),
        "w", encoding='utf-8'
    ) as f:
        for k, v in align_dict.items():
            print("{} {}".format(src_dict[k], tgt_dict[v]), file=f)
def cli_main():
    """Console entry point: parse preprocessing arguments and run main()."""
    args = options.get_preprocessing_parser().parse_args()
    main(args)
19,278 | import argparse
import os
import sys
from fairseq import bleu
from fairseq.data import dictionary
def get_parser():
    """Create the argument parser for standalone BLEU scoring.

    Returns:
        argparse.ArgumentParser with system/reference inputs, n-gram order
        and scoring-mode flags.
    """
    p = argparse.ArgumentParser(description='Command-line script for BLEU scoring.')
    p.add_argument('-s', '--sys', default='-', help='system output')
    p.add_argument('-r', '--ref', required=True, help='references')
    p.add_argument('-o', '--order', default=4, metavar='N', type=int,
                   help='consider ngrams up to this order')
    p.add_argument('--ignore-case', action='store_true',
                   help='case-insensitive scoring')
    p.add_argument('--sacrebleu', action='store_true',
                   help='score with sacrebleu')
    p.add_argument('--sentence-bleu', action='store_true',
                   help='report sentence-level BLEUs (i.e., with +1 smoothing)')
    return p
def cli_main():
    """Score a system output file against a reference file with BLEU.

    Reads the system output from --sys (or stdin when '-') and references
    from --ref, then prints corpus-level BLEU, sacreBLEU, or per-sentence
    BLEU depending on the flags.
    """
    parser = get_parser()
    args = parser.parse_args()
    print(args)

    assert args.sys == '-' or os.path.exists(args.sys), \
        "System output file {} does not exist".format(args.sys)
    assert os.path.exists(args.ref), \
        "Reference file {} does not exist".format(args.ref)

    # renamed from `dict`: do not shadow the builtin
    token_dict = dictionary.Dictionary()

    def readlines(fd):
        # stream line-by-line instead of fd.readlines() (no full-file buffering)
        for line in fd:
            if args.ignore_case:
                yield line.lower()
            else:
                yield line

    if args.sacrebleu:
        import sacrebleu

        def score(fdsys):
            with open(args.ref) as fdref:
                print(sacrebleu.corpus_bleu(fdsys, [fdref]))
    elif args.sentence_bleu:
        def score(fdsys):
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(token_dict.pad(), token_dict.eos(), token_dict.unk())
                for i, (sys_tok, ref_tok) in enumerate(zip(readlines(fdsys), readlines(fdref))):
                    # reset with +1 smoothing for sentence-level BLEU
                    scorer.reset(one_init=True)
                    sys_tok = token_dict.encode_line(sys_tok)
                    ref_tok = token_dict.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                    print(i, scorer.result_string(args.order))
    else:
        def score(fdsys):
            with open(args.ref) as fdref:
                scorer = bleu.Scorer(token_dict.pad(), token_dict.eos(), token_dict.unk())
                for sys_tok, ref_tok in zip(readlines(fdsys), readlines(fdref)):
                    sys_tok = token_dict.encode_line(sys_tok)
                    ref_tok = token_dict.encode_line(ref_tok)
                    scorer.add(ref_tok, sys_tok)
                print(scorer.result_string(args.order))

    if args.sys == '-':
        score(sys.stdin)
    else:
        with open(args.sys, 'r') as f:
            score(f)
19,279 | import logging
import math
import os
import torch
from fairseq import checkpoint_utils, options, tasks, utils
from fairseq.data import LMContextWindowDataset
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.sequence_scorer import SequenceScorer
from fairseq.options import add_distributed_training_args
from fairseq import distributed_utils
def main(parsed_args, **unused_kwargs):
def cli_main():
    """Parse eval-LM arguments and launch `main` (possibly distributed)."""
    args = options.parse_args_and_arch(options.get_eval_lm_parser())
    distributed_utils.call_main(args, main)
import logging
import math
import os
import sys
import time
from typing import Optional

import torch

from fairseq import bleu, checkpoint_utils, options, tasks, utils
from fairseq.data import encoders
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
def progress_bar(
    iterator,
    log_format: Optional[str] = None,
    log_interval: int = 100,
    epoch: Optional[int] = None,
    prefix: Optional[str] = None,
    tensorboard_logdir: Optional[str] = None,
    default_log_format: str = 'tqdm',
):
    """Wrap `iterator` in the progress-bar renderer selected by `log_format`.

    Falls back to `default_log_format` when none is given, and downgrades
    'tqdm' to 'simple' when stderr is not a terminal.  Optionally wraps the
    bar so that logged stats are also mirrored to TensorBoard.
    """
    fmt = default_log_format if log_format is None else log_format
    if fmt == 'tqdm' and not sys.stderr.isatty():
        # tqdm renders poorly when stderr is redirected to a file/pipe
        fmt = 'simple'

    builders = {
        'json': lambda: JsonProgressBar(iterator, epoch, prefix, log_interval),
        'none': lambda: NoopProgressBar(iterator, epoch, prefix),
        'simple': lambda: SimpleProgressBar(iterator, epoch, prefix, log_interval),
        'tqdm': lambda: TqdmProgressBar(iterator, epoch, prefix),
    }
    if fmt not in builders:
        raise ValueError('Unknown log format: {}'.format(fmt))
    bar = builders[fmt]()

    if tensorboard_logdir:
        try:
            # [FB only] custom wrapper for TensorBoard
            import palaas  # noqa
            from .fb_tbmf_wrapper import FbTbmfWrapper
            bar = FbTbmfWrapper(bar, log_interval)
        except ImportError:
            bar = TensorboardProgressBarWrapper(bar, tensorboard_logdir)

    return bar
class TimeMeter(Meter):
    """Computes the average occurrence of some event per second"""

    def __init__(
        self,
        init: int = 0,
        n: int = 0,
        round: Optional[int] = None,
    ):
        # `round`: optional number of decimals applied in smoothed_value
        self.round = round
        self.reset(init, n)

    def reset(self, init=0, n=0):
        # `init` is previously-elapsed time carried over from a state_dict
        self.init = init
        self.start = time.perf_counter()
        self.n = n  # total event count
        self.i = 0  # number of update() calls since reset

    def update(self, val=1):
        self.n = type_as(self.n, val) + val
        self.i += 1

    def state_dict(self):
        return {
            'init': self.elapsed_time,
            'n': self.n,
            'round': self.round,
        }

    def load_state_dict(self, state_dict):
        if 'start' in state_dict:
            # backwards compatibility for old state_dicts
            self.reset(init=state_dict['init'])
        else:
            self.reset(init=state_dict['init'], n=state_dict['n'])
            self.round = state_dict.get('round', None)

    # These three are properties: callers access them as attributes
    # (e.g. `state_dict` above reads `self.elapsed_time`, and consumers do
    # `meter.avg`), so the bare-method form would return bound methods.
    @property
    def avg(self):
        """Events per second since (restored) start."""
        return self.n / self.elapsed_time

    @property
    def elapsed_time(self):
        """Seconds elapsed, including time restored via load_state_dict."""
        return self.init + (time.perf_counter() - self.start)

    @property
    def smoothed_value(self) -> float:
        """`avg`, optionally rounded to `self.round` decimals for display."""
        val = self.avg
        if self.round is not None and val is not None:
            val = safe_round(val, self.round)
        return val
class StopwatchMeter(Meter):
    """Computes the sum/avg duration of some event in seconds"""

    def __init__(self, round: Optional[int] = None):
        # `round`: optional number of decimals applied in smoothed_value
        self.round = round
        self.sum = 0
        self.n = 0
        self.start_time = None

    def start(self):
        self.start_time = time.perf_counter()

    def stop(self, n=1, prehook=None):
        # no-op unless start() was called first
        if self.start_time is not None:
            if prehook is not None:
                prehook()
            delta = time.perf_counter() - self.start_time
            self.sum = self.sum + delta
            self.n = type_as(self.n, n) + n

    def reset(self):
        self.sum = 0  # cumulative time during which stopwatch was active
        self.n = 0  # total n across all start/stop
        self.start()

    def state_dict(self):
        return {
            'sum': self.sum,
            'n': self.n,
            'round': self.round,
        }

    def load_state_dict(self, state_dict):
        self.sum = state_dict['sum']
        self.n = state_dict['n']
        self.start_time = None
        self.round = state_dict.get('round', None)

    # Properties, not methods: `smoothed_value` below and external callers
    # (e.g. `gen_timer.avg`) read these as attributes.
    @property
    def avg(self):
        """Average seconds per event; total time when n == 0."""
        return self.sum / self.n if self.n > 0 else self.sum

    @property
    def elapsed_time(self):
        """Seconds since the last start(); 0 when never started."""
        if self.start_time is None:
            return 0.
        return time.perf_counter() - self.start_time

    @property
    def smoothed_value(self) -> float:
        """`avg` once something was recorded, otherwise current elapsed time."""
        val = self.avg if self.sum > 0 else self.elapsed_time
        if self.round is not None and val is not None:
            val = safe_round(val, self.round)
        return val
def _main(args, output_file):
    """Generate translations for `args.gen_subset` with a (possibly ensembled)
    model, print S/T/H/D/P/A/I/E records to `output_file`, and return the
    BLEU scorer used for evaluation.
    """
    logging.basicConfig(
        format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        level=logging.INFO,
        stream=output_file,
    )
    logger = logging.getLogger('fairseq_cli.generate')

    utils.import_user_module(args)

    # default batch budget when neither --max-tokens nor --max-sentences given
    if args.max_tokens is None and args.max_sentences is None:
        args.max_tokens = 12000
    logger.info(args)

    use_cuda = torch.cuda.is_available() and not args.cpu

    # Load dataset splits
    task = tasks.setup_task(args)
    task.load_dataset(args.gen_subset)

    # Set dictionaries
    try:
        src_dict = getattr(task, 'source_dictionary', None)
    except NotImplementedError:
        src_dict = None  # e.g. language modeling has no source dictionary
    tgt_dict = task.target_dictionary

    # Load ensemble
    logger.info('loading model(s) from {}'.format(args.path))
    models, _model_args = checkpoint_utils.load_model_ensemble(
        utils.split_paths(args.path),
        # NOTE(review): eval() of a CLI-provided string — trusted input assumed
        arg_overrides=eval(args.model_overrides),
        task=task,
    )

    # Optimize ensemble for generation
    for model in models:
        model.make_generation_fast_(
            beamable_mm_beam_size=None if args.no_beamable_mm else args.beam,
            need_attn=args.print_alignment,
        )
        if args.fp16:
            model.half()
        if use_cuda:
            model.cuda()

    # Load alignment dictionary for unknown word replacement
    # (None if no unknown word replacement, empty if no path to align dictionary)
    align_dict = utils.load_align_dict(args.replace_unk)

    # Load dataset (possibly sharded)
    itr = task.get_batch_iterator(
        dataset=task.dataset(args.gen_subset),
        max_tokens=args.max_tokens,
        max_sentences=args.max_sentences,
        max_positions=utils.resolve_max_positions(
            task.max_positions(),
            *[model.max_positions() for model in models]
        ),
        ignore_invalid_inputs=args.skip_invalid_size_inputs_valid_test,
        required_batch_size_multiple=args.required_batch_size_multiple,
        num_shards=args.num_shards,
        shard_id=args.shard_id,
        num_workers=args.num_workers,
    ).next_epoch_itr(shuffle=False)
    progress = progress_bar.progress_bar(
        itr,
        log_format=args.log_format,
        log_interval=args.log_interval,
        default_log_format=('tqdm' if not args.no_progress_bar else 'none'),
    )

    # Initialize generator
    gen_timer = StopwatchMeter()
    generator = task.build_generator(models, args)

    # Handle tokenization and BPE
    tokenizer = encoders.build_tokenizer(args)
    bpe = encoders.build_bpe(args)

    def decode_fn(x):
        # undo BPE first, then detokenize
        if bpe is not None:
            x = bpe.decode(x)
        if tokenizer is not None:
            x = tokenizer.decode(x)
        return x

    # Generate and compute BLEU score
    if args.sacrebleu:
        scorer = bleu.SacrebleuScorer()
    else:
        scorer = bleu.Scorer(tgt_dict.pad(), tgt_dict.eos(), tgt_dict.unk())
    num_sentences = 0
    has_target = True
    wps_meter = TimeMeter()
    for sample in progress:
        sample = utils.move_to_cuda(sample) if use_cuda else sample
        if 'net_input' not in sample:
            continue  # skip dummy batches

        prefix_tokens = None
        if args.prefix_size > 0:
            # force decoding to start with the first prefix_size target tokens
            prefix_tokens = sample['target'][:, :args.prefix_size]

        gen_timer.start()
        hypos = task.inference_step(generator, models, sample, prefix_tokens)
        num_generated_tokens = sum(len(h[0]['tokens']) for h in hypos)
        gen_timer.stop(num_generated_tokens)

        for i, sample_id in enumerate(sample['id'].tolist()):
            has_target = sample['target'] is not None

            # Remove padding
            src_tokens = utils.strip_pad(sample['net_input']['src_tokens'][i, :], tgt_dict.pad())
            target_tokens = None
            if has_target:
                target_tokens = utils.strip_pad(sample['target'][i, :], tgt_dict.pad()).int().cpu()

            # Either retrieve the original sentences or regenerate them from tokens.
            if align_dict is not None:
                src_str = task.dataset(args.gen_subset).src.get_original_text(sample_id)
                target_str = task.dataset(args.gen_subset).tgt.get_original_text(sample_id)
            else:
                if src_dict is not None:
                    src_str = src_dict.string(src_tokens, args.remove_bpe)
                else:
                    src_str = ""
                if has_target:
                    target_str = tgt_dict.string(
                        target_tokens,
                        args.remove_bpe,
                        escape_unk=True,
                        extra_symbols_to_ignore={
                            generator.eos,
                        }
                    )

            src_str = decode_fn(src_str)
            if has_target:
                target_str = decode_fn(target_str)

            if not args.quiet:
                if src_dict is not None:
                    print('S-{}\t{}'.format(sample_id, src_str), file=output_file)
                if has_target:
                    print('T-{}\t{}'.format(sample_id, target_str), file=output_file)

            # Process top predictions
            for j, hypo in enumerate(hypos[i][:args.nbest]):
                hypo_tokens, hypo_str, alignment = utils.post_process_prediction(
                    hypo_tokens=hypo['tokens'].int().cpu(),
                    src_str=src_str,
                    alignment=hypo['alignment'],
                    align_dict=align_dict,
                    tgt_dict=tgt_dict,
                    remove_bpe=args.remove_bpe,
                    extra_symbols_to_ignore={
                        generator.eos,
                    }
                )
                detok_hypo_str = decode_fn(hypo_str)
                if not args.quiet:
                    score = hypo['score'] / math.log(2)  # convert to base 2
                    # original hypothesis (after tokenization and BPE)
                    print('H-{}\t{}\t{}'.format(sample_id, score, hypo_str), file=output_file)
                    # detokenized hypothesis
                    print('D-{}\t{}\t{}'.format(sample_id, score, detok_hypo_str), file=output_file)
                    # per-token positional scores
                    print('P-{}\t{}'.format(
                        sample_id,
                        ' '.join(map(
                            lambda x: '{:.4f}'.format(x),
                            # convert from base e to base 2
                            hypo['positional_scores'].div_(math.log(2)).tolist(),
                        ))
                    ), file=output_file)

                    if args.print_alignment:
                        print('A-{}\t{}'.format(
                            sample_id,
                            ' '.join(['{}-{}'.format(src_idx, tgt_idx) for src_idx, tgt_idx in alignment])
                        ), file=output_file)

                    if args.print_step:
                        print('I-{}\t{}'.format(sample_id, hypo['steps']), file=output_file)

                    if getattr(args, 'retain_iter_history', False):
                        # one E-line per iterative-refinement step
                        for step, h in enumerate(hypo['history']):
                            _, h_str, _ = utils.post_process_prediction(
                                hypo_tokens=h['tokens'].int().cpu(),
                                src_str=src_str,
                                alignment=None,
                                align_dict=None,
                                tgt_dict=tgt_dict,
                                remove_bpe=None,
                            )
                            print('E-{}_{}\t{}'.format(sample_id, step, h_str), file=output_file)

                # Score only the top hypothesis
                if has_target and j == 0:
                    if align_dict is not None or args.remove_bpe is not None:
                        # Convert back to tokens for evaluation with unk replacement and/or without BPE
                        target_tokens = tgt_dict.encode_line(target_str, add_if_not_exist=True)
                        hypo_tokens = tgt_dict.encode_line(detok_hypo_str, add_if_not_exist=True)
                    if hasattr(scorer, 'add_string'):
                        scorer.add_string(target_str, detok_hypo_str)
                    else:
                        scorer.add(target_tokens, hypo_tokens)

        wps_meter.update(num_generated_tokens)
        progress.log({'wps': round(wps_meter.avg)})
        num_sentences += sample['nsentences']

    logger.info('NOTE: hypothesis and token scores are output in base 2')
    logger.info('Translated {} sentences ({} tokens) in {:.1f}s ({:.2f} sentences/s, {:.2f} tokens/s)'.format(
        num_sentences, gen_timer.n, gen_timer.sum, num_sentences / gen_timer.sum, 1. / gen_timer.avg))
    if has_target:
        if args.bpe and not args.sacrebleu:
            if args.remove_bpe:
                logger.warning("BLEU score is being computed by splitting detokenized string on spaces, this is probably not what you want. Use --sacrebleu for standard 13a BLEU tokenization")
            else:
                logger.warning("If you are using BPE on the target side, the BLEU score is computed on BPE tokens, not on proper words. Use --sacrebleu for standard 13a BLEU tokenization")
        logger.info('Generate {} with beam={}: {}'.format(args.gen_subset, args.beam, scorer.result_string()))

    return scorer
19,281 | import logging
import math
import os
import sys
import torch
from fairseq import bleu, checkpoint_utils, options, tasks, utils
from fairseq.logging import progress_bar
from fairseq.logging.meters import StopwatchMeter, TimeMeter
from fairseq.data import encoders
def main(args):
    """Validate generation arguments and run `_main`, streaming output either
    to `<results_path>/generate-<subset>.txt` or to stdout."""
    assert args.path is not None, '--path required for generation!'
    assert not args.sampling or args.nbest == args.beam, \
        '--sampling requires --nbest to be equal to --beam'
    assert args.replace_unk is None or args.dataset_impl == 'raw', \
        '--replace-unk requires a raw text dataset (--dataset-impl=raw)'

    if args.results_path is None:
        # no results directory requested: stream directly to stdout
        return _main(args, sys.stdout)

    os.makedirs(args.results_path, exist_ok=True)
    output_path = os.path.join(
        args.results_path, 'generate-{}.txt'.format(args.gen_subset)
    )
    # line-buffered so results are visible while generation runs
    with open(output_path, 'w', buffering=1) as h:
        return _main(args, h)
def cli_main():
    """Console entry point for sequence generation."""
    args = options.parse_args_and_arch(options.get_generation_parser())
    main(args)
19,282 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
# Maps the --model_type CLI value to the matching HuggingFace tokenizer class.
TOKENIZERS = {
    'bert': BertTokenizer,
    'xlm': XLMTokenizer,
    'xlmr': XLMRobertaTokenizer,
}
def panx_tokenize_preprocess(args):
    """Re-segment PAN-X (WikiAnn) NER files so that no example exceeds the
    pretrained model's max sequence length in subword units.

    For each language in `args.languages` this writes `<split>.<model_type>`
    (token<TAB>label lines) and `<split>.<model_type>.idx` (original sentence
    index per token) into `<output_dir>/<lang>/`.
    """
    def _preprocess_one_file(infile, outfile, idxfile, tokenizer, max_len):
        # Returns 1 on success, 0 when the input split is missing.
        if not os.path.exists(infile):
            print(f'{infile} not exists')
            return 0

        # reserve room for special tokens added around each sequence
        # (3 for XLM-R style models, 2 otherwise)
        special_tokens_count = 3 if isinstance(tokenizer, XLMRobertaTokenizer) else 2
        max_seq_len = max_len - special_tokens_count
        subword_len_counter = idx = 0
        with open(infile, "rt") as fin, open(outfile, "w") as fout, open(idxfile, "w") as fidx:
            for line in fin:
                line = line.strip()
                if not line:
                    # sentence boundary: emit blank lines and advance the index
                    fout.write('\n')
                    fidx.write('\n')
                    idx += 1
                    subword_len_counter = 0
                    continue

                items = line.split()
                token = items[0].strip()
                if len(items) == 2:
                    label = items[1].strip()
                else:
                    label = 'O'  # unlabeled token defaults to "outside"
                current_subwords_len = len(tokenizer.tokenize(token))

                # tokens the subword tokenizer cannot handle (or that are too
                # long on their own) are replaced by the unknown token
                if (current_subwords_len == 0 or current_subwords_len > max_seq_len) and len(token) != 0:
                    token = tokenizer.unk_token
                    current_subwords_len = 1

                if (subword_len_counter + current_subwords_len) > max_seq_len:
                    # budget exceeded: start a new example before this token
                    fout.write(f"\n{token}\t{label}\n")
                    fidx.write(f"\n{idx}\n")
                    subword_len_counter = current_subwords_len
                else:
                    fout.write(f"{token}\t{label}\n")
                    fidx.write(f"{idx}\n")
                    subword_len_counter += current_subwords_len
        return 1

    model_type = args.model_type
    tokenizer = TOKENIZERS[model_type].from_pretrained(args.model_name_or_path,
                                                       do_lower_case=args.do_lower_case,
                                                       cache_dir=args.cache_dir if args.cache_dir else None)
    for lang in args.languages.split(','):
        out_dir = os.path.join(args.output_dir, lang)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # only English has training data unless --process_all_langs is set
        if args.process_all_langs:
            files = ['dev', 'test', 'train']
        else:
            if lang == 'en':
                files = ['dev', 'test', 'train']
            else:
                files = ['dev', 'test']
        for file in files:
            infile = os.path.join(args.data_dir, f'{file}-{lang}.tsv')
            # if os.path.exists(args.model_name_or_path):
            #   args.model_name_or_path = args.model_name_or_path.split('/')[-1].replace('.', '-')
            outfile = os.path.join(out_dir, "{}.{}".format(file, args.model_type))
            idxfile = os.path.join(out_dir, "{}.{}.idx".format(file, args.model_type))
            print(f'start preprocessing {infile}')
            # skip work that is already done (both outputs present)
            if os.path.exists(outfile) and os.path.exists(idxfile):
                print(f'{outfile} and {idxfile} exist')
            else:
                code = _preprocess_one_file(infile, outfile, idxfile, tokenizer, args.max_len)
                if code > 0:
                    print(f'finish preprocessing {outfile}')
19,283 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def panx_preprocess(args):
    """Convert raw PAN-X (WikiAnn) files `<lang>-<split>` into
    `<split>-<lang>.tsv` token/label files in `args.output_dir`.

    Raw tokens are prefixed with "<lang>:"; that prefix is stripped.  When
    `args.remove_test_label` is set, test files are written without labels.
    """
    def _process_one_file(infile, outfile):
        # fix: close the input file (the original leaked the handle)
        with open(infile, 'r') as fin:
            lines = fin.readlines()
        # drop a single trailing blank line; guard against empty files
        if lines and lines[-1].strip() == '':
            lines = lines[:-1]
        with open(outfile, 'w') as fout:
            for l in lines:
                items = l.strip().split('\t')
                if len(items) == 2:
                    label = items[1].strip()
                    # strip the "<lang>:" prefix from the token
                    # NOTE(review): if ':' is absent, `token` keeps its value
                    # from the previous iteration (pre-existing behavior)
                    idx = items[0].find(':')
                    if idx != -1:
                        token = items[0][idx + 1:].strip()
                    if 'test' in infile and args.remove_test_label:
                        fout.write(f'{token}\n')
                    else:
                        fout.write(f'{token}\t{label}\n')
                else:
                    fout.write('\n')  # sentence boundary

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    langs = 'ar he vi id jv ms tl eu ml ta te af nl en de el bn hi mr ur fa fr it pt es bg ru ja ka ko th sw yo my zh kk tr et fi hu'.split(' ')
    for lg in langs:
        for split in ['train', 'test', 'dev']:
            infile = os.path.join(args.data_dir, f'{lg}-{split}')
            outfile = os.path.join(args.output_dir, f'{split}-{lg}.tsv')
            _process_one_file(infile, outfile)
19,284 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
# Maps the --model_type CLI value to the matching HuggingFace tokenizer class.
TOKENIZERS = {
    'bert': BertTokenizer,
    'xlm': XLMTokenizer,
    'xlmr': XLMRobertaTokenizer,
}
def udpos_tokenize_preprocess(args):
    """Re-segment UD POS-tagging files so no example exceeds the pretrained
    model's max sequence length in subword units.

    Mirrors `panx_tokenize_preprocess`: writes `<split>.<model_type>` and a
    matching `.idx` file (original sentence index per token) into
    `<output_dir>/<lang>/`.
    """
    def _preprocess_one_file(infile, outfile, idxfile, tokenizer, max_len):
        # silently skip splits that don't exist for this language
        if not os.path.exists(infile):
            print(f'{infile} does not exist')
            return

        subword_len_counter = idx = 0
        # reserve room for special tokens added around each sequence
        special_tokens_count = 3 if isinstance(tokenizer, XLMRobertaTokenizer) else 2
        max_seq_len = max_len - special_tokens_count
        with open(infile, "rt") as fin, open(outfile, "w") as fout, open(idxfile, "w") as fidx:
            for line in fin:
                line = line.strip()
                if len(line) == 0 or line == '':
                    # sentence boundary: emit blank lines, advance the index
                    fout.write('\n')
                    fidx.write('\n')
                    idx += 1
                    subword_len_counter = 0
                    continue

                items = line.split()
                if len(items) == 2:
                    label = items[1].strip()
                else:
                    label = "X"  # missing tag defaults to UD's "other" tag
                token = items[0].strip()
                current_subwords_len = len(tokenizer.tokenize(token))

                # untokenizable / overlong tokens become the unknown token
                if (current_subwords_len == 0 or current_subwords_len > max_seq_len) and len(token) != 0:
                    token = tokenizer.unk_token
                    current_subwords_len = 1

                if (subword_len_counter + current_subwords_len) > max_seq_len:
                    # budget exceeded: start a new example before this token
                    fout.write(f"\n{token}\t{label}\n")
                    fidx.write(f"\n{idx}\n")
                    subword_len_counter = current_subwords_len
                else:
                    fout.write(f"{token}\t{label}\n")
                    fidx.write(f"{idx}\n")
                    subword_len_counter += current_subwords_len

    model_type = args.model_type
    tokenizer = TOKENIZERS[model_type].from_pretrained(args.model_name_or_path,
                                                       do_lower_case=args.do_lower_case,
                                                       cache_dir=args.cache_dir if args.cache_dir else None)
    for lang in args.languages.split(','):
        out_dir = os.path.join(args.output_dir, lang)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)
        # only English has training data unless --process_all_langs is set
        if args.process_all_langs:
            files = ['dev', 'test', 'train']
        else:
            if lang == 'en':
                files = ['dev', 'test', 'train']
            else:
                files = ['dev', 'test']
        for file in files:
            infile = os.path.join(args.data_dir, "{}-{}.tsv".format(file, lang))
            outfile = os.path.join(out_dir, "{}.{}".format(file, args.model_type))
            idxfile = os.path.join(out_dir, "{}.{}.idx".format(file, args.model_type))
            print(f'start preprocessing {infile}')
            # skip work that is already done (both outputs present)
            if os.path.exists(outfile) and os.path.exists(idxfile):
                print(f'{outfile} and {idxfile} exist')
            else:
                _preprocess_one_file(infile, outfile, idxfile, tokenizer, args.max_len)
                print(f'finish preprocessing {outfile}')
19,285 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def udpos_preprocess(args):
    """Convert UD CoNLL-U treebanks under `args.data_dir/<lang>/` into
    word/UPOS tsv files `<split>-<lang>.tsv` in `args.output_dir`.

    Fixes vs. the previous version: the 'mt' writer crashed with
    `' '.join(sent, newline)` (str.join takes one argument); input files are
    now closed via context managers; a leftover debug assertion on directory
    listing order (which could abort valid runs) and debug prints were removed.
    """

    def _read_one_file(file):
        # Parse one CoNLL-U file into (tokens, upos_tags, raw_lines) sentences.
        data = []
        sent, tag, lines = [], [], []
        with open(file, 'r') as fin:
            for line in fin:
                items = line.strip().split('\t')
                if len(items) != 10:
                    # sentence boundary (blank/comment line): flush the buffer,
                    # skipping sentences that are (almost) all '_' placeholders
                    num_empty = sum([int(w == '_') for w in sent])
                    if num_empty == 0 or num_empty < len(sent) - 1:
                        data.append((sent, tag, lines))
                    sent, tag, lines = [], [], []
                else:
                    sent.append(items[1].strip())   # FORM column
                    tag.append(items[3].strip())    # UPOS column
                    lines.append(line.strip())
                    # token IDs must be consecutive within a sentence
                    assert len(sent) == int(items[0]), 'line={}, sent={}, tag={}'.format(line, sent, tag)
        return data

    def remove_empty_space(data):
        # Strip zero-width non-joiners and internal spaces from tokens so the
        # downstream space-joined format stays aligned with its tags.
        new_data = {}
        for split in data:
            new_data[split] = []
            for sent, tag, lines in data[split]:
                new_sent = [''.join(w.replace('\u200c', '').split(' ')) for w in sent]
                lines = [line.replace('\u200c', '') for line in lines]
                assert len(" ".join(new_sent).split(' ')) == len(tag)
                new_data[split].append((new_sent, tag, lines))
        return new_data

    def check_file(file):
        # Sanity check: token and tag columns of each line must align.
        with open(file) as fin:
            for i, l in enumerate(fin):
                items = l.strip().split('\t')
                assert len(items[0].split(' ')) == len(items[1].split(' ')), 'idx={}, line={}'.format(i, l)

    def _write_files(data, output_dir, lang, suffix):
        # Write non-empty splits in one of three formats:
        #   'mt'    - one sentence per line ("tokens<TAB>tags")
        #   'tsv'   - one token per line ("token<TAB>tag"), blank line between sentences
        #   'conll' - the original CoNLL-U lines
        for split in data:
            if len(data[split]) > 0:
                prefix = os.path.join(output_dir, f'{split}-{lang}')
                if suffix == 'mt':
                    with open(prefix + '.mt.tsv', 'w') as fout:
                        for idx, (sent, tag, _) in enumerate(data[split]):
                            newline = '\n' if idx != len(data[split]) - 1 else ''
                            if split == 'test' and args.remove_test_label:
                                # bug fix: join only the tokens, then append the newline
                                fout.write('{}{}'.format(' '.join(sent), newline))
                            else:
                                fout.write('{}\t{}{}'.format(' '.join(sent), ' '.join(tag), newline))
                    check_file(prefix + '.mt.tsv')
                    print(' - finish checking ' + prefix + '.mt.tsv')
                elif suffix == 'tsv':
                    with open(prefix + '.tsv', 'w') as fout:
                        for sidx, (sent, tag, _) in enumerate(data[split]):
                            for widx, (w, t) in enumerate(zip(sent, tag)):
                                newline = '' if (sidx == len(data[split]) - 1) and (widx == len(sent) - 1) else '\n'
                                if split == 'test' and args.remove_test_label:
                                    fout.write('{}{}'.format(w, newline))
                                else:
                                    fout.write('{}\t{}{}'.format(w, t, newline))
                            fout.write('\n')
                elif suffix == 'conll':
                    with open(prefix + '.conll', 'w') as fout:
                        for _, _, lines in data[split]:
                            for l in lines:
                                fout.write(l.strip() + '\n')
                            fout.write('\n')
                print(f'finish writing file to {prefix}.{suffix}')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    languages = 'af ar bg de el en es et eu fa fi fr he hi hu id it ja kk ko mr nl pt ru ta te th tl tr ur vi yo zh'.split(' ')
    for root, dirs, files in os.walk(args.data_dir):
        lg = root.strip().split('/')[-1]
        if root == args.data_dir or lg not in languages:
            continue
        data = {k: [] for k in ['train', 'dev', 'test']}
        # sort for deterministic ordering across filesystems
        # (os.walk gives files in arbitrary order)
        for f in sorted(files):
            if f.endswith('conll'):
                file = os.path.join(root, f)
                examples = _read_one_file(file)
                if 'train' in f:
                    data['train'].extend(examples)
                elif 'dev' in f:
                    data['dev'].extend(examples)
                elif 'test' in f:
                    data['test'].extend(examples)
                else:
                    print('split not found: ', file)
                print(' - finish reading {}, {}'.format(file, [(k, len(v)) for k, v in data.items()]))
        data = remove_empty_space(data)
        for sub in ['tsv']:
            _write_files(data, args.output_dir, lg, sub)
19,286 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def pawsx_preprocess(args):
    """Flatten PAWS-X files into `<split>-<lang>.tsv` sentence-pair files.

    Reads `<data_dir>/<lang>/{train,test_2k,dev_2k}.tsv` (id, sent1, sent2,
    label with a header row) and writes tab-separated [sent1, sent2, label]
    rows; labels are dropped for test files when `args.remove_test_label`.
    """
    def _preprocess_one_file(infile, outfile, remove_label=False):
        data = []
        # fix: close the input file (previous version leaked the handle)
        with open(infile, 'r') as fin:
            for i, line in enumerate(fin):
                if i == 0:
                    continue  # skip the header row
                items = line.strip().split('\t')
                sent1 = ' '.join(items[1].strip().split(' '))
                sent2 = ' '.join(items[2].strip().split(' '))
                label = items[3]
                data.append([sent1, sent2, label])

        with open(outfile, 'w') as fout:
            writer = csv.writer(fout, delimiter='\t')
            for sent1, sent2, label in data:
                if remove_label:
                    writer.writerow([sent1, sent2])
                else:
                    writer.writerow([sent1, sent2, label])

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    split2file = {'train': 'train', 'test': 'test_2k', 'dev': 'dev_2k'}
    for lang in ['en', 'de', 'es', 'fr', 'ja', 'ko', 'zh']:
        for split in ['train', 'test', 'dev']:
            if split == 'train' and lang != 'en':
                continue  # only English has training data
            file = split2file[split]
            infile = os.path.join(args.data_dir, lang, "{}.tsv".format(file))
            outfile = os.path.join(args.output_dir, "{}-{}.tsv".format(split, lang))
            # fixed typo: was `removel_label`
            remove_label = args.remove_test_label and (split == 'test')
            _preprocess_one_file(infile, outfile, remove_label=remove_label)
            print(f'finish preprocessing {outfile}')
19,287 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def xnli_preprocess(args):
    """Convert raw XNLI data into per-language `<split>-<lang>.tsv` files.

    Training data comes from the machine-translated MultiNLI English file;
    dev/test come from the combined multilingual XNLI files, bucketed by the
    language column.  The corpus label spelling "contradictory" is normalized
    to "contradiction".
    """
    def _preprocess_file(infile, output_dir, split, remove_label=False):
        # Bucket premise/hypothesis/label triples by language.
        all_langs = defaultdict(list)
        with open(infile, 'r') as fin:
            for i, line in enumerate(fin):
                if i == 0:
                    continue  # header row
                items = line.strip().split('\t')
                lang = items[0].strip()
                label = "contradiction" if items[1].strip() == "contradictory" else items[1].strip()
                sent1 = ' '.join(items[6].strip().split(' '))
                sent2 = ' '.join(items[7].strip().split(' '))
                all_langs[lang].append((sent1, sent2, label))
        print(f'# langs={len(all_langs)}')

        for lang, pairs in all_langs.items():
            outfile = os.path.join(output_dir, '{}-{}.tsv'.format(split, lang))
            with open(outfile, 'w') as fout:
                writer = csv.writer(fout, delimiter='\t')
                for (sent1, sent2, label) in pairs:
                    if remove_label:
                        writer.writerow([sent1, sent2])
                    else:
                        writer.writerow([sent1, sent2, label])
            print(f'finish preprocess {outfile}')

    def _preprocess_train_file(infile, outfile):
        # fix: close the input file (previous version leaked the handle)
        with open(infile, 'r') as fin, open(outfile, 'w') as fout:
            writer = csv.writer(fout, delimiter='\t')
            for i, line in enumerate(fin):
                if i == 0:
                    continue  # header row
                items = line.strip().split('\t')
                sent1 = ' '.join(items[0].strip().split(' '))
                sent2 = ' '.join(items[1].strip().split(' '))
                label = "contradiction" if items[2].strip() == "contradictory" else items[2].strip()
                writer.writerow([sent1, sent2, label])
        print(f'finish preprocess {outfile}')

    infile = os.path.join(args.data_dir, 'XNLI-MT-1.0/multinli/multinli.train.en.tsv')
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    outfile = os.path.join(args.output_dir, 'train-en.tsv')
    _preprocess_train_file(infile, outfile)

    for split in ['test', 'dev']:
        infile = os.path.join(args.data_dir, 'XNLI-1.0/xnli.{}.tsv'.format(split))
        print(f'reading file {infile}')
        # fixed typo: was `removel_label`
        remove_label = args.remove_test_label and (split == 'test')
        _preprocess_file(infile, args.output_dir, split, remove_label=remove_label)
19,288 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def tatoeba_preprocess(args):
    """Copy Tatoeba retrieval data into `<lang2>-en.<side>` file pairs.

    Source-side files are copied verbatim.  When `args.remove_test_label`
    is set, the English side is rewritten in alphabetical order so the
    line-by-line gold alignment is destroyed (anti-cheating).
    """
    # ISO 639-3 -> ISO 639-1 codes covered by the Tatoeba task
    lang3_dict = {
        'afr':'af', 'ara':'ar', 'bul':'bg', 'ben':'bn',
        'deu':'de', 'ell':'el', 'spa':'es', 'est':'et',
        'eus':'eu', 'pes':'fa', 'fin':'fi', 'fra':'fr',
        'heb':'he', 'hin':'hi', 'hun':'hu', 'ind':'id',
        'ita':'it', 'jpn':'ja', 'jav':'jv', 'kat':'ka',
        'kaz':'kk', 'kor':'ko', 'mal':'ml', 'mar':'mr',
        'nld':'nl', 'por':'pt', 'rus':'ru', 'swh':'sw',
        'tam':'ta', 'tel':'te', 'tha':'th', 'tgl':'tl',
        'tur':'tr', 'urd':'ur', 'vie':'vi', 'cmn':'zh',
        'eng':'en',
    }
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    for sl3, sl2 in lang3_dict.items():
        if sl3 == 'eng':
            continue  # English is only ever the target side
        src_file = f'{args.data_dir}/tatoeba.{sl3}-eng.{sl3}'
        tgt_file = f'{args.data_dir}/tatoeba.{sl3}-eng.eng'
        src_out = f'{args.output_dir}/{sl2}-en.{sl2}'
        tgt_out = f'{args.output_dir}/{sl2}-en.en'
        shutil.copy(src_file, src_out)
        if args.remove_test_label:
            # shuffle away the gold alignment by re-sorting alphabetically
            # (fix: close the handle; drop the unused index zip)
            with open(tgt_file) as fin:
                tgts = [l.strip() for l in fin]
            with open(tgt_out, 'w') as ftgt:
                for t in sorted(tgts):
                    ftgt.write(f'{t}\n')
        else:
            shutil.copy(tgt_file, tgt_out)
19,289 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def remove_qa_test_annotations(test_dir):
    """Blank out the gold answers in every SQuAD-style JSON file under
    ``test_dir`` so that test-set labels cannot leak into evaluation.

    Each question is re-emitted as its own single-paragraph entry whose only
    answer is the empty placeholder ``{'answer_start': 0, 'text': ''}``.
    Files are overwritten in place.
    """
    assert os.path.exists(test_dir)
    for file_name in os.listdir(test_dir):
        test_file = os.path.join(test_dir, file_name)
        with open(test_file, 'r') as handle:
            dataset = json.load(handle)
        version = dataset['version']
        stripped = []
        for document in dataset['data']:
            for paragraph in document['paragraphs']:
                context = paragraph['context']
                for qa in paragraph['qas']:
                    # Sanity-check the gold spans before discarding them.
                    for answer in qa['answers']:
                        start = answer['answer_start']
                        text = answer['text']
                        assert context[start:start + len(text)] == text
                    stripped.append({'paragraphs': [{
                        'context': context,
                        'qas': [{'answers': [{'answer_start': 0, 'text': ''}],
                                 'question': qa['question'],
                                 'id': qa['id']}]}]})
        with open(test_file, 'w') as handle:
            json.dump({'data': stripped, 'version': version}, handle)
def xquad_preprocess(args):
    """Prepare XQuAD: optionally strip gold answers from the test files.

    When ``args.remove_test_label`` is set, every file in ``args.data_dir``
    is rewritten with empty answers to prevent accidental cheating.
    """
    if not args.remove_test_label:
        return
    remove_qa_test_annotations(args.data_dir)
19,290 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def remove_qa_test_annotations(test_dir):
    """Overwrite every SQuAD-style JSON file in ``test_dir`` with a copy whose
    gold answers are blanked out (``answer_start=0``, ``text=''``), so that
    test-set labels cannot leak into evaluation pipelines.

    Each question is re-emitted as its own single-paragraph entry; the
    original grouping of several questions under one paragraph is not kept.
    """
    # Fail fast on a bad path rather than silently processing nothing.
    assert os.path.exists(test_dir)
    for file_name in os.listdir(test_dir):
        new_data = []
        test_file = os.path.join(test_dir, file_name)
        with open(test_file, 'r') as f:
            data = json.load(f)
            version = data['version']
            for doc in data['data']:
                for par in doc['paragraphs']:
                    context = par['context']
                    for qa in par['qas']:
                        question = qa['question']
                        question_id = qa['id']
                        # Sanity-check the gold spans before discarding them.
                        for answer in qa['answers']:
                            a_start, a_text = answer['answer_start'], answer['text']
                            a_end = a_start + len(a_text)
                            assert context[a_start:a_end] == a_text
                        # One entry per question, with an empty placeholder answer.
                        new_data.append({'paragraphs': [{
                            'context': context,
                            'qas': [{'answers': [{'answer_start': 0, 'text': ''}],
                                     'question': question,
                                     'id': question_id}]}]})
        # Overwrite the file in place with the label-free version.
        with open(test_file, 'w') as f:
            json.dump({'data': new_data, 'version': version}, f)
def mlqa_preprocess(args):
    """Prepare MLQA: optionally strip gold answers from the test files.

    When ``args.remove_test_label`` is set, every file in ``args.data_dir``
    is rewritten with empty answers to prevent accidental cheating.
    """
    if not args.remove_test_label:
        return
    remove_qa_test_annotations(args.data_dir)
19,291 | from __future__ import absolute_import, division, print_function
import argparse
from transformers import BertTokenizer, XLMTokenizer, XLMRobertaTokenizer
import os
from collections import defaultdict
import csv
import random
import os
import shutil
import json
def remove_qa_test_annotations(test_dir):
    """Overwrite every SQuAD-style JSON file in ``test_dir`` with a copy whose
    gold answers are blanked out (``answer_start=0``, ``text=''``), so that
    test-set labels cannot leak into evaluation pipelines.

    Each question is re-emitted as its own single-paragraph entry; the
    original grouping of several questions under one paragraph is not kept.
    """
    # Fail fast on a bad path rather than silently processing nothing.
    assert os.path.exists(test_dir)
    for file_name in os.listdir(test_dir):
        new_data = []
        test_file = os.path.join(test_dir, file_name)
        with open(test_file, 'r') as f:
            data = json.load(f)
            version = data['version']
            for doc in data['data']:
                for par in doc['paragraphs']:
                    context = par['context']
                    for qa in par['qas']:
                        question = qa['question']
                        question_id = qa['id']
                        # Sanity-check the gold spans before discarding them.
                        for answer in qa['answers']:
                            a_start, a_text = answer['answer_start'], answer['text']
                            a_end = a_start + len(a_text)
                            assert context[a_start:a_end] == a_text
                        # One entry per question, with an empty placeholder answer.
                        new_data.append({'paragraphs': [{
                            'context': context,
                            'qas': [{'answers': [{'answer_start': 0, 'text': ''}],
                                     'question': question,
                                     'id': question_id}]}]})
        # Overwrite the file in place with the label-free version.
        with open(test_file, 'w') as f:
            json.dump({'data': new_data, 'version': version}, f)
def tydiqa_preprocess(args):
    """Prepare TyDiQA-GoldP: split the multilingual train file into one
    SQuAD-style file per language, rename the dev files to ISO codes, and
    optionally strip the dev answers.
    """
    LANG2ISO = {'arabic': 'ar', 'bengali': 'bn', 'english': 'en', 'finnish': 'fi',
                'indonesian': 'id', 'korean': 'ko', 'russian': 'ru',
                'swahili': 'sw', 'telugu': 'te'}
    assert os.path.exists(args.data_dir)
    train_file = os.path.join(args.data_dir, 'tydiqa-goldp-v1.1-train.json')
    os.makedirs(args.output_dir, exist_ok=True)
    # Bucket training questions by the language prefix of their question id.
    lang2data = defaultdict(list)
    with open(train_file, 'r') as f_in:
        raw = json.load(f_in)
    version = raw['version']
    for doc in raw['data']:
        for paragraph in doc['paragraphs']:
            context = paragraph['context']
            for qa in paragraph['qas']:
                id_parts = qa['id'].split('-')
                example_lang, q_id = id_parts[0], id_parts[-1]
                # Sanity-check that every gold span matches the context.
                for answer in qa['answers']:
                    span_start = answer['answer_start']
                    span_text = answer['text']
                    assert context[span_start:span_start + len(span_text)] == span_text
                lang2data[example_lang].append({'paragraphs': [{
                    'context': context,
                    'qas': [{'answers': qa['answers'],
                             'question': qa['question'],
                             'id': q_id}]}]})
    # One SQuAD-style training file per language, keyed by ISO code.
    for lang, entries in lang2data.items():
        out_file = os.path.join(
            args.output_dir, 'tydiqa.%s.train.json' % LANG2ISO[lang])
        with open(out_file, 'w') as f:
            json.dump({'data': entries, 'version': version}, f)
    # Rename the dev files
    dev_dir = os.path.join(args.data_dir, 'tydiqa-goldp-v1.1-dev')
    assert os.path.exists(dev_dir)
    for lang, iso in LANG2ISO.items():
        os.rename(os.path.join(dev_dir, 'tydiqa-goldp-dev-%s.json' % lang),
                  os.path.join(dev_dir, 'tydiqa.%s.dev.json' % iso))
    # Remove the test annotations to prevent accidental cheating
    if args.remove_test_label:
        remove_qa_test_annotations(dev_dir)
19,292 | import argparse
import glob
import logging
import os
import random
import timeit
import shutil,json
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from evaluate_squad import evaluate as squad_eval
from transformers import (
WEIGHTS_NAME,
AdamW,
get_linear_schedule_with_warmup,
XLMRobertaTokenizer
)
from transformers.data.metrics.squad_metrics import (
compute_predictions_log_probs,
compute_predictions_logits,
)
from xlm_roberta import XLMRobertaForQuestionAnswering, XLMRobertaConfig
from processors.squad import (
SquadResult,
SquadV1Processor,
SquadV2Processor,
squad_convert_examples_to_features
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
    """Seed every RNG (Python, NumPy, PyTorch CPU and, if GPUs are in use,
    all CUDA devices) so training runs are reproducible."""
    seed = args.seed
    for seeder in (random.seed, np.random.seed, torch.manual_seed):
        seeder(seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(seed)
def evaluate(args, dataset_tuple, model, tokenizer, prefix="", language='en', lang2id=None):
    """Run QA inference over an evaluation set and score the predictions.

    Args:
        args: parsed namespace (device, batch sizes, output paths, n-best
            settings, ...). NOTE: ``args.eval_batch_size`` is written here
            as a side effect.
        dataset_tuple: ``(dataset, examples, features, ori_data)`` as produced
            by ``load_and_cache_examples(..., output_examples=True,
            output_original_data=True)``; ``ori_data`` may be ``None``.
        model: question-answering model; its outputs are indexed positionally
            (start/end logits, plus extra heads for XLNet/XLM-style models).
        tokenizer: tokenizer forwarded to ``compute_predictions_logits``.
        prefix: tag embedded in the prediction file names (e.g. global step).
        language: language code embedded in the prediction file names.
        lang2id: unused here; kept for signature parity with sibling scripts.

    Returns:
        The dict returned by the official SQuAD evaluator, or ``None`` when
        ``ori_data`` is ``None`` (nothing to score against).
    """
    dataset, examples, features, ori_data = dataset_tuple
    if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
        os.makedirs(args.output_dir)
    args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
    # Note that DistributedSampler samples randomly
    eval_sampler = SequentialSampler(dataset)
    eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
    # multi-gpu evaluate
    if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)
    # Eval!
    logger.info("***** Running evaluation {} *****".format(prefix))
    logger.info(" Num examples = %d", len(dataset))
    logger.info(" Batch size = %d", args.eval_batch_size)
    all_results = []
    start_time = timeit.default_timer()
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        model.eval()
        batch = tuple(t.to(args.device) for t in batch)
        with torch.no_grad():
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                # RoBERTa-style models take no segment ids.
                "token_type_ids": None,
            }
            example_indices = batch[3]
            outputs = model(**inputs)
        for i, example_index in enumerate(example_indices):
            eval_feature = features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            # NOTE(review): `to_list` is defined elsewhere in this file/module.
            output = [to_list(output[i]) for output in outputs]
            # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler"
            # models only use two.
            if len(output) >= 5:
                start_logits = output[0]
                start_top_index = output[1]
                end_logits = output[2]
                end_top_index = output[3]
                cls_logits = output[4]
                result = SquadResult(
                    unique_id,
                    start_logits,
                    end_logits,
                    start_top_index=start_top_index,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )
            else:
                start_logits, end_logits = output
                result = SquadResult(unique_id, start_logits, end_logits)
            all_results.append(result)
    evalTime = timeit.default_timer() - start_time
    logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset))
    # Compute predictions
    output_prediction_file = os.path.join(args.output_dir, "predictions_{}_{}.json".format(language, prefix))
    output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}_{}.json".format(language, prefix))
    if args.version_2_with_negative:
        output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix))
    else:
        output_null_log_odds_file = None
    predictions = compute_predictions_logits(
        examples,
        features,
        all_results,
        args.n_best_size,
        args.max_answer_length,
        args.do_lower_case,
        output_prediction_file,
        output_nbest_file,
        output_null_log_odds_file,
        args.verbose_logging,
        args.version_2_with_negative,
        args.null_score_diff_threshold,
        tokenizer,
    )
    # Compute the F1 and exact scores.
    results = None
    if ori_data is not None:
        results = squad_eval(ori_data, predictions)
    return results
def load_and_cache_examples(args, tokenizer, data_file, evaluate=False, output_examples=False,
                            language='en', lang2id=None, output_original_data=False):
    """Load a SQuAD-style dataset, converting examples to features and
    caching the result on disk.

    Args:
        args: parsed namespace (data_dir, model_name_or_path, max_seq_length,
            doc_stride, max_query_length, threads, overwrite_cache,
            local_rank, version_2_with_negative, ...).
        tokenizer: tokenizer used to build features.
        data_file: dataset file name (relative to ``args.data_dir``).
        evaluate: True for dev/test data, False for training data.
        output_examples: also return the raw examples and features (forces a
            rebuild, since the cache only stores features + dataset).
        language: language code forwarded to the processor and cache key.
        lang2id: optional language-id mapping forwarded to feature conversion.
        output_original_data: also return the raw ``data`` JSON payload
            (only honored together with ``output_examples``).

    Returns:
        ``dataset``, optionally followed by ``examples``, ``features`` and
        ``ori_data`` depending on the output flags.
    """
    if args.local_rank not in [-1, 0] and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    # Load data features from cache or dataset file
    input_dir = args.data_dir if args.data_dir else "."
    cached_features_file = os.path.join(
        input_dir,
        "cached_{}_{}_{}_{}_{}".format(
            "dev" if evaluate else "train",
            list(filter(None, args.model_name_or_path.split("/"))).pop(),
            list(filter(None, data_file.split("/"))).pop(),
            str(args.max_seq_length),
            str(language)
        ),
    )
    # Init features and dataset from cache if it exists
    if os.path.exists(cached_features_file) and not args.overwrite_cache and not output_examples:
        logger.info("Loading features from cached file %s", cached_features_file)
        features_and_dataset = torch.load(cached_features_file)
        features, dataset = features_and_dataset["features"], features_and_dataset["dataset"]
    else:
        logger.info("Creating features from dataset file at %s", data_file)
        if not args.data_dir and ((evaluate and not args.predict_file and not args.dev_file) or (not evaluate and not args.train_file)):
            try:
                import tensorflow_datasets as tfds
            except ImportError:
                raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.")
            if args.version_2_with_negative:
                # Fix: Logger.warn is a deprecated alias of Logger.warning.
                logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.")
            tfds_examples = tfds.load("squad")
            examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate, language=language)
        else:
            processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
            if evaluate:
                examples = processor.get_dev_examples(args.data_dir, filename=data_file, language=language)
            else:
                examples = processor.get_train_examples(args.data_dir, filename=data_file, language=language)
        features, dataset = squad_convert_examples_to_features(
            examples=examples,
            tokenizer=tokenizer,
            max_seq_length=args.max_seq_length,
            doc_stride=args.doc_stride,
            max_query_length=args.max_query_length,
            is_training=not evaluate,
            return_dataset="pt",
            threads=args.threads,
            lang2id=lang2id
        )
        if args.local_rank in [-1, 0]:
            logger.info("Saving features into cached file %s", cached_features_file)
            torch.save({"features": features, "dataset": dataset}, cached_features_file)
    if args.local_rank == 0 and not evaluate:
        # Make sure only the first process in distributed training process the dataset, and the others will use the cache
        torch.distributed.barrier()
    if output_original_data:
        with open(data_file) as dataset_file:
            dataset_json = json.load(dataset_file)
            ori_data = dataset_json['data']
    if output_examples and output_original_data:
        return dataset, examples, features, ori_data
    elif output_examples:
        return dataset, examples, features
    else:
        return dataset
def evaluate(dataset, predictions):
    """Compute corpus-level exact-match and F1 over a SQuAD-style dataset.

    Questions missing from ``predictions`` score 0 and emit a warning on
    stderr. Returns ``{'exact_match': ..., 'f1': ...}`` as percentages.
    """
    total = 0
    em_sum = 0
    f1_sum = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                qid = qa['id']
                if qid not in predictions:
                    message = 'Unanswered question ' + qid + \
                              ' will receive score 0.'
                    print(message, file=sys.stderr)
                    continue
                ground_truths = [answer['text'] for answer in qa['answers']]
                prediction = predictions[qid]
                em_sum += metric_max_over_ground_truths(
                    exact_match_score, prediction, ground_truths)
                f1_sum += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths)
    return {'exact_match': 100.0 * em_sum / total,
            'f1': 100.0 * f1_sum / total}
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer)` to solve the following problem:
Train the model
Here is the function:
def train(args, train_dataset, model, tokenizer):
    """Train `model` on `train_dataset`.

    Handles gradient accumulation, optional apex fp16, multi-GPU
    (DataParallel) and distributed (DDP) training, checkpoint resumption,
    TensorBoard logging, and periodic checkpointing (either every
    ``args.save_steps`` or only the best dev checkpoint when
    ``args.save_only_best_checkpoint`` is set).

    Returns:
        (global_step, average training loss per step).
    """
    if args.local_rank in [-1, 0]:
        # Start TensorBoard from a clean slate for this run.
        tb_dir = os.path.join(args.output_dir, "tb_log")
        if os.path.isdir(tb_dir):
            shutil.rmtree(tb_dir)
        tb_writer = SummaryWriter(tb_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # t_total = number of optimizer updates (not raw batches).
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay)
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    dev_dataset = None
    if args.save_only_best_checkpoint and args.dev_file is not None:
        # Read dev dataset
        dev_dataset = load_and_cache_examples(args, tokenizer, args.dev_file, evaluate=True, output_examples=True,
                                              language=args.train_lang, lang2id=None, output_original_data=True)
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    # global_step starts at 1 -- presumably to avoid division by zero in the
    # final average-loss return; TODO confirm against checkpoint numbering.
    global_step = 1
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            # (relies on the "checkpoint-<step>" directory naming convention).
            checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0]
            global_step = int(checkpoint_suffix)
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine-tuning.")
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    # Added here for reproductibility
    set_seed(args)
    best_score = 0
    best_checkpoint = None
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {
                "input_ids": batch[0],
                "attention_mask": batch[1],
                # RoBERTa-style models take no segment ids.
                "token_type_ids": None,
                "start_positions": batch[3],
                "end_positions": batch[4],
            }
            outputs = model(**inputs)
            # model outputs are always tuple in transformers (see doc)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel (not distributed) training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            # Only step the optimizer every `gradient_accumulation_steps` batches.
            if (step + 1) % args.gradient_accumulation_steps == 0:
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                # Log metrics
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Only evaluate when single GPU otherwise metrics may not average well
                    if args.local_rank == -1 and args.evaluate_during_training:
                        # NOTE(review): this call does not match either
                        # `evaluate` signature visible in this file (both take
                        # a dataset argument) -- looks stale; confirm.
                        results = evaluate(args, model, tokenizer)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                # Save model checkpoint
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    if args.save_only_best_checkpoint and dev_dataset is not None:
                        # Keep a single "checkpoint-best" directory, overwritten
                        # whenever the mean of dev F1 and exact-match improves.
                        result = evaluate(args, dev_dataset, model, tokenizer, prefix=str(global_step))
                        logger.info(" Dev F1 on global step {} = {}".format(global_step, result['f1']))
                        logger.info(" Dev exact match on global step {} = {}".format(global_step, result['exact_match']))
                        if (result['f1'] + result['exact_match']) / 2.0 > best_score:
                            logger.info(" result socre={} > best score={}".format((result['f1'] + result['exact_match']) / 2.0 , best_score))
                            output_dir = os.path.join(args.output_dir, "checkpoint-best")
                            best_checkpoint = output_dir
                            best_score = (result['f1'] + result['exact_match']) / 2.0
                            # Save model checkpoint
                            if not os.path.exists(output_dir):
                                os.makedirs(output_dir)
                            # Take care of distributed/parallel training
                            model_to_save = model.module if hasattr(model, "module") else model
                            model_to_save.save_pretrained(output_dir)
                            tokenizer.save_pretrained(output_dir)
                            torch.save(args, os.path.join(output_dir, "training_args.bin"))
                            logger.info("Saving model checkpoint to %s", output_dir)
                            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                            logger.info("Saving optimizer and scheduler states to %s", output_dir)
                    else:
                        # Periodic snapshot in a step-numbered directory.
                        output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        # Take care of distributed/parallel training
                        model_to_save = model.module if hasattr(model, "module") else model
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
                        torch.save(args, os.path.join(output_dir, "training_args.bin"))
                        logger.info("Saving model checkpoint to %s", output_dir)
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step
19,293 | import os
import sys
import faiss
import tempfile
import numpy as np
import faiss
def knn(x, y, k, use_gpu, dist='cosine'):
    """Dispatch k-nearest-neighbour search to the GPU or CPU backend.

    Note: the GPU backend ignores the ``dist`` argument.
    """
    if use_gpu:
        return knnGPU(x, y, k)
    return knnCPU(x, y, k, dist)
def score(x, y, fwd_mean, bwd_mean, margin, dist='cosine'):
    """Margin score of a candidate pair of embeddings.

    Similarity is the dot product for 'cosine' (vectors are assumed
    pre-normalized), or ``1 / (1 + squared L2 distance)`` otherwise; the
    ``margin`` callable compares it against the average neighborhood mean.
    """
    neighborhood_avg = (fwd_mean + bwd_mean) / 2
    if dist == 'cosine':
        sim = x.dot(y)
    else:
        sim = 1 / (1 + ((x - y) ** 2).sum())
    return margin(sim, neighborhood_avg)
def score_candidates(x, y, candidate_inds, fwd_mean, bwd_mean, margin, dist='cosine'):
    """Score every (source i, candidate target k) pair in ``candidate_inds``.

    Returns an array with the same shape as ``candidate_inds`` where entry
    (i, j) is ``score(x[i], y[candidate_inds[i, j]], ...)``.
    """
    print(' - scoring {:d} candidates using {}'.format(x.shape[0], dist))
    scores = np.zeros(candidate_inds.shape)
    for i, row in enumerate(candidate_inds):
        for j, k in enumerate(row):
            scores[i, j] = score(x[i], y[k], fwd_mean[i], bwd_mean[k], margin, dist)
    return scores
def text_load_unify(fname, encoding, unify=True):
    """Load sentences from ``fname``, optionally deduplicating them.

    Args:
        fname: path to the text file, one sentence per line.
        encoding: text encoding (decoded with errors='surrogateescape').
        unify: if True, ``sents`` keeps only the first occurrence of each
            distinct line; if False it keeps every line. In both cases
            ``inds`` maps each input line to its deduplicated index.

    Returns:
        (inds, sents) — per-line deduplicated indices and the sentence list
        (trailing newline stripped).
    """
    print(' - loading texts {:s}: '.format(fname), end='')
    inds = []
    sents = []
    sent2ind = {}
    n = 0
    nu = 0
    # Fix: close the file deterministically (the original leaked the handle).
    with open(fname, encoding=encoding, errors='surrogateescape') as fin:
        for line in fin:
            new_ind = len(sent2ind)
            inds.append(sent2ind.setdefault(line, new_ind))
            # Identical bodies in the original if/else are merged: append
            # unless we are unifying and the line was already seen.
            if not unify or inds[-1] == new_ind:
                sents.append(line[:-1])
                nu += 1
            n += 1
    print('{:d} lines, {:d} unique'.format(n, nu))
    del sent2ind
    return inds, sents
def unique_embeddings(emb, ind):
    """Collapse per-line embeddings to one row per distinct sentence index.

    For each deduplicated index, the embedding of its *last* occurrence in
    ``ind`` is kept; rows are returned in index order 0..len-1.
    """
    last_pos = {}
    for pos, sent_id in enumerate(ind):
        last_pos[sent_id] = pos
    print(' - unify embeddings: {:d} -> {:d}'.format(len(emb), len(last_pos)))
    return emb[[last_pos[i] for i in range(len(last_pos))]]
def shift_embeddings(x, y):
    """Translate each embedding space toward the other's centroid.

    Returns (x shifted into y's space, y shifted into x's space).
    """
    print(' - shift embeddings')
    offset = x.mean(axis=0) - y.mean(axis=0)
    return x - offset, y + offset
def mine_bitext(x, y, src_text_file, trg_text_file, output_file, mode='mine',
                retrieval='max', margin='ratio', threshold=0,
                neighborhood=4, use_gpu=False, encoding='utf-8', dist='cosine', use_shift_embeds=False):
    """Mine, score or search parallel sentence pairs via margin-based kNN.

    Args:
        x, y: source / target sentence embeddings (numpy, one row per line).
        src_text_file, trg_text_file: the corresponding text files.
        output_file: where tab-separated results are written.
        mode: 'mine' (extract pairs), 'score' (score given aligned pairs)
            or 'search' (nearest target for each source).
        retrieval: 'fwd', 'bwd', 'intersect' or 'max' candidate strategy.
        margin: 'absolute', 'distance' or 'ratio' margin function.
        threshold: minimum score for emitted pairs in 'mine'/'max' mode.
        neighborhood: k used for the kNN neighborhood means.
        use_gpu: use the faiss GPU backend.
        encoding: text file encoding.
        dist: 'cosine' (embeddings get L2-normalized) or squared-L2-based.
        use_shift_embeds: shift each space toward the other's centroid first.
    """
    src_inds, src_sents = text_load_unify(src_text_file, encoding, True)
    trg_inds, trg_sents = text_load_unify(trg_text_file, encoding, True)
    x = unique_embeddings(x, src_inds)
    y = unique_embeddings(y, trg_inds)
    if dist == 'cosine':
        faiss.normalize_L2(x)
        faiss.normalize_L2(y)
    if use_shift_embeds:
        x2y, y2x = shift_embeddings(x, y)
    # calculate knn in both directions
    # Fix: the original used `retrieval is not 'bwd'` / `is not 'fwd'`,
    # comparing strings by identity; that only works by CPython interning
    # accident and raises a SyntaxWarning on Python 3.8+. Use `!=`.
    if retrieval != 'bwd':
        print(' - perform {:d}-nn source against target, dist={}'.format(neighborhood, dist))
        if use_shift_embeds:
            # project x to y space, and search k-nn ys for each x
            x2y_sim, x2y_ind = knn(x2y, y, min(y.shape[0], neighborhood), use_gpu, dist)
            x2y_mean = x2y_sim.mean(axis=1)
        else:
            x2y_sim, x2y_ind = knn(x, y, min(y.shape[0], neighborhood), use_gpu, dist)
            x2y_mean = x2y_sim.mean(axis=1)
    if retrieval != 'fwd':
        print(' - perform {:d}-nn target against source, dist={}'.format(neighborhood, dist))
        if use_shift_embeds:
            y2x_sim, y2x_ind = knn(y2x, x, min(x.shape[0], neighborhood), use_gpu, dist)
            y2x_mean = y2x_sim.mean(axis=1)
        else:
            y2x_sim, y2x_ind = knn(y, x, min(x.shape[0], neighborhood), use_gpu, dist)
            y2x_mean = y2x_sim.mean(axis=1)
    # margin function
    if margin == 'absolute':
        margin = lambda a, b: a
    elif margin == 'distance':
        margin = lambda a, b: a - b
    else:  # margin == 'ratio':
        margin = lambda a, b: a / b
    fout = open(output_file, mode='w', encoding=encoding, errors='surrogateescape')
    if mode == 'search':
        print(' - Searching for closest sentences in target')
        print(' - writing alignments to {:s}'.format(output_file))
        scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
        best = x2y_ind[np.arange(x.shape[0]), scores.argmax(axis=1)]
        nbex = x.shape[0]
        ref = np.linspace(0, nbex-1, nbex).astype(int)  # [0, nbex)
        err = nbex - np.equal(best.reshape(nbex), ref).astype(int).sum()
        print(' - errors: {:d}={:.2f}%'.format(err, 100*err/nbex))
        for i in src_inds:
            print(trg_sents[best[i]], file=fout)
    elif mode == 'score':
        for i, j in zip(src_inds, trg_inds):
            s = score(x[i], y[j], x2y_mean[i], y2x_mean[j], margin)
            print(s, src_sents[i], trg_sents[j], sep='\t', file=fout)
    elif mode == 'mine':
        print(' - mining for parallel data')
        if use_shift_embeds:
            fwd_scores = score_candidates(x2y, y, x2y_ind, x2y_mean, y2x_mean, margin)
            bwd_scores = score_candidates(y2x, x, y2x_ind, y2x_mean, x2y_mean, margin)
        else:
            fwd_scores = score_candidates(x, y, x2y_ind, x2y_mean, y2x_mean, margin)
            bwd_scores = score_candidates(y, x, y2x_ind, y2x_mean, x2y_mean, margin)
        fwd_best = x2y_ind[np.arange(x.shape[0]), fwd_scores.argmax(axis=1)]
        bwd_best = y2x_ind[np.arange(y.shape[0]), bwd_scores.argmax(axis=1)]
        print(' - writing alignments to {:s}'.format(output_file))
        if threshold > 0:
            print(' - with threshold of {:f}'.format(threshold))
        if retrieval == 'fwd':
            for i, j in enumerate(fwd_best):
                print(fwd_scores[i].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
        if retrieval == 'bwd':
            for j, i in enumerate(bwd_best):
                print(bwd_scores[j].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
        if retrieval == 'intersect':
            # Keep only pairs that are each other's best match.
            for i, j in enumerate(fwd_best):
                if bwd_best[j] == i:
                    print(fwd_scores[i].max(), src_sents[i], trg_sents[j], sep='\t', file=fout)
        if retrieval == 'max':
            # Union of both directions, greedily matched by descending score.
            indices = np.stack((np.concatenate((np.arange(x.shape[0]), bwd_best)),
                                np.concatenate((fwd_best, np.arange(y.shape[0])))), axis=1)
            scores = np.concatenate((fwd_scores.max(axis=1), bwd_scores.max(axis=1)))
            seen_src, seen_trg = set(), set()
            for i in np.argsort(-scores):
                src_ind, trg_ind = indices[i]
                if not src_ind in seen_src and not trg_ind in seen_trg:
                    seen_src.add(src_ind)
                    seen_trg.add(trg_ind)
                    if scores[i] > threshold:
                        print(scores[i], src_sents[src_ind], trg_sents[trg_ind], sep='\t', file=fout)
    fout.close()
19,294 | import os
import sys
import faiss
import tempfile
import numpy as np
import faiss
def bucc_optimize(candidate2score, gold):
    """Find the score threshold that maximizes F1 against gold alignments.

    Args:
        candidate2score: dict mapping (src_id, trg_id) pairs to scores.
        gold: set of gold alignments as tab-joined "src_id\\ttrg_id" strings.

    Returns:
        The threshold halfway between the last counted candidate and the next
        one at the best F1 (or the last candidate's own score when the best
        F1 occurs at the very end of the ranking); 0 if F1 never exceeds 0.
    """
    # Sweep candidates from highest to lowest score, tracking precision/recall.
    items = sorted(candidate2score.items(), key=lambda x: -x[1])
    ngold = len(gold)
    nextract = ncorrect = 0
    threshold = 0
    best_f1 = 0
    for i in range(len(items)):
        nextract += 1
        if '\t'.join(items[i][0]) in gold:
            ncorrect += 1
        if ncorrect > 0:
            precision = ncorrect / nextract
            recall = ncorrect / ngold
            f1 = 2 * precision * recall / (precision + recall)
            if f1 > best_f1:
                best_f1 = f1
                # Fix: the original indexed items[i + 1] unconditionally and
                # raised IndexError when the best F1 was at the last candidate.
                if i + 1 < len(items):
                    threshold = (items[i][1] + items[i + 1][1]) / 2
                else:
                    threshold = items[i][1]
    return threshold
def bucc_extract(cand2score, th, fname, encoding='utf-8'):
    """Keep every candidate pair scoring at least ``th``.

    Args:
        cand2score: dict mapping (src, trg) string pairs to scores.
        th: minimum score (inclusive) for a pair to be kept.
        fname: optional output path; kept pairs are also written there,
            one tab-separated pair per line.
        encoding: output file encoding. Fix: the original referenced the
            undefined global ``args.encoding`` (NameError whenever ``fname``
            was given); the encoding is now an explicit parameter.

    Returns:
        list of kept pairs as "src\\ttrg" strings, in dict order.
    """
    bitexts = []
    of = open(fname, 'w', encoding=encoding) if fname else None
    try:
        for (src, trg), score in cand2score.items():
            if score >= th:
                bitexts.append(src + '\t' + trg)
                if of is not None:
                    of.write(src + '\t' + trg + '\n')
    finally:
        if of is not None:
            of.close()
    return bitexts
def read_candidate2score(candidates_file, src_text_file, trg_text_file, src_id_file, trg_id_file, encoding='utf-8'):
    """Read mined candidates and map them to (src_id, trg_id) -> best score.

    Candidates whose sentences are not found in the id maps are dropped;
    duplicate id pairs keep their maximum score.
    """
    print(' - reading sentences {}'.format(candidates_file))
    src_sent2id = read_sent2id(src_text_file, src_id_file, encoding)
    trg_sent2id = read_sent2id(trg_text_file, trg_id_file, encoding)
    print(' - reading candidates {}'.format(candidates_file))
    candidate2score = {}
    with open(candidates_file, encoding=encoding, errors='surrogateescape') as f:
        for line in f:
            score_str, src, trg = line.split('\t')
            src = src.strip()
            trg = trg.strip()
            if src not in src_sent2id or trg not in trg_sent2id:
                continue
            pair = (src_sent2id[src], trg_sent2id[trg])
            sc = float(score_str)
            candidate2score[pair] = max(sc, candidate2score.get(pair, sc))
    return candidate2score
def bucc_eval(candidates_file, gold_file, src_file, trg_file, src_id_file, trg_id_file, predict_file, threshold=None, encoding='utf-8'):
    """Extract BUCC bitext above a threshold and, if gold is given, score it.

    When ``gold_file`` is provided the threshold is tuned on the gold
    alignments (any passed ``threshold`` is ignored); otherwise the given
    threshold is used and no metrics can be computed.

    Returns:
        dict with 'best-threshold', 'precision', 'recall', 'F1' (percent),
        or None when no gold file is available.
    """
    candidate2score = read_candidate2score(candidates_file, src_file, trg_file, src_id_file, trg_id_file, encoding)
    if threshold is not None and gold_file is None:
        print(' - using threshold {}'.format(threshold))
    else:
        print(' - optimizing threshold on gold alignments {}'.format(gold_file))
        gold = {line.strip() for line in open(gold_file)}
        threshold = bucc_optimize(candidate2score, gold)
    bitexts = bucc_extract(candidate2score, threshold, predict_file)
    if gold_file is None:
        return None
    ncorrect = len(gold.intersection(bitexts))
    if ncorrect > 0:
        precision = ncorrect / len(bitexts)
        recall = ncorrect / len(gold)
        f1 = 2 * precision * recall / (precision + recall)
    else:
        precision = recall = f1 = 0
    print(' - best threshold={:f}: precision={:.2f}, recall={:.2f}, F1={:.2f}'
          .format(threshold, 100 * precision, 100 * recall, 100 * f1))
    return {'best-threshold': threshold, 'precision': 100 * precision,
            'recall': 100 * recall, 'F1': 100 * f1}
19,295 | import os
import sys
import faiss
import tempfile
import numpy as np
import faiss
def similarity_search(x, y, dim, normalize=False):
    """Return, for each row of ``y``, the index of its nearest row in ``x``.

    Uses an exact (flat) L2 faiss index of dimensionality ``dim``; with
    ``normalize`` both sides are L2-normalized in place first, making the
    L2 ranking equivalent to cosine similarity.

    Returns:
        int array of shape (len(y), 1) with nearest-neighbor indices into x.
    """
    # Fix: dropped the unused local `num = x.shape[0]` from the original.
    idx = faiss.IndexFlatL2(dim)
    if normalize:
        faiss.normalize_L2(x)
        faiss.normalize_L2(y)
    idx.add(x)
    scores, prediction = idx.search(y, 1)
    return prediction
19,296 | import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from tqdm import tqdm, trange
from transformers import (
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaModel,
)
from processors.utils import InputFeatures
from utils_retrieve import mine_bitext, bucc_eval, similarity_search
logger = logging.getLogger(__name__)
def load_embeddings(embed_file, num_sentences=None):
def prepare_batch(sentences, tokenizer, device="cuda", max_length=512, lang='en', langid=None, use_local_max_length=True, pool_skip_special_token=False):
def tokenize_text(text_file, tok_file, tokenizer, lang=None):
def mean_pool_embedding(all_layer_outputs, masks):
def cls_pool_embedding(all_layer_outputs):
def extract_embeddings(args, model, config, tokenizer, text_file, tok_file, embed_file, lang='en', pool_type='mean'):
    """Embed every sentence in ``text_file`` with ``model`` and cache to disk.

    Produces one float32 matrix of shape (num_sents, args.embed_size) for
    each of the last ``args.num_layers`` hidden layers. If all per-layer
    ``.npy`` cache files already exist, they are loaded and returned.

    Args:
        args: namespace providing num_layers, batch_size, max_seq_length,
            embed_size, device and pool_skip_special_token.
        model: encoder whose output unpacks as (last_hidden, first_token,
            all_hidden). NOTE(review): assumes the model was created with
            hidden-state outputs enabled -- confirm at the call site.
        config: model config (unused here; kept for signature parity).
        tokenizer: tokenizer used by ``tokenize_text``.
        text_file: input file, one sentence per line.
        tok_file: cache path for the tokenized sentences.
        embed_file: prefix for the per-layer ``.npy`` cache files; pass None
            to skip saving.
        lang: language code forwarded to tokenization / batch preparation.
        pool_type: 'cls' takes the first token; anything else mean-pools.

    Returns:
        list of ``args.num_layers`` numpy arrays (num_sents, embed_size).
    """
    num_embeds = args.num_layers
    all_embed_files = ["{}_{}.npy".format(embed_file, i) for i in range(num_embeds)]
    if all(os.path.exists(f) for f in all_embed_files):
        logger.info('loading files from {}'.format(all_embed_files))
        return [load_embeddings(f) for f in all_embed_files]
    # Language id is hard-coded to 0 -- presumably unused by XLM-R; confirm.
    langid = 0
    model.eval()
    sent_toks = tokenize_text(text_file, tok_file, tokenizer, lang)
    max_length = max([len(s) for s in sent_toks])
    logger.info('max length of tokenized text = {}'.format(max_length))
    batch_size = args.batch_size
    num_batch = int(np.ceil(len(sent_toks) * 1.0 / batch_size))
    num_sents = len(sent_toks)
    # Pre-allocate one (num_sents, embed_size) buffer per extracted layer.
    all_embeds = [np.zeros(shape=(num_sents, args.embed_size), dtype=np.float32) for _ in range(num_embeds)]
    for i in tqdm(range(num_batch), desc='Batch'):
        start_index = i * batch_size
        end_index = min((i + 1) * batch_size, num_sents)
        batch, pool_mask = prepare_batch(sent_toks[start_index: end_index],
                                         tokenizer,
                                         args.device,
                                         args.max_seq_length,
                                         lang=lang,
                                         langid=langid,
                                         pool_skip_special_token=args.pool_skip_special_token)
        with torch.no_grad():
            outputs = model(**batch)
            last_layer_outputs, first_token_outputs, all_layer_outputs = outputs
            # get the pool embedding
            if pool_type == 'cls':
                all_batch_embeds = cls_pool_embedding(all_layer_outputs[-args.num_layers:])
            else:
                all_batch_embeds = []
                all_layer_outputs = all_layer_outputs[-args.num_layers:]
                all_batch_embeds.extend(mean_pool_embedding(all_layer_outputs, pool_mask))
        for embeds, batch_embeds in zip(all_embeds, all_batch_embeds):
            embeds[start_index: end_index] = batch_embeds.cpu().numpy().astype(np.float32)
        del last_layer_outputs, first_token_outputs, all_layer_outputs
        # Free GPU memory between batches; results are kept on the CPU.
        torch.cuda.empty_cache()
    if embed_file is not None:
        for file, embeds in zip(all_embed_files, all_embeds):
            logger.info('save embed {} to file {}'.format(embeds.shape, file))
            np.save(file, embeds)
    return all_embeds
19,297 | import argparse
import glob
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from tqdm import tqdm, trange
from transformers import (
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaModel,
)
from processors.utils import InputFeatures
from utils_retrieve import mine_bitext, bucc_eval, similarity_search
def concate_embedding(all_embeds, last_k):
    """Concatenate the last `last_k` layer embeddings along the feature axis.

    Args:
        all_embeds: list of per-layer arrays, each of shape (B, D).
        last_k: how many trailing layers to concatenate.

    Returns:
        Array of shape (B, D) when last_k == 1, else (B, last_k * D).
    """
    if last_k == 1:
        # A single layer requested: nothing to concatenate.
        return all_embeds[-1]
    # Stack the trailing layers side by side -> (B, last_k * D).
    return np.hstack(all_embeds[-last_k:])
19,298 | from __future__ import print_function
from collections import Counter
import string
import re
import argparse
import json
import sys
import unicodedata
def f1_score(prediction, ground_truth, lang):
def exact_match_score(prediction, ground_truth, lang):
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths, lang):
def evaluate(dataset, predictions, lang):
    """Compute exact-match and F1 scores over a SQuAD-style dataset.

    Args:
        dataset: list of articles, each with 'paragraphs' -> 'qas' entries.
        predictions: dict mapping question id -> predicted answer string.
        lang: language code forwarded to the scoring helpers.

    Returns:
        dict with 'exact_match' and 'f1' percentages (0-100).
    """
    f1 = exact_match = total = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                if qa['id'] not in predictions:
                    # Missing predictions score 0 but are still counted in `total`.
                    message = 'Unanswered question ' + qa['id'] + \
                              ' will receive score 0.'
                    print(message, file=sys.stderr)
                    continue
                ground_truths = list(map(lambda x: x['text'], qa['answers']))
                prediction = predictions[qa['id']]
                # Score against the best-matching ground truth answer.
                exact_match += metric_max_over_ground_truths(
                    exact_match_score, prediction, ground_truths, lang)
                f1 += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths, lang)
    if total == 0:
        # Fix: previously raised ZeroDivisionError on an empty dataset.
        return {'exact_match': 0.0, 'f1': 0.0}
    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total
    return {'exact_match': exact_match, 'f1': f1}
19,299 | import json
import logging
import os
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.tokenization_bert import whitespace_tokenize
from transformers import DataProcessor
The provided code snippet includes necessary dependencies for implementing the `_check_is_max_context` function. Write a Python function `def _check_is_max_context(doc_spans, cur_span_index, position)` to solve the following problem:
Check if this is the 'max context' doc span for the token.
Here is the function:
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index | Check if this is the 'max context' doc span for the token. |
19,300 | import json
import logging
import os
from functools import partial
from multiprocessing import Pool, cpu_count
import numpy as np
from tqdm import tqdm
from transformers.file_utils import is_tf_available, is_torch_available
from transformers.tokenization_bert import whitespace_tokenize
from transformers import DataProcessor
def _is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False | null |
19,301 | import argparse
import glob
import logging
import os
import random
import shutil, pickle
import numpy as np
import torch
from torch.utils.data import DataLoader, TensorDataset
from torch.utils.data import RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from transformers import (
WEIGHTS_NAME,
AdamW,
XLMRobertaConfig,
XLMRobertaTokenizer,
XLMRobertaForSequenceClassification,
get_linear_schedule_with_warmup,
)
from processors.utils import convert_examples_to_features, language_list
from processors.xnli import XnliProcessor
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
from tensorboardX import SummaryWriter
logger = logging.getLogger(__name__)
def set_seed(args):
    """Seed the Python, NumPy and PyTorch RNGs for reproducibility.

    When `args.n_gpu` > 0, every visible CUDA device is seeded as well.
    """
    seed = args.seed
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if args.n_gpu > 0:
        # Multi-GPU runs need every device's generator seeded.
        torch.cuda.manual_seed_all(seed)
def evaluate(args, model, tokenizer, split='train', language='en', lang2id=None, prefix="", output_file=None, label_list=None, output_only_prediction=True):
    """Evaluate the model on one split/language of the classification task.

    Args:
        args: run configuration (task_name, output_dir, batch sizes, device,
            n_gpu, local_rank, output_mode, ...).
        model: classification model; wrapped in DataParallel when n_gpu > 1.
        tokenizer: used to detokenize inputs when writing predictions.
        split: dataset split to evaluate ('train'/'dev'/'test').
        language: evaluation language code.
        lang2id: optional language-to-id mapping forwarded to data loading.
        prefix: label used only in log messages.
        output_file: if given, predictions are written there.
        label_list: optional id -> label-string mapping for the output file.
        output_only_prediction: write only the prediction (True) or also the
            gold label and the detokenized sentence (False).

    Returns:
        dict of metric name -> value from compute_metrics.
    """
    eval_task_names = (args.task_name,)
    eval_outputs_dirs = (args.output_dir,)
    results = {}
    for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs):
        eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, split=split, language=language, lang2id=lang2id, evaluate=True)
        if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(eval_output_dir)
        args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
        # Note that DistributedSampler samples randomly
        eval_sampler = SequentialSampler(eval_dataset)
        eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size)
        # multi-gpu eval
        if args.n_gpu > 1:
            model = torch.nn.DataParallel(model)
        # Eval!
        logger.info("***** Running evaluation {} {} *****".format(prefix, language))
        logger.info(" Num examples = %d", len(eval_dataset))
        logger.info(" Batch size = %d", args.eval_batch_size)
        eval_loss = 0.0
        nb_eval_steps = 0
        preds = None
        out_label_ids = None
        sentences = None
        for batch in tqdm(eval_dataloader, desc="Evaluating"):
            model.eval()
            batch = tuple(t.to(args.device) for t in batch)
            with torch.no_grad():
                inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
                outputs = model(**inputs)
                tmp_eval_loss, logits = outputs[:2]
                eval_loss += tmp_eval_loss.mean().item()
            nb_eval_steps += 1
            # Accumulate logits, labels and input ids across batches on CPU.
            if preds is None:
                preds = logits.detach().cpu().numpy()
                out_label_ids = inputs["labels"].detach().cpu().numpy()
                sentences = inputs["input_ids"].detach().cpu().numpy()
            else:
                preds = np.append(preds, logits.detach().cpu().numpy(), axis=0)
                out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0)
                sentences = np.append(sentences, inputs["input_ids"].detach().cpu().numpy(), axis=0)
        eval_loss = eval_loss / nb_eval_steps
        if args.output_mode == "classification":
            # Convert per-class logits into hard class predictions.
            preds = np.argmax(preds, axis=1)
        else:
            raise ValueError("No other `output_mode` for XNLI.")
        result = compute_metrics(preds, out_label_ids)
        results.update(result)
        if output_file:
            logger.info("***** Save prediction ******")
            with open(output_file, 'w') as fout:
                pad_token_id = tokenizer.pad_token_id
                # Strip padding, then map token ids back to token strings.
                sentences = sentences.astype(int).tolist()
                sentences = [[w for w in s if w != pad_token_id]for s in sentences]
                sentences = [tokenizer.convert_ids_to_tokens(s) for s in sentences]
                for p, l, s in zip(list(preds), list(out_label_ids), sentences):
                    s = ' '.join(s)
                    if label_list:
                        p = label_list[p]
                        l = label_list[l]
                    if output_only_prediction:
                        fout.write(str(p) + '\n')
                    else:
                        fout.write('{}\t{}\t{}\n'.format(p, l, s))
        logger.info("***** Eval (split={}) results {} {} *****".format(split, prefix, language))
        for key in sorted(result.keys()):
            logger.info(" %s = %s", key, str(result[key]))
    return results
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args, train_dataset, model, tokenizer, lang2id=None)` to solve the following problem:
Train the model.
Here is the function:
def train(args, train_dataset, model, tokenizer, lang2id=None):
    """Train the model.

    Handles optimizer/scheduler setup, optional fp16 (apex), multi-GPU and
    distributed wrappers, resumption from a checkpoint path, periodic
    logging/evaluation, and checkpoint saving (either every checkpoint or
    only the best dev-accuracy one).

    Returns:
        (global_step, average training loss, best dev score, best checkpoint dir)
    """
    if args.local_rank in [-1, 0]:
        # Recreate the TensorBoard log dir from scratch on the main process.
        tb_dir = os.path.join(args.output_dir, "tb_log")
        if os.path.isdir(tb_dir):
            shutil.rmtree(tb_dir)
        tb_writer = SummaryWriter(tb_dir)
    args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
    train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
    # Either a fixed number of optimizer steps, or epochs * steps-per-epoch.
    if args.max_steps > 0:
        t_total = args.max_steps
        args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
    else:
        t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs
    # Prepare optimizer and schedule (linear warmup and decay);
    # bias and LayerNorm weights are exempt from weight decay.
    no_decay = ["bias", "LayerNorm.weight"]
    optimizer_grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": args.weight_decay,
        },
        {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
    )
    # Check if saved optimizer or scheduler states exist
    if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile(
        os.path.join(args.model_name_or_path, "scheduler.pt")
    ):
        # Load in optimizer and scheduler states
        optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt")))
        scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt")))
    if args.fp16:
        try:
            from apex import amp
        except ImportError:
            raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level)
    # multi-gpu training (should be after apex fp16 initialization)
    if args.n_gpu > 1:
        model = torch.nn.DataParallel(model)
    # Distributed training (should be after apex fp16 initialization)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    # Train!
    logger.info("***** Running training *****")
    logger.info(" Num examples = %d", len(train_dataset))
    logger.info(" Num Epochs = %d", args.num_train_epochs)
    logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size)
    logger.info(
        " Total train batch size (w. parallel, distributed & accumulation) = %d",
        args.train_batch_size
        * args.gradient_accumulation_steps
        * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
    )
    logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
    logger.info(" Total optimization steps = %d", t_total)
    global_step = 0
    epochs_trained = 0
    steps_trained_in_current_epoch = 0
    # Check if continuing training from a checkpoint
    if os.path.exists(args.model_name_or_path):
        try:
            # set global_step to gobal_step of last saved checkpoint from model path
            # (checkpoint dirs are named "checkpoint-<step>").
            global_step = int(args.model_name_or_path.split("-")[-1].split("/")[0])
            epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps)
            steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps)
            logger.info(" Continuing training from checkpoint, will skip to saved global_step")
            logger.info(" Continuing training from epoch %d", epochs_trained)
            logger.info(" Continuing training from global step %d", global_step)
            logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch)
        except ValueError:
            logger.info(" Starting fine-tuning.")
    best_score = 0
    best_checkpoint = None
    tr_loss, logging_loss = 0.0, 0.0
    model.zero_grad()
    train_iterator = trange(
        epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]
    )
    set_seed(args)  # Added here for reproductibility
    for _ in train_iterator:
        epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])
        for step, batch in enumerate(epoch_iterator):
            # Skip past any already trained steps if resuming training
            if steps_trained_in_current_epoch > 0:
                steps_trained_in_current_epoch -= 1
                continue
            model.train()
            batch = tuple(t.to(args.device) for t in batch)
            inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
            outputs = model(**inputs)
            loss = outputs[0]
            if args.n_gpu > 1:
                loss = loss.mean()  # mean() to average on multi-gpu parallel training
            if args.gradient_accumulation_steps > 1:
                loss = loss / args.gradient_accumulation_steps
            if args.fp16:
                with amp.scale_loss(loss, optimizer) as scaled_loss:
                    scaled_loss.backward()
            else:
                loss.backward()
            tr_loss += loss.item()
            if (step + 1) % args.gradient_accumulation_steps == 0:
                # One optimizer step per `gradient_accumulation_steps` batches.
                if args.fp16:
                    torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
                else:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
                optimizer.step()
                scheduler.step()  # Update learning rate schedule
                model.zero_grad()
                global_step += 1
                if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0:
                    # Log metrics
                    tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step)
                    tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step)
                    logging_loss = tr_loss
                    # Only evaluate on single GPU otherwise metrics may not average well
                    if (args.local_rank == -1 and args.evaluate_during_training):
                        results = evaluate(args, model, tokenizer, split=args.train_split, language=args.train_language, lang2id=lang2id)
                        for key, value in results.items():
                            tb_writer.add_scalar("eval_{}".format(key), value, global_step)
                if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0:
                    if args.eval_test_set:
                        # Evaluate on the test split of every prediction language
                        # and append the results to one running report file.
                        output_predict_file = os.path.join(args.output_dir, 'eval_test_results')
                        total = total_correct = 0.0
                        with open(output_predict_file, 'a') as writer:
                            writer.write('\n======= Test Predict using the model from {}/checkpoint-{}:\n'.format(args.output_dir, global_step))
                            total_test_accs = []
                            for language in args.predict_languages.split(','):
                                result = evaluate(args, model, tokenizer, split=args.test_split, language=language, lang2id=lang2id, prefix='checkpoint-'+str(global_step))
                                writer.write('{}={}\n'.format(language, result['acc']))
                                total_test_accs.append(result['acc'])
                                total += result['num']
                                total_correct += result['correct']
                            average_test_acc = sum(total_test_accs) / len(total_test_accs)
                            writer.write(" Avg Test accuracy [total_correct/total] : {}\n".format(total_correct / total))
                            writer.write(" Avg Test accuracy [avg(total_test_accs)]: {}\n".format(average_test_acc))
                    if args.save_only_best_checkpoint:
                        # Keep a single "checkpoint-best" dir, overwritten whenever
                        # the dev accuracy on the training language improves.
                        result = evaluate(args, model, tokenizer, split='dev', language=args.train_language, lang2id=lang2id, prefix=str(global_step))
                        logger.info(" Dev accuracy {} = {}".format(args.train_language, result['acc']))
                        if result['acc'] > best_score:
                            logger.info(" result['acc']={} > best_score={}".format(result['acc'], best_score))
                            output_dir = os.path.join(args.output_dir, "checkpoint-best")
                            best_checkpoint = output_dir
                            best_score = result['acc']
                            # Save model checkpoint
                            if not os.path.exists(output_dir):
                                os.makedirs(output_dir)
                            model_to_save = (
                                model.module if hasattr(model, "module") else model
                            )  # Take care of distributed/parallel training
                            model_to_save.save_pretrained(output_dir)
                            tokenizer.save_pretrained(output_dir)
                            torch.save(args, os.path.join(output_dir, "training_args.bin"))
                            logger.info("Saving model checkpoint to %s", output_dir)
                            torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                            torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                            logger.info("Saving optimizer and scheduler states to %s", output_dir)
                    else:
                        # Save model checkpoint
                        output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step))
                        if not os.path.exists(output_dir):
                            os.makedirs(output_dir)
                        model_to_save = (
                            model.module if hasattr(model, "module") else model
                        )  # Take care of distributed/parallel training
                        model_to_save.save_pretrained(output_dir)
                        tokenizer.save_pretrained(output_dir)
                        torch.save(args, os.path.join(output_dir, "training_args.bin"))
                        logger.info("Saving model checkpoint to %s", output_dir)
                        torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
                        torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt"))
                        logger.info("Saving optimizer and scheduler states to %s", output_dir)
            if args.max_steps > 0 and global_step > args.max_steps:
                epoch_iterator.close()
                break
        if args.max_steps > 0 and global_step > args.max_steps:
            train_iterator.close()
            break
    if args.local_rank in [-1, 0]:
        tb_writer.close()
    return global_step, tr_loss / global_step, best_score, best_checkpoint
19,302 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def read_tag(file):
    """Read a tagging file into a list of examples.

    Each example is a list of the non-blank lines in one block; blank lines
    separate examples.

    Args:
        file: path to the tagging file.

    Returns:
        list of examples (each a list of stripped lines).
    """
    labels = []
    example = []
    with open(file, 'r') as f:
        for line in f:
            line = line.strip()
            if line:
                example.append(line)
            else:
                labels.append(example)
                example = []
    # Fix: flush the final example when the file does not end with a blank
    # line — the original silently dropped it.
    if example:
        labels.append(example)
    return labels
19,303 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def read_label(file):
    """Read one label per line from `file`, stripping surrounding whitespace."""
    with open(file, 'r') as fin:
        labels = [line.strip() for line in fin]
    return labels
19,304 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def read_squad(file):
    """Load a SQuAD-style JSON file and return its article list.

    Standard SQuAD files wrap the articles under a top-level 'data' key;
    some files (e.g. prediction dicts) are already the bare structure.

    Args:
        file: path to the JSON file.

    Returns:
        dataset_json['data'] when present, otherwise the parsed JSON object.
    """
    # Removed: unused `expected_version` local and the dead commented-out
    # version check it supported.
    with open(file) as dataset_file:
        dataset_json = json.load(dataset_file)
    if 'data' in dataset_json:
        return dataset_json['data']
    else:
        return dataset_json
19,305 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def accuracy(labels, predictions, language=None):
    """Return percentage accuracy of `predictions` against `labels`.

    Args:
        labels: gold labels.
        predictions: predicted labels (same length as `labels`).
        language: unused; kept for a uniform metric-function signature.

    Returns:
        dict with a single 'accuracy' key, in percent.
    """
    n_correct = sum(1 for p, l in zip(predictions, labels) if p == l)
    # Compute the ratio first, then scale — matches the original float ops.
    ratio = n_correct / len(predictions)
    return {'accuracy': ratio * 100}
19,306 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def f1(labels, predictions, language=None):
def bucc_f1(labels, predictions, language=None):
    """Compute F1/precision/recall (percentages) between tab-separated pair sets.

    Each line is a tab-separated sentence pair; duplicates collapse because
    both sides are converted to sets before comparison.
    """
    gold_pairs = set([tuple(line.split('\t')) for line in labels])
    pred_pairs = set([tuple(line.split('\t')) for line in predictions])
    n_hits = len(gold_pairs & pred_pairs)
    if n_hits == 0:
        # No overlap: all three metrics are zero (avoids division by zero).
        precision = recall = score = 0
    else:
        precision = n_hits / len(pred_pairs)
        recall = n_hits / len(gold_pairs)
        score = 2 * precision * recall / (precision + recall)
    return {'f1': score * 100, 'precision': precision * 100, 'recall': recall * 100}
19,307 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def squad_em_f1(labels, predictions, language=None):
    """Delegate to the official SQuAD evaluation.

    `language` is accepted only for a uniform metric-function signature and
    is ignored here.
    """
    result = squad_eval(labels, predictions)
    return result
19,308 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
def mlqa_em_f1(labels, predictions, language):
    """Delegate to the official MLQA evaluation, which is language-aware.

    Args:
        labels: gold dataset structure.
        predictions: dict mapping question id -> predicted answer.
        language: required 2-char language code.

    Raises:
        ValueError: if `language` is None.
    """
    if language is None:
        # Fix: previously printed a message and called exit(0), killing the
        # whole process with a *success* exit status; raise so callers can
        # detect and handle the error instead.
        raise ValueError('required 2-char language code for the argument `language`')
    return mlqa_eval(labels, predictions, language)
19,309 | import argparse
from seqeval.metrics import precision_score, recall_score, f1_score
import sys
import os
from collections import defaultdict
import json
from third_party.evaluate_squad import evaluate as squad_eval
from third_party.evaluate_mlqa import evaluate as mlqa_eval
# Task groups used when reporting aggregate benchmark scores.
GROUP2TASK = {
    "classification": ["pawsx", "xnli"],
    "tagging": ["udpos", "panx"],
    "qa": ["xquad", "mlqa", "tydiqa"],
    "retrieval": ["bucc2018", "tatoeba"],
}
# Evaluation languages (2-letter ISO codes) for each task.
TASK2LANGS = {
    "pawsx": "de,en,es,fr,ja,ko,zh".split(","),
    "xnli": "ar,bg,de,el,en,es,fr,hi,ru,sw,th,tr,ur,vi,zh".split(","),
    "panx": "ar,he,vi,id,jv,ms,tl,eu,ml,ta,te,af,nl,en,de,el,bn,hi,mr,ur,fa,fr,it,pt,es,bg,ru,ja,ka,ko,th,sw,yo,my,zh,kk,tr,et,fi,hu".split(","),
    "udpos": "af,ar,bg,de,el,en,es,et,eu,fa,fi,fr,he,hi,hu,id,it,ja,kk,ko,mr,nl,pt,ru,ta,te,th,tl,tr,ur,vi,yo,zh".split(","),
    "bucc2018": "de,fr,ru,zh".split(","),
    "tatoeba": "ar,he,vi,id,jv,tl,eu,ml,ta,te,af,nl,de,el,bn,hi,mr,ur,fa,fr,it,pt,es,bg,ru,ja,ka,ko,th,sw,zh,kk,tr,et,fi,hu".split(","),
    "xquad": "en,es,de,el,ru,tr,ar,vi,th,zh,hi".split(","),
    "mlqa": "en,es,de,ar,hi,vi,zh".split(","),
    "tydiqa": "en,ar,bn,fi,id,ko,ru,sw,te".split(","),
}
def evaluate_one_task(prediction_file, label_file, task, language=None):
    """Evaluate predictions for one task/language pair.

    Args:
        prediction_file (string): path to the prediction file.
        label_file (string): path to the ground-truth file.
        task (string): task name; selects the reader and metric functions.
        language (string): 2-char language code (required e.g. by MLQA).

    Return:
        result (dict): metric name -> score.

    For classification tasks both files contain one example per line as:
    ``[label]\t[sentence1]\t[sentence2]``
    """
    predictions = READER_FUNCTION[task](prediction_file)
    labels = READER_FUNCTION[task](label_file)
    if task not in ['bucc2018', 'mlqa', 'tydiqa', 'xquad']:
        # QA/retrieval readers return nested structures whose top-level
        # lengths need not match, so only line-based tasks are checked.
        assert len(predictions) == len(labels), 'Number of examples in {} and {} not matched in {} task'.format(prediction_file, label_file, task)
    result = METRIC_FUNCTION[task](labels, predictions, language)
    return result
The provided code snippet includes necessary dependencies for implementing the `evaluate` function. Write a Python function `def evaluate(prediction_folder, label_folder, verbose=False)` to solve the following problem:
Evaluate on all tasks if available. Args: prediction_folder (string): prediction folder that contains each task's prediction in each subfolder. label_folder (string): label folder that contains each task's ground-truth label in each subfolder. Return: overall_scores (dict): a dictionary with sub-group scores. key: group label. detailed_scores (dict): a dictionary with all detailed scores. key: task label.
Here is the function:
def evaluate(prediction_folder, label_folder, verbose=False):
    """Evaluate on all tasks if available.
    Args:
        prediction_folder (string): prediction folder that contains each task's prediction in each subfolder.
        label_folder (string): label folder that contains each task's ground-truth label in each subfolder.
        verbose (bool): print a per-task summary line when True.
    Return:
        overall_scores (dict): a dictionary with sub-group scores. key: group label.
        detailed_scores (dict): a dictionary with all detailed scores. key: task label.
    """
    # A task is evaluated only when BOTH folders contain a subfolder for it.
    prediction_tasks = next(os.walk(prediction_folder))[1]
    label_tasks = next(os.walk(label_folder))[1]
    detailed_scores = {}
    for task, langs in TASK2LANGS.items():
        if task in prediction_tasks and task in label_tasks:
            # QA tasks use JSON files; everything else is TSV.
            suffix = "json" if task in GROUP2TASK["qa"] else "tsv"
            # collect scores over all languages
            score = defaultdict(dict)
            for lg in langs:
                prediction_file = os.path.join(prediction_folder, task, f"test-{lg}.{suffix}")
                label_file = os.path.join(label_folder, task, f"test-{lg}.{suffix}")
                score_lg = evaluate_one_task(prediction_file, label_file, task, language=lg)
                for metric in score_lg:
                    score[metric][lg] = score_lg[metric]
            # average over all languages
            avg_score = {}
            for m in score:
                avg_score[f'avg_{m}'] = sum(score[m].values()) / len(score[m])
            score.update(avg_score)
            # Pick the task's headline metric: mean(EM, F1) for QA,
            # otherwise F1 when present, else accuracy.
            if task in GROUP2TASK["qa"]:
                score['avg_metric'] = (score['avg_exact_match'] + score['avg_f1']) / 2
            elif 'avg_f1' in score:
                score['avg_metric'] = score['avg_f1']
            elif 'avg_accuracy' in score:
                score['avg_metric'] = score['avg_accuracy']
            detailed_scores[task] = score
            if verbose:
                avg_result = ', '.join(['{}={:.1f}'.format(k, v) for k, v in score.items() if k.startswith('avg')])
                print('- Evaluate {}:\t{}'.format(task, avg_result))
    # Display logic:
    overall_scores = {}
    all_tasks = set(TASK2LANGS.keys())
    available_tasks = set(detailed_scores.keys())
    # If scores of all tasks are available, show the overall score in the main table
    if all_tasks == available_tasks:
        overall_scores['all_task'] = sum(detailed_scores[task]['avg_metric'] for task in all_tasks) / len(all_tasks)
    # If scores of all tasks in a sub group are available, show the score in the sub table
    for group, group_tasks in GROUP2TASK.items():
        if len(set(group_tasks) - available_tasks) == 0:
            overall_scores[group] = sum(detailed_scores[task]['avg_metric'] for task in group_tasks) / len(group_tasks)
    return overall_scores, detailed_scores
19,310 | import warnings
import torch
from torch.nn import Module, Parameter, Linear
from torch.nn.init import xavier_normal_, xavier_uniform_, constant_
from torch.nn.functional import linear, softmax, dropout
The provided code snippet includes necessary dependencies for implementing the `multi_head_attention_forward` function. Write a Python function `def multi_head_attention_forward(query, # type: Tensor key, # type: Tensor value, # type: Tensor embed_dim_to_check, # type: int num_heads, # type: int in_proj_weight, # type: Tensor in_proj_bias, # type: Tensor bias_k, # type: Optional[Tensor] bias_v, # type: Optional[Tensor] add_zero_attn, # type: bool dropout_p, # type: float out_proj_weight, # type: Tensor out_proj_bias, # type: Tensor training=True, # type: bool key_padding_mask=None, # type: Optional[Tensor] need_weights=True, # type: bool attn_mask=None, # type: Optional[Tensor] use_separate_proj_weight=False, # type: bool q_proj_weight=None, # type: Optional[Tensor] k_proj_weight=None, # type: Optional[Tensor] v_proj_weight=None, # type: Optional[Tensor] static_k=None, # type: Optional[Tensor] static_v=None # type: Optional[Tensor] )` to solve the following problem:
r""" Args: query, key, value: map a query and a set of key-value pairs to an output. See "Attention Is All You Need" for more details. embed_dim_to_check: total dimension of the model. num_heads: parallel attention heads. in_proj_weight, in_proj_bias: input projection weight and bias. bias_k, bias_v: bias of the key and value sequences to be added at dim=0. add_zero_attn: add a new batch of zeros to the key and value sequences at dim=1. dropout_p: probability of an element to be zeroed. out_proj_weight, out_proj_bias: the output projection weight and bias. training: apply dropout if is ``True``. key_padding_mask: if provided, specified padding elements in the key will be ignored by the attention. This is an binary mask. When the value is True, the corresponding value on the attention layer will be filled with -inf. need_weights: output attn_output_weights. attn_mask: mask that prevents attention to certain positions. This is an additive mask (i.e. the values will be added to the attention layer). use_separate_proj_weight: the function accept the proj. weights for query, key, and value in differnt forms. If false, in_proj_weight will be used, which is a combination of q_proj_weight, k_proj_weight, v_proj_weight. q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias. static_k, static_v: static key and value used for attention operators. Shape: Inputs: - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is the embedding dimension. - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length. - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length. 
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. Outputs: - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - attn_output_weights: :math:`(N, L, S)` where N is the batch size, L is the target sequence length, S is the source sequence length.
Here is the function:
def multi_head_attention_forward(query,  # type: Tensor
                                 key,  # type: Tensor
                                 value,  # type: Tensor
                                 embed_dim_to_check,  # type: int
                                 num_heads,  # type: int
                                 in_proj_weight,  # type: Tensor
                                 in_proj_bias,  # type: Tensor
                                 bias_k,  # type: Optional[Tensor]
                                 bias_v,  # type: Optional[Tensor]
                                 add_zero_attn,  # type: bool
                                 dropout_p,  # type: float
                                 out_proj_weight,  # type: Tensor
                                 out_proj_bias,  # type: Tensor
                                 training=True,  # type: bool
                                 key_padding_mask=None,  # type: Optional[Tensor]
                                 need_weights=True,  # type: bool
                                 attn_mask=None,  # type: Optional[Tensor]
                                 use_separate_proj_weight=False,  # type: bool
                                 q_proj_weight=None,  # type: Optional[Tensor]
                                 k_proj_weight=None,  # type: Optional[Tensor]
                                 v_proj_weight=None,  # type: Optional[Tensor]
                                 static_k=None,  # type: Optional[Tensor]
                                 static_v=None  # type: Optional[Tensor]
                                 ):
    # type: (...) -> Tuple[Tensor, Optional[Tensor]]
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
                       value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: mask that prevents attention to certain positions. This is an additive mask
            (i.e. the values will be added to the attention layer).
        use_separate_proj_weight: the function accept the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.
    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)`, ByteTensor, where N is the batch size, S is the source sequence length.
        - attn_mask: :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
    """
    # NOTE(review): `linear`, `softmax` and `dropout` are expected to come from
    # torch.nn.functional, imported elsewhere in this file -- confirm.
    # Detect self-attention (q == k == v) vs. encoder-decoder attention
    # (k == v) by tensor *value* equality; this selects which slices of
    # in_proj_weight are used below.
    qkv_same = torch.equal(query, key) and torch.equal(key, value)
    kv_same = torch.equal(key, value)
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    assert list(query.size()) == [tgt_len, bsz, embed_dim]
    assert key.size() == value.size()
    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    # 1/sqrt(d_k) scaling from "Attention Is All You Need".
    scaling = float(head_dim) ** -0.5
    if use_separate_proj_weight is not True:
        if qkv_same:
            # self-attention: one fused projection, split into q/k/v.
            q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)
        elif kv_same:
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = linear(query, _w, _b)
            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                # key == value here, so a single projection yields both k and v.
                k, v = linear(key, _w, _b).chunk(2, dim=-1)
        else:
            # q, k and v are all distinct: project each with its own slice
            # of the fused in_proj_weight.
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = linear(query, _w, _b)
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = linear(key, _w, _b)
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = linear(value, _w, _b)
    else:
        # Separate per-tensor projection weights (bias still comes from the
        # fused in_proj_bias, sliced per projection).
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)
        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)
        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)
        if in_proj_bias is not None:
            q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim:(embed_dim * 2)])
            v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2):])
        else:
            q = linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = linear(value, v_proj_weight_non_opt, in_proj_bias)
    # Pre-scale queries so the bmm below directly yields scaled dot-product scores.
    q = q * scaling
    if bias_k is not None and bias_v is not None:
        # Append learned bias tokens to key/value, and widen the masks by one
        # column so the new position is attendable.
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat([attn_mask,
                                       torch.zeros((attn_mask.size(0), 1),
                                                   dtype=attn_mask.dtype,
                                                   device=attn_mask.device)], dim=1)
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
                                                   dtype=key_padding_mask.dtype,
                                                   device=key_padding_mask.device)], dim=1)
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None
    # Fold heads into the batch dimension:
    # (len, bsz, embed_dim) -> (bsz * num_heads, len, head_dim).
    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    # Static k/v (already head-folded) override the projected ones entirely.
    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k
    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v
    src_len = k.size(1)
    if key_padding_mask is not None:
        assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len
    if add_zero_attn:
        # Append an all-zero key/value column and widen the masks to match.
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = torch.cat([attn_mask, torch.zeros((attn_mask.size(0), 1),
                                                          dtype=attn_mask.dtype,
                                                          device=attn_mask.device)], dim=1)
        if key_padding_mask is not None:
            key_padding_mask = torch.cat(
                [key_padding_mask, torch.zeros((key_padding_mask.size(0), 1),
                                               dtype=key_padding_mask.dtype,
                                               device=key_padding_mask.device)], dim=1)
    # Scaled dot-product scores: (bsz*heads, tgt_len, src_len).
    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]
    if attn_mask is not None:
        # Additive mask, broadcast over the (bsz*heads) dimension.
        attn_mask = attn_mask.unsqueeze(0)
        attn_output_weights += attn_mask
    if key_padding_mask is not None:
        # True positions in key_padding_mask get -inf so softmax zeroes them.
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1).unsqueeze(2),
            float('-inf'),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)
    attn_output_weights = softmax(
        attn_output_weights, dim=-1)
    attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)
    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    # Unfold heads and apply the output projection: back to (tgt_len, bsz, embed_dim).
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)
    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None
- static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length, N is the batch size, E is the embedding dimension. E/num_heads is the head dimension. Outputs: - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is the embedding dimension. - attn_output_weights: :math:`(N, L, S)` where N is the batch size, L is the target sequence length, S is the source sequence length. |
19,311 | import json
import torch
import logging
logger = logging.getLogger(__name__)
class AnswerTable:
    """Vocabulary of VQA answers shared across datasets.

    Loads the global answer list (a JSON list of ``{"ans": str,
    "dsets": [str, ...]}`` records) and builds a bidirectional
    answer <-> id mapping, optionally restricted to the answers used by
    the requested datasets.
    """

    # Normalization map applied after lowercasing / article stripping
    # in convert_ans().
    ANS_CONVERT = {
        "a man": "man",
        "the man": "man",
        "a woman": "woman",
        "the woman": "woman",
        'one': '1',
        'two': '2',
        'three': '3',
        'four': '4',
        'five': '5',
        'six': '6',
        'seven': '7',
        'eight': '8',
        'nine': '9',
        'ten': '10',
        'grey': 'gray',
    }

    def __init__(self, dsets=None, path="data/vqa/all_ans.json"):
        """
        :param dsets: optional iterable of dataset names; when given, only
            answers used by at least one of these datasets are kept.
        :param path: path to the answer-table JSON file (new, backward-
            compatible parameter; defaults to the previously hard-coded path).
        """
        # Fix: use a context manager so the file handle is closed promptly
        # (the original leaked the handle returned by open()).
        with open(path) as f:
            self.all_ans = json.load(f)
        if dsets is not None:
            dsets = set(dsets)
            # Keep only answers that appear in at least one requested dataset.
            self.anss = [ans['ans'] for ans in self.all_ans if
                         len(set(ans['dsets']) & dsets) > 0]
        else:
            self.anss = [ans['ans'] for ans in self.all_ans]
        self.ans_set = set(self.anss)

        self._id2ans_map = self.anss
        self._ans2id_map = {ans: ans_id for ans_id, ans in enumerate(self.anss)}

        # Sanity check: the two maps must be exact inverses of each other.
        assert len(self._id2ans_map) == len(self._ans2id_map)
        for ans_id, ans in enumerate(self._id2ans_map):
            assert self._ans2id_map[ans] == ans_id

    def convert_ans(self, ans):
        """Normalize a raw answer string: lowercase, drop a trailing '.',
        strip a leading article (a/an/the), then apply ANS_CONVERT."""
        if len(ans) == 0:
            return ""
        ans = ans.lower()
        if ans[-1] == '.':
            ans = ans[:-1].strip()
        if ans.startswith("a "):
            ans = ans[2:].strip()
        if ans.startswith("an "):
            ans = ans[3:].strip()
        if ans.startswith("the "):
            ans = ans[4:].strip()
        if ans in self.ANS_CONVERT:
            ans = self.ANS_CONVERT[ans]
        return ans

    def ans2id(self, ans):
        """Return the integer id of a (normalized) answer. Raises KeyError if unknown."""
        return self._ans2id_map[ans]

    def id2ans(self, ans_id):
        """Return the answer string for an integer id."""
        return self._id2ans_map[ans_id]

    def ans2id_map(self):
        """Return a copy of the answer -> id dict."""
        return self._ans2id_map.copy()

    def id2ans_map(self):
        """Return a copy of the id -> answer list."""
        return self._id2ans_map.copy()

    def used(self, ans):
        """Return True if `ans` is in the (possibly dataset-restricted) table."""
        return ans in self.ans_set

    def all_answers(self):
        """Return a copy of the answer list, in id order."""
        return self.anss.copy()

    def num_answers(self):
        """Return the number of answers in the table."""
        return len(self.anss)
The provided code snippet includes necessary dependencies for implementing the `load_lxmert_qa` function. Write a Python function `def load_lxmert_qa(path, model, label2ans)` to solve the following problem:
Load model weights from pre-training model. The answers in the fine-tuned QA task (indicated by label2ans) would also be properly initialized with pre-trained QA heads. :param path: Path to model snapshot. :param model: LXRT model instance. :param label2ans: The label2ans dict of fine-tuned QA datasets, like {0: 'cat', 1: 'dog', ...} :return:
Here is the function:
def load_lxmert_qa(path, model, label2ans):
    """
    Load model weights from pre-training model.
    The answers in the fine-tuned QA task (indicated by label2ans)
    would also be properly initialized with pre-trained
    QA heads.
    :param path: Path to model snapshot.
    :param model: LXRT model instance.
    :param label2ans: The label2ans dict of fine-tuned QA datasets, like
        {0: 'cat', 1: 'dog', ...} (a plain list is also accepted and
        interpreted positionally).
    :return:
    """
    import copy  # hoisted from mid-function for clarity

    logger.info("Load QA pre-trained Model from %s " % path)
    try:
        # Map tensors to CPU so loading does not require free GPU memory.
        loaded_state_dict = torch.load(path, map_location=torch.device('cpu'))
    except Exception:
        # Fix: narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are not swallowed. Fallback: some checkpoints wrap the weights
        # under a 'model' key.
        logger.info('load from online server')
        loaded_state_dict = torch.load(path)['model']

    model_state_dict = model.state_dict()

    # Handle Multi-GPU pre-training --> Single GPU fine-tuning
    for key in list(loaded_state_dict.keys()):
        loaded_state_dict[key.replace("module.", '')] = loaded_state_dict.pop(key)

    # Isolate bert model
    bert_state_dict = {}
    for key, value in loaded_state_dict.items():
        if key.startswith('bert.'):
            bert_state_dict[key] = value

    # Isolate answer head
    answer_state_dict = {}
    for key, value in loaded_state_dict.items():
        if key.startswith("answer_head."):
            answer_state_dict[key.replace('answer_head.', '')] = value

    # Do surgery on answer state dict: remap rows of the pre-trained answer
    # classifier onto the fine-tuned label space.
    ans_weight = answer_state_dict['logit_fc.3.weight']
    ans_bias = answer_state_dict['logit_fc.3.bias']
    new_answer_weight = copy.deepcopy(model_state_dict['logit_fc.3.weight'])
    new_answer_bias = copy.deepcopy(model_state_dict['logit_fc.3.bias'])
    answer_table = AnswerTable()
    loaded = 0
    unload = 0
    if type(label2ans) is list:
        label2ans = {label: ans for label, ans in enumerate(label2ans)}
    for label, ans in label2ans.items():
        new_ans = answer_table.convert_ans(ans)
        if answer_table.used(new_ans):
            # Copy the pre-trained classifier row for this answer.
            ans_id_9500 = answer_table.ans2id(new_ans)
            new_answer_weight[label] = ans_weight[ans_id_9500]
            new_answer_bias[label] = ans_bias[ans_id_9500]
            loaded += 1
        else:
            # Unknown answer: zero-init its classifier row.
            new_answer_weight[label] = 0.
            new_answer_bias[label] = 0.
            unload += 1
    logger.info("Loaded %d answers from LXRTQA pre-training and %d not" % (loaded, unload))
    answer_state_dict['logit_fc.3.weight'] = new_answer_weight
    answer_state_dict['logit_fc.3.bias'] = new_answer_bias

    # Load Bert Weights
    bert_model_keys = set(model.lxrt_encoder.model.state_dict().keys())
    bert_loaded_keys = set(bert_state_dict.keys())
    assert len(bert_model_keys - bert_loaded_keys) == 0
    model.lxrt_encoder.model.load_state_dict(bert_state_dict, strict=False)

    # Load Answer Logic FC Weights
    model_keys = set(model.state_dict().keys())
    ans_loaded_keys = set(answer_state_dict.keys())
    assert len(ans_loaded_keys - model_keys) == 0
    model.load_state_dict(answer_state_dict, strict=False)
19,312 | import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `filename_to_url` function. Write a Python function `def filename_to_url(filename, cache_dir=None)` to solve the following problem:
Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
Here is the function:
def filename_to_url(filename, cache_dir=None):
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    cache_path = os.path.join(cache_dir, filename)
    meta_path = cache_path + '.json'
    # Both the cached file and its .json sidecar must be present.
    for required_path in (cache_path, meta_path):
        if not os.path.exists(required_path):
            raise EnvironmentError("file {} not found".format(required_path))

    with open(meta_path, encoding="utf-8") as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
19,313 | import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
def get_from_cache(url, cache_dir=None):
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    :param url: http(s):// or s3:// URL of the file to fetch.
    :param cache_dir: cache directory; defaults to PYTORCH_PRETRAINED_BERT_CACHE.
    :return: local filesystem path of the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if sys.version_info[0] == 3 and isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    # The cache filename encodes both URL and ETag, so a changed remote file
    # gets a fresh cache entry instead of serving the stale one.
    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            # Sidecar .json records the origin URL and ETag for filename_to_url().
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w', encoding="utf-8") as meta_file:
                json.dump(meta, meta_file)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
The provided code snippet includes necessary dependencies for implementing the `cached_path` function. Write a Python function `def cached_path(url_or_filename, cache_dir=None)` to solve the following problem:
Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path.
Here is the function:
def cached_path(url_or_filename, cache_dir=None):
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    # Accept pathlib.Path for both arguments on Python 3.
    if sys.version_info[0] == 3:
        if isinstance(url_or_filename, Path):
            url_or_filename = str(url_or_filename)
        if isinstance(cache_dir, Path):
            cache_dir = str(cache_dir)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # URL, so get it from the cache (downloading if necessary)
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # File, and it exists.
        return url_or_filename
    if scheme == '':
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    # Something unknown
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
19,314 | import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `s3_request` function. Write a Python function `def s3_request(func)` to solve the following problem:
Wrapper function for s3 requests in order to create more helpful error messages.
Here is the function:
def s3_request(func):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """

    @wraps(func)
    def checked(url, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Anything other than a 404 is propagated untouched.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise EnvironmentError("file {} not found".format(url))

    return checked
19,315 | import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
The provided code snippet includes necessary dependencies for implementing the `read_set_from_file` function. Write a Python function `def read_set_from_file(filename)` to solve the following problem:
Extract a de-duped collection (set) of text from a file. Expected file format is one item per line.
Here is the function:
def read_set_from_file(filename):
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r', encoding='utf-8') as handle:
        # A set comprehension de-dupes while stripping trailing whitespace.
        return {line.rstrip() for line in handle}
19,316 | import json
import logging
import os
import shutil
import tempfile
from functools import wraps
from hashlib import sha256
import sys
from io import open
import boto3
import requests
from botocore.exceptions import ClientError
from tqdm import tqdm
def get_file_extension(path, dot=True, lower=True):
    """Return the extension of `path`, optionally without the leading dot
    and/or lowercased."""
    _, extension = os.path.splitext(path)
    if not dot:
        extension = extension[1:]
    return extension.lower() if lower else extension
19,317 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
def warmup_cosine(x, warmup=0.002):
    """ Cosine learning-rate schedule with linear warmup.
    Linearly increases from 0 to 1 over the first `warmup` fraction of
    training, then anneals following ``0.5 * (1 + cos(pi * x))``.

    :param x: training progress, a float in [0, 1].
    :param warmup: fraction of training used for linear warmup.
    """
    if x < warmup:
        return x/warmup
    # Fix: `x` is a plain Python float here, but torch.cos only accepts
    # tensors (the original raised TypeError); use math.cos instead.
    return 0.5 * (1.0 + math.cos(math.pi * x))
19,318 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
The provided code snippet includes necessary dependencies for implementing the `warmup_constant` function. Write a Python function `def warmup_constant(x, warmup=0.002)` to solve the following problem:
Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps. Learning rate is 1. afterwards.
Here is the function:
def warmup_constant(x, warmup=0.002):
    """ Linearly increases learning rate over `warmup`*`t_total` (as provided to BertAdam) training steps.
    Learning rate is 1. afterwards. """
    # Single-expression form of the warmup guard.
    return x / warmup if x < warmup else 1.0
19,319 | import math
import torch
from torch.optim import Optimizer
from torch.optim.optimizer import required
import logging
The provided code snippet includes necessary dependencies for implementing the `warmup_linear` function. Write a Python function `def warmup_linear(x, warmup=0.002)` to solve the following problem:
Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step. After `t_total`-th training step, learning rate is zero.
Here is the function:
def warmup_linear(x, warmup=0.002):
    """ Specifies a triangular learning rate schedule where peak is reached at `warmup`*`t_total`-th (as provided to BertAdam) training step.
    After `t_total`-th training step, learning rate is zero. """
    # Decay branch first (inverted guard); clamp at zero past t_total.
    if x >= warmup:
        return max((x - 1.) / (warmup - 1.), 0)
    return x / warmup
19,320 | import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `load_vocab` function. Write a Python function `def load_vocab(vocab_file)` to solve the following problem:
Loads a vocabulary file into a dictionary.
Here is the function:
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # Line number (0-based) is the token id.
        for index, token in enumerate(reader):
            vocab[token.strip()] = index
    return vocab
19,321 | import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `whitespace_tokenize` function. Write a Python function `def whitespace_tokenize(text)` to solve the following problem:
Runs basic whitespace cleaning and splitting on a piece of text.
Here is the function:
def whitespace_tokenize(text):
    """Runs basic whitespace cleaning and splitting on a piece of text."""
    stripped = text.strip()
    # str.split() with no argument collapses runs of any whitespace.
    return stripped.split() if stripped else []
19,322 | import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `_is_whitespace` function. Write a Python function `def _is_whitespace(char)` to solve the following problem:
Checks whether `chars` is a whitespace character.
Here is the function:
def _is_whitespace(char):
"""Checks whether `chars` is a whitespace character."""
# \t, \n, and \r are technically contorl characters but we treat them
# as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False | Checks whether `chars` is a whitespace character. |
19,323 | import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `_is_control` function. Write a Python function `def _is_control(char)` to solve the following problem:
Checks whether `chars` is a control character.
Here is the function:
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False | Checks whether `chars` is a control character. |
19,324 | import collections
import logging
import os
import unicodedata
from io import open
from .file_utils import cached_path
The provided code snippet includes necessary dependencies for implementing the `_is_punctuation` function. Write a Python function `def _is_punctuation(char)` to solve the following problem:
Checks whether `chars` is a punctuation character.
Here is the function:
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False | Checks whether `chars` is a punctuation character. |
19,325 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
from torch.nn import functional as F
import numpy as np
from param import args
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .file_utils import cached_path
from .attention import MultiheadAttention
def rand_batch_seq(row_array, dim_needed):  # keep sort rank
    """Randomly sample `dim_needed` indices along axis 1 of a 3-D array,
    keeping the selected rows in their original (sorted) order."""
    total_rows = row_array.shape[1]
    picked = np.random.choice(total_rows, dim_needed, replace=False, p=None)
    return row_array[:, np.sort(picked), :]
19,326 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
from torch.nn import functional as F
import numpy as np
from param import args
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .file_utils import cached_path
from .attention import MultiheadAttention
# Module-level logger for this file.
logger = logging.getLogger(__name__)
# NOTE(review): `parse_args` is not defined or imported anywhere in view, and
# the imports above already do `from param import args` -- confirm this line
# is not a leftover that shadows the imported `args`.
args = parse_args()
The provided code snippet includes necessary dependencies for implementing the `load_tf_weights_in_bert` function. Write a Python function `def load_tf_weights_in_bert(model, tf_checkpoint_path)` to solve the following problem:
Load tf checkpoints in a pytorch model
Here is the function:
def load_tf_weights_in_bert(model, tf_checkpoint_path):
    """Load TF checkpoints in a PyTorch model.

    Walks every variable in the TensorFlow checkpoint, maps its slash-
    separated name onto the corresponding PyTorch submodule/parameter,
    and copies the weights in place.

    Args:
        model: PyTorch BERT model whose attribute structure mirrors the
            TF variable names.
        tf_checkpoint_path: path to the TensorFlow checkpoint.

    Returns:
        The same `model`, with its parameters overwritten in place.

    Raises:
        ImportError: if TensorFlow is not installed.
    """
    try:
        import re
        import numpy as np
        import tensorflow as tf
    except ImportError:
        # Bug fix: previously caught the undefined name `Importtokenization`,
        # which turned a missing-TensorFlow condition into a NameError and
        # skipped the helpful log message below.
        logger.info("Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
                    "https://www.tensorflow.org/install/ for installation instructions.")
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info("Loading TF weight {} with shape {}".format(name, shape))
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)

    for name, array in zip(names, arrays):
        name = name.split('/')
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
        # which are not required for using pretrained model
        if any(n in ["adam_v", "adam_m"] for n in name):
            logger.info("Skipping {}".format("/".join(name)))
            continue
        pointer = model
        for m_name in name:
            # Names like "layer_11" index into a module list.
            if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
                l = re.split(r'_(\d+)', m_name)
            else:
                l = [m_name]
            # Translate TF parameter names to PyTorch attribute names.
            if l[0] == 'kernel' or l[0] == 'gamma':
                pointer = getattr(pointer, 'weight')
            elif l[0] == 'output_bias' or l[0] == 'beta':
                pointer = getattr(pointer, 'bias')
            elif l[0] == 'output_weights':
                pointer = getattr(pointer, 'weight')
            else:
                pointer = getattr(pointer, l[0])
            if len(l) >= 2:
                num = int(l[1])
                pointer = pointer[num]
        if m_name[-11:] == '_embeddings':
            pointer = getattr(pointer, 'weight')
        elif m_name == 'kernel':
            # TF stores dense kernels transposed relative to torch Linear.
            array = np.transpose(array)
        try:
            assert pointer.shape == array.shape
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info("Initialize PyTorch weight {}".format(name))
        pointer.data = torch.from_numpy(array)
    return model
19,327 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
from torch.nn import functional as F
import numpy as np
from param import args
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .file_utils import cached_path
from .attention import MultiheadAttention
The provided code snippet includes necessary dependencies for implementing the `gelu` function. Write a Python function `def gelu(x)` to solve the following problem:
Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) Also see https://arxiv.org/abs/1606.08415
Here is the function:
def gelu(x):
    """Exact (erf-based) GELU activation.

    For information: OpenAI GPT's gelu uses the tanh approximation
    0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
    which gives slightly different results. See https://arxiv.org/abs/1606.08415.
    """
    return 0.5 * x * (torch.erf(x / math.sqrt(2.0)) + 1.0)
19,328 | import copy
import json
import logging
import math
import os
import shutil
import tarfile
import tempfile
import sys
from io import open
from torch.nn import functional as F
import numpy as np
from param import args
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, SmoothL1Loss
from .file_utils import cached_path
from .attention import MultiheadAttention
def swish(x):
    """Swish activation: x * sigmoid(x)."""
    return torch.sigmoid(x) * x
19,329 | import sys
import csv
import base64
import time
import logging
import numpy as np
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ["img_id", "img_h", "img_w", "objects_id", "objects_conf",
"attrs_id", "attrs_conf", "num_boxes", "boxes", "features"]
logger = logging.getLogger(__name__)
The provided code snippet includes necessary dependencies for implementing the `load_obj_tsv` function. Write a Python function `def load_obj_tsv(fname, topk=None)` to solve the following problem:
Load object features from tsv file. :param fname: The path to the tsv file. :param topk: Only load features for top K images (lines) in the tsv file. Will load all the features if topk is either -1 or None. :return: A list of image object features where each feature is a dict. See FIELDNAMES above for the keys in the feature dict.
Here is the function:
def load_obj_tsv(fname, topk=None):
    """Load Faster R-CNN object features from a TSV file.

    :param fname: The path to the tsv file (columns given by FIELDNAMES).
    :param topk: Only load features for the top K images (lines); load
        everything when topk is None (passing -1 also loads everything,
        since len(data) never equals -1).
    :return: A list of per-image feature dicts; see FIELDNAMES for keys.
    """
    data = []
    start_time = time.time()
    logger.info("Start to load Faster-RCNN detected objects from %s" % fname)
    with open(fname) as f:
        reader = csv.DictReader(f, FIELDNAMES, delimiter="\t")
        for item in reader:
            # Scalar fields arrive as strings; cast them first.
            for key in ['img_h', 'img_w', 'num_boxes']:
                item[key] = int(item[key])

            boxes = item['num_boxes']
            decode_config = [
                ('objects_id', (boxes, ), np.int64),
                ('objects_conf', (boxes, ), np.float32),
                ('attrs_id', (boxes, ), np.int64),
                ('attrs_conf', (boxes, ), np.float32),
                ('boxes', (boxes, 4), np.float32),
                ('features', (boxes, -1), np.float32),
            ]
            # Array fields are base64-encoded raw buffers: decode, reshape,
            # and mark read-only (frombuffer shares the underlying buffer).
            for key, shape, dtype in decode_config:
                decoded = np.frombuffer(base64.b64decode(item[key]), dtype=dtype)
                item[key] = decoded.reshape(shape)
                item[key].setflags(write=False)

            data.append(item)
            if topk is not None and len(data) == topk:
                break
    elapsed_time = time.time() - start_time
    logger.info("Loaded %d images in file %s in %d seconds." % (len(data), fname, elapsed_time))
    return data
19,330 | import torch.nn as nn
import os
from param import args
import torch
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTFeatureExtraction, VISUAL_CONFIG, BertConfig, BertLayerNorm, GeLU
import logging
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids):
        # Parallel, equal-length lists describing one encoded sentence.
        self.segment_ids = segment_ids
        self.input_mask = input_mask
        self.input_ids = input_ids
The provided code snippet includes necessary dependencies for implementing the `convert_sents_to_features` function. Write a Python function `def convert_sents_to_features(sents, max_seq_length, tokenizer)` to solve the following problem:
Loads a data file into a list of `InputBatch`s.
Here is the function:
def convert_sents_to_features(sents, max_seq_length, tokenizer):
    """Tokenize raw sentences into padded `InputFeatures` for BERT.

    Each sentence becomes "[CLS] tokens [SEP]", truncated so the total
    length never exceeds `max_seq_length`, then zero-padded to exactly
    `max_seq_length`.
    """
    features = []
    for sent in sents:
        wordpieces = tokenizer.tokenize(sent.strip())
        # Reserve two slots for the [CLS] and [SEP] markers.
        wordpieces = wordpieces[:max_seq_length - 2]

        # Keep segment id 0 everywhere, which allows loading BERT-weights.
        tokens = ["[CLS]"] + wordpieces + ["[SEP]"]
        segment_ids = [0] * len(tokens)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # Attend only to real tokens (mask == 1); padding gets 0.
        input_mask = [1] * len(input_ids)

        pad = [0] * (max_seq_length - len(input_ids))
        input_ids = input_ids + pad
        input_mask = input_mask + pad
        segment_ids = segment_ids + pad

        assert len(input_ids) == len(input_mask) == len(segment_ids) == max_seq_length

        features.append(InputFeatures(input_ids=input_ids,
                                      input_mask=input_mask,
                                      segment_ids=segment_ids))
    return features
19,331 | import torch.nn as nn
import os
from param import args
import torch
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTFeatureExtraction, VISUAL_CONFIG, BertConfig, BertLayerNorm, GeLU
import logging
VISUAL_CONFIG = VisualConfig()
def set_visual_config(params):
    """Copy the layer-count hyper-parameters onto the global VISUAL_CONFIG."""
    for target, source in (('l_layers', 'llayers'),
                           ('x_layers', 'xlayers'),
                           ('r_layers', 'rlayers')):
        setattr(VISUAL_CONFIG, target, getattr(params, source))
19,332 | import os
import collections
import torch
import torch.nn as nn
import logging
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from param import args
from lxrt.qa_answer_table import load_lxmert_qa
from tasks.vqa_model import VQAModel
from tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
args = parse_args()
class VQADataset:
def __init__(self, splits: str):
def num_answers(self):
def __len__(self):
class VQATorchDataset(Dataset):
def __init__(self, dataset: VQADataset):
def __len__(self):
def __getitem__(self, item: int):
class VQAEvaluator:
def __init__(self, dataset: VQADataset):
def evaluate(self, quesid2ans: dict):
def dump_result(self, quesid2ans: dict, path):
def get_data_tuple(splits: str, bs: int, shuffle=False, drop_last=False) -> DataTuple:
    """Build the (dataset, loader, evaluator) triple for the given VQA splits."""
    dataset = VQADataset(splits)
    torch_dataset = VQATorchDataset(dataset)
    loader = DataLoader(
        torch_dataset,
        batch_size=bs,
        shuffle=shuffle,
        num_workers=args.num_workers,
        drop_last=drop_last,
        pin_memory=True,
    )
    return DataTuple(dataset=dataset, loader=loader, evaluator=VQAEvaluator(dataset))
19,333 | import os
import collections
import torch
import torch.nn as nn
import logging
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
from param import args
from lxrt.qa_answer_table import load_lxmert_qa
from tasks.vqa_model import VQAModel
from tasks.vqa_data import VQADataset, VQATorchDataset, VQAEvaluator
def adjust_learning_rate(optimizer, decay_rate):
    """Scale the optimizer's base learning rate by `decay_rate` in place."""
    optimizer._lr_base = optimizer._lr_base * decay_rate
19,334 | import torch.nn as nn
import os
import torch
import logging
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTFeatureExtraction, VISUAL_CONFIG, BertConfig, BertLayerNorm, GeLU
from param import args
class InputFeatures(object):
    """Holds one tokenized example's padded BERT inputs."""

    def __init__(self, input_ids, input_mask, segment_ids):
        self.input_ids = input_ids      # wordpiece ids, zero-padded
        self.input_mask = input_mask    # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # segment ids (0 for single-sentence input)
The provided code snippet includes necessary dependencies for implementing the `convert_sents_to_features` function. Write a Python function `def convert_sents_to_features(sents, max_seq_length, tokenizer)` to solve the following problem:
Loads a data file into a list of `InputBatch`s.
Here is the function:
def convert_sents_to_features(sents, max_seq_length, tokenizer):
    """Convert a batch of sentences into fixed-length `InputFeatures`.

    The encoding follows the standard single-segment BERT recipe:
    "[CLS] <wordpieces> [SEP]", truncated to fit `max_seq_length`, then
    zero-padded.
    """
    all_features = []
    for sentence in sents:
        pieces = tokenizer.tokenize(sentence.strip())
        limit = max_seq_length - 2  # room for [CLS] and [SEP]
        if len(pieces) > limit:
            pieces = pieces[:limit]

        bert_tokens = ["[CLS]"]
        bert_tokens.extend(pieces)
        bert_tokens.append("[SEP]")

        input_ids = tokenizer.convert_tokens_to_ids(bert_tokens)
        input_mask = [1] * len(input_ids)     # 1 marks a real (attended) token
        segment_ids = [0] * len(bert_tokens)  # single segment -> all zeros

        # Zero-pad every list up to the fixed sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        all_features.append(InputFeatures(input_ids=input_ids,
                                          input_mask=input_mask,
                                          segment_ids=segment_ids))
    return all_features
19,335 | import torch.nn as nn
import os
import torch
import logging
from lxrt.tokenization import BertTokenizer
from lxrt.modeling import LXRTFeatureExtraction, VISUAL_CONFIG, BertConfig, BertLayerNorm, GeLU
from param import args
VISUAL_CONFIG = VisualConfig()
def set_visual_config(params):
    """Mirror the command-line layer counts into the global VISUAL_CONFIG."""
    VISUAL_CONFIG.r_layers = params.rlayers
    VISUAL_CONFIG.x_layers = params.xlayers
    VISUAL_CONFIG.l_layers = params.llayers
19,336 | import os
import collections
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data.dataloader import DataLoader
import logging
from param import args
from tasks.nlvr2_model import NLVR2Model
from tasks.nlvr2_data import NLVR2Dataset, NLVR2TorchDataset, NLVR2Evaluator
DataTuple = collections.namedtuple("DataTuple", 'dataset loader evaluator')
args = parse_args()
class NLVR2Dataset:
    """In-memory NLVR2 annotations for one or more comma-separated splits.

    Each json entry looks like:
        {
            "identifier": "train-10171-0-0",
            "img0": "train-10171-0-img0",
            "img1": "train-10171-0-img1",
            "label": 0,
            "sent": "An image shows one leather pencil case, displayed
                     open with writing implements tucked inside.",
            "uid": "nlvr2_train_0"
        }
    """

    def __init__(self, splits: str):
        self.name = splits
        self.splits = splits.split(',')

        # Concatenate the json annotations of every requested split.
        self.data = []
        for split in self.splits:
            self.data.extend(json.load(open("data/nlvr2/%s.json" % split)))
        logger.info("Load %d data from split(s) %s." % (len(self.data), self.name))

        # Index by uid for evaluation-time lookups.
        self.id2datum = {datum['uid']: datum for datum in self.data}

    def __len__(self):
        return len(self.data)
class NLVR2TorchDataset(Dataset):
    # Torch-side wrapper around NLVR2Dataset: loads pre-extracted Faster
    # R-CNN features for BOTH images of every example (from hdf5, npz, or
    # tsv storage depending on args) and filters out examples whose
    # features are missing.

    def __init__(self, dataset: NLVR2Dataset):
        super().__init__()
        self.raw_dataset = dataset

        # Optionally cap the number of images loaded (debug/fast modes).
        if args.tiny:
            topk = TINY_IMG_NUM
        elif args.fast:
            topk = FAST_IMG_NUM
        else:
            topk = -1

        # Loading detection features to img_data
        if args.use_hdf5:
            # hdf5 mode: map image id -> index of the hdf5 file holding it
            # (features themselves are read lazily in __getitem__).
            self.imgid2img = {}
            self.image_file_list = args.image_hdf5_file.split(',')
            for i, image_file in enumerate(self.image_file_list):
                with h5py.File(image_file, 'r') as all_images:
                    for image_id in all_images.keys():
                        self.imgid2img[image_id] = i
            logger.info('total image number is: {}'.format(len(self.imgid2img)))
        elif args.use_npz:
            # npz mode: map image id -> path of its .npz feature file.
            self.imgid2img = {}
            self.image_dir_list = args.image_hdf5_file.split(',')
            for image_dir in self.image_dir_list:
                image_npz_files = os.path.join(image_dir, '*.npz')
                for image_npz_file in glob.glob(image_npz_files):
                    image_id = image_npz_file.split('/')[-1].split('.')[0]
                    self.imgid2img[image_id] = image_npz_file
            logger.info('total image number is: {}'.format(len(self.imgid2img)))
        else:
            # tsv mode: eagerly load every feature dict into memory.
            img_data = []
            if 'train' in dataset.splits:
                img_data.extend(load_obj_tsv('data/nlvr2_imgfeat/nlvr2_train.tsv', topk=topk))
            if 'valid' in dataset.splits:
                img_data.extend(load_obj_tsv('data/nlvr2_imgfeat/nlvr2_valid.tsv', topk=topk))
            if 'test' in dataset.name:
                img_data.extend(load_obj_tsv('data/nlvr2_imgfeat/nlvr2_test.tsv', topk=topk))
            self.imgid2img = {}
            for img_datum in img_data:
                self.imgid2img[img_datum['img_id']] = img_datum

        # Filter out the dataset: keep only examples whose BOTH images
        # have loaded features.
        self.data = []
        for datum in self.raw_dataset.data:
            if datum['img0'] in self.imgid2img and datum['img1'] in self.imgid2img:
                self.data.append(datum)
        logger.info("Use %d data in torch dataset" % (len(self.data)))

    def __len__(self):
        return len(self.data)

    def __getitem__(self, item: int):
        datum = self.data[item]

        ques_id = datum['uid']
        ques = datum['sent']

        # Get image info for both images of the pair.
        boxes2 = []
        feats2 = []
        for key in ['img0', 'img1']:
            img_id = datum[key]
            if args.use_hdf5:
                # hdf5 rows store base64-encoded raw buffers; decode on demand.
                # NOTE(review): assumes row layout [0]=h/w/num, [5]=boxes,
                # [6]=features — confirm against the hdf5 writer.
                file_idx = self.imgid2img[img_id]
                with h5py.File(self.image_file_list[file_idx], 'r') as all_images:
                    img_info = all_images[img_id]
                    img_h, img_w, obj_num = np.frombuffer(base64.b64decode(img_info[0]), dtype=np.int64).tolist()
                    feats = np.frombuffer(base64.b64decode(img_info[6]), dtype=np.float32).reshape((obj_num, -1)).copy()
                    boxes = np.frombuffer(base64.b64decode(img_info[5]), dtype=np.float32).reshape((obj_num, 4)).copy()
            elif args.use_npz:
                file_path = self.imgid2img[img_id]
                img_info = np.load(file_path)
                img_h = img_info['img_h']
                img_w = img_info['img_w']
                obj_num = img_info['num_boxes']
                feats = img_info['features'].copy()
                boxes = img_info['boxes'].copy()
            else:
                # tsv mode: features are already in memory (read-only arrays,
                # hence the .copy() before mutation below).
                img_info = self.imgid2img[img_id]
                boxes = img_info['boxes'].copy()
                feats = img_info['features'].copy()
                obj_num = img_info['num_boxes']
                img_h, img_w = img_info['img_h'], img_info['img_w']
            if args.padding:
                # Zero-pad every image to exactly args.max_objects boxes.
                if obj_num != args.max_objects:
                    align_obj_num = args.max_objects
                    feat_len = np.size(feats, 1)
                    box_len = np.size(boxes, 1)
                    feats = np.row_stack((feats, np.zeros((align_obj_num - obj_num, feat_len), dtype=np.float32)))
                    boxes = np.row_stack((boxes, np.zeros((align_obj_num - obj_num, box_len), dtype=np.float32)))
                    obj_num = align_obj_num
            assert obj_num == len(boxes) == len(feats)

            # Normalize the boxes (to 0 ~ 1); columns are (x0, y0, x1, y1).
            boxes[..., (0, 2)] /= img_w
            boxes[..., (1, 3)] /= img_h
            np.testing.assert_array_less(boxes, 1+1e-5)
            np.testing.assert_array_less(-boxes, 0+1e-5)

            boxes2.append(boxes)
            feats2.append(feats)
        # Stack the pair: resulting arrays have a leading axis of size 2.
        feats = np.stack(feats2)
        boxes = np.stack(boxes2)

        # Create target (label is absent for unlabeled/test examples).
        if 'label' in datum:
            label = datum['label']
            return ques_id, feats, boxes, ques, label
        else:
            return ques_id, feats, boxes, ques
class NLVR2Evaluator:
def __init__(self, dataset: NLVR2Dataset):
self.dataset = dataset
def evaluate(self, quesid2ans: dict):
score = 0.
for quesid, ans in quesid2ans.items():
datum = self.dataset.id2datum[quesid]
label = datum['label']
if ans == label:
score += 1
return score / len(quesid2ans)
def dump_result(self, quesid2ans: dict, path):
"""
Dump result to a CSV file, which is compatible with NLVR2 evaluation system.
NLVR2 CSV file requirement:
Each line contains: identifier, answer
:param quesid2ans: nlvr2 uid to ans (either "True" or "False")
:param path: The desired path of saved file.
:return:
"""
with open(path, 'w') as f:
for uid, ans in quesid2ans.items():
idt = self.dataset.id2datum[uid]["identifier"]
ans = 'True' if ans == 1 else 'False'
f.write("%s,%s\n" % (idt, ans))
def get_tuple(splits: str, bs: int, shuffle=False, drop_last=False) -> DataTuple:
    """Assemble the (dataset, loader, evaluator) triple for NLVR2 splits."""
    dataset = NLVR2Dataset(splits)
    loader = DataLoader(
        NLVR2TorchDataset(dataset),
        batch_size=bs,
        shuffle=shuffle,
        num_workers=args.num_workers,
        drop_last=drop_last,
        pin_memory=True,
    )
    return DataTuple(dataset=dataset, loader=loader, evaluator=NLVR2Evaluator(dataset))
19,337 | import argparse
import random
import logging
import numpy as np
import torch
def get_optimizer(optim):
    """Resolve an optimizer name to a torch optimizer class.

    Any unrecognized name containing 'bert' returns the sentinel string
    'bert'; the actual BERT optimizer is bound later by the caller.
    """
    table = {
        'rms': ("Optimizer: Using RMSProp", torch.optim.RMSprop),
        'adam': ("Optimizer: Using Adam", torch.optim.Adam),
        'adamax': ("Optimizer: Using Adamax", torch.optim.Adamax),
        'sgd': ("Optimizer: sgd", torch.optim.SGD),
    }
    if optim in table:
        message, optimizer = table[optim]
        logger.info(message)
    elif 'bert' in optim:
        optimizer = 'bert'  # The bert optimizer will be bind later.
    else:
        assert False, "Please add your optimizer %s in the list." % optim
    return optimizer
args = parse_args()
def parse_args() -> argparse.Namespace:
    """Build the command-line interface and return the parsed arguments.

    Side effects: binds `args.optimizer` via `get_optimizer(args.optim)`
    and seeds torch / random / numpy with `--seed` for reproducibility.
    """
    parser = argparse.ArgumentParser()

    # Data Splits
    parser.add_argument("--train", default='train')
    parser.add_argument("--valid", default='valid')
    parser.add_argument("--test", default=None)

    # Training Hyper-parameters
    parser.add_argument('--batchSize', dest='batch_size', type=int, default=256)
    parser.add_argument('--test_batch_size', dest='test_batch_size', type=int, default=950)
    parser.add_argument('--optim', default='bert')
    parser.add_argument('--lr', type=float, default=1e-4)
    parser.add_argument('--epochs', type=int, default=10)
    parser.add_argument('--dropout', type=float, default=0.1)
    parser.add_argument('--seed', type=int, default=9595, help='random seed')
    parser.add_argument("--hidden_dropout", default=0.0, type=float)
    parser.add_argument("--fix_language_bert",
                        default=False,
                        action='store_true',
                        help='whether fix language bert while fine-tuning.')
    parser.add_argument("--one_stream",
                        default=False,
                        action='store_true',
                        help='whether to use the one_stream style.')
    parser.add_argument("--safer_fp16",
                        default=False,
                        action='store_true',
                        help='whether use safer fp16.')
    parser.add_argument("--debug_mode",
                        default=False,
                        action='store_true',
                        help='whether to debug mode')

    # Debugging
    parser.add_argument('--output', type=str, default='snap/test')
    parser.add_argument("--fast", action='store_const', default=False, const=True)
    parser.add_argument("--tiny", action='store_const', default=False, const=True)
    parser.add_argument("--tqdm", action='store_const', default=False, const=True)

    # Model Loading
    parser.add_argument('--load', type=str, default=None,
                        help='Load the model (usually the fine-tuned model).')
    parser.add_argument('--patial_load', type=str, default=None,
                        help='Load the patial model.')
    parser.add_argument('--loadLXMERT', dest='load_lxmert', type=str, default=None,
                        help='Load the pre-trained LXMERT model.')
    parser.add_argument('--loadLXMERTQA', dest='load_lxmert_qa', type=str, default=None,
                        help='Load the pre-trained LXMERT model with QA answer head.')
    parser.add_argument("--fromScratch", dest='from_scratch', action='store_const', default=False, const=True,
                        help='If none of the --load, --loadLXMERT, --loadLXMERTQA is set, '
                             'the model would be trained from scratch. If --fromScratch is'
                             ' not specified, the model would load BERT-pre-trained weights by'
                             ' default. ')
    parser.add_argument("--bert_model", default=None, type=str,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-large-cased, bert-base-multilingual-uncased, "
                             "bert-base-multilingual-cased, bert-base-chinese.")
    parser.add_argument('--hidden_size', type=int, default=768)
    parser.add_argument('--bert_fix_no_str', type=str, default=None,
                        help='fix the no-matching of str in model when training.')
    parser.add_argument('--bert_fix_str', type=str, default=None,
                        help='fix the matching of str in model when training.')

    # Optimization
    parser.add_argument("--mceLoss", dest='mce_loss', action='store_const', default=False, const=True)

    # LXRT Model Config
    # Note: LXRT = L, X, R (three encoders), Transformer
    parser.add_argument("--llayers", default=0, type=int, help='Number of Language layers')
    parser.add_argument("--xlayers", default=12, type=int, help='Number of CROSS-modality layers.')
    parser.add_argument("--rlayers", default=0, type=int, help='Number of object Relationship layers.')
    parser.add_argument('--middle_layer', type=int, default=0, help='middle layer for match')

    # LXMERT Pre-training Config
    parser.add_argument("--taskMatched", dest='task_matched', action='store_const', default=False, const=True)
    parser.add_argument("--taskMaskLM", dest='task_mask_lm', action='store_const', default=False, const=True)
    parser.add_argument("--taskObjPredict", dest='task_obj_predict', action='store_const', default=False, const=True)
    parser.add_argument("--taskQA", dest='task_qa', action='store_const', default=False, const=True)
    parser.add_argument("--visualLosses", dest='visual_losses', default='obj,attr,feat', type=str)
    parser.add_argument("--qaSets", dest='qa_sets', default=None, type=str)
    parser.add_argument("--wordMaskRate", dest='word_mask_rate', default=0.15, type=float)
    parser.add_argument("--objMaskRate", dest='obj_mask_rate', default=0.15, type=float)

    # Training configuration
    parser.add_argument("--multiGPU", action='store_const', default=False, const=True)
    parser.add_argument("--num_workers", dest='num_workers', default=0, type=int)
    parser.add_argument("--amp_type", default=None, type=str, help="whether to use mix precision, must in [O0, O1, O2, O3]")
    parser.add_argument("--conditional_mask", dest='conditional_mask', action='store_const', default=False, const=True)
    parser.add_argument("--only_mask_words", dest='only_mask_words', action='store_const', default=False, const=True)
    parser.add_argument("--whole_word_mask", dest='whole_word_mask', action='store_const', default=False, const=True)
    parser.add_argument("--image_wwm", dest='image_wwm', action='store_const', default=False, const=True)
    parser.add_argument("--add_box_size", dest='add_box_size', action='store_const', default=False, const=True)
    parser.add_argument("--use_struct_emb", dest='use_struct_emb', action='store_const', default=False, const=True)
    parser.add_argument("--max_2d_position_embeddings", default=160, type=int, help='max struct position length')
    # Image-feature storage backends (hdf5 / npz / jpg vs. default tsv).
    parser.add_argument("--use_hdf5", dest='use_hdf5', action='store_const', default=False, const=True)
    parser.add_argument("--use_npz", dest='use_npz', action='store_const', default=False, const=True)
    parser.add_argument("--use_jpg", dest='use_jpg', action='store_const', default=False, const=True)
    parser.add_argument("--image_hdf5_file", dest='image_hdf5_file', default='data/mscoco_imgfeat/train2014_obj36.tsv.token,data/mscoco_imgfeat/val2014_obj36.tsv.token', type=str)
    parser.add_argument("--padding", dest='padding', action='store_const', default=False, const=True)
    parser.add_argument("--with_score", dest='with_score', action='store_const', default=False, const=True)
    parser.add_argument("--clip_norm", dest='clip_norm', default=5., type=float)
    parser.add_argument("--merge_for_submit", dest='merge_for_submit', action='store_const', default=False, const=True)
    parser.add_argument('--merge_dir', type=str, default='snap/vqa/test_vqa_ensemble_predict_best_submission')
    parser.add_argument('--max_objects', type=int, default=100)
    parser.add_argument("--use_multi_view", dest='use_multi_view', action='store_const', default=False, const=True)
    parser.add_argument("--from_config_file",
                        default=False,
                        action='store_true',
                        help='whether initial config from config file.')
    parser.add_argument("--bert_config_file", default=None, type=str,
                        help="The config json file corresponding to the pre-trained BERT model. "
                             "This specifies the model architecture.")
    parser.add_argument('--single_image_file', type=str, default=None,
                        help='single image hdf5 file for pre-training image modality.')
    parser.add_argument("--revise_match_task",
                        default=False,
                        action='store_true',
                        help='whether to revise match task.')
    parser.add_argument("--prefetch",
                        default=False,
                        action='store_true',
                        help='whether to prefetch the data.')
    parser.add_argument("--paired_attn",
                        default=False,
                        action='store_true',
                        help='whether to use paired attn for NLVR task')
    parser.add_argument("--linear_nlvr",
                        default=False,
                        action='store_true',
                        help='whether to only use linear nlvr head')
    parser.add_argument("--only_use_relevant_dets",
                        default=False,
                        action='store_true',
                        help='whether to only_use_relevant_dets')
    parser.add_argument("--max_seq_length", default=40, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded. Must match data generation.")
    parser.add_argument("--nlvr_max_seq_length", default=20, type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded. Must match data generation.")
    parser.add_argument("--concat_attention",
                        default=False,
                        action='store_true',
                        help='whether to use concat attention')
    parser.add_argument("--no_qa_data",
                        default=False,
                        action='store_true',
                        help='whether to remove qa data and task.')
    parser.add_argument("--use_vit",
                        default=False,
                        action='store_true',
                        help='vit mode.')
    parser.add_argument('--image_crop_size_h', type=int, default=448)
    parser.add_argument('--image_crop_size_w', type=int, default=448)
    parser.add_argument("--bin_h_num", type=int, default=8)
    parser.add_argument("--bin_w_num", type=int, default=8)
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")

    # distributed parameters
    parser.add_argument("--local_rank", type=int)
    parser.add_argument('--total_instances', type=int, default=8967075)
    parser.add_argument("--read_local_data",
                        default=False,
                        action='store_true',
                        help='whether to read image from local disk.')

    # Parse the arguments.
    args = parser.parse_args()

    # Bind optimizer class.
    args.optimizer = get_optimizer(args.optim)

    # Set seeds
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    return args
19,338 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
def load_hyperparam(args):
    """Fill `args` with model hyper-parameters read from args.config_path.

    Keys missing from the json config fall back to the defaults below.
    Returns the same `args` object, mutated in place.
    """
    with open(args.config_path, mode="r", encoding="utf-8") as f:
        param = json.load(f)

    defaults = {
        "emb_size": 768,
        "hidden_size": 768,
        "kernel_size": 3,
        "block_size": 2,
        "feedforward_size": 3072,
        "heads_num": 12,
        "layers_num": 12,
        "dropout": 0.1,
    }
    for key, fallback in defaults.items():
        setattr(args, key, param.get(key, fallback))
    return args
19,339 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def construct_hyper_param(parser):
    """Register all CLI options on *parser*, parse sys.argv, and return the
    namespace with derived fields filled in: ``device``, ``bert_type``,
    ``do_lower_case``, ``use_cuda``, ``target``, toy-mode flags — and with
    all RNG seeds set from ``--seed``.
    """
    parser.add_argument('--tepoch', default=3, type=int)
    parser.add_argument("--bS", default=3, type=int,
                        help="Batch size")
    parser.add_argument("--accumulate_gradients", default=1, type=int,
                        help="The number of accumulation of backpropagation to effectivly increase the batch size.")
    # NOTE(review): default=True combined with action='store_true' means this
    # flag can never be switched off from the command line — TODO confirm intent.
    parser.add_argument('--fine_tune',
                        default=True,
                        action='store_true',
                        help="If present, BERT is trained.")
    parser.add_argument("--task", default='finance_benchmark', type=str,
                        help="Type of model.")
    # 1.2 BERT Parameters
    parser.add_argument("--vocab_file",
                        default='models/google_zh_vocab.txt', type=str,
                        help="The vocabulary file that the BERT model was trained on.")
    parser.add_argument("--max_seq_length",
                        default=350, type=int, # Set based on maximum length of input tokens.
                        help="The maximum total input sequence length after WordPiece tokenization. Sequences "
                             "longer than this will be truncated, and sequences shorter than this will be padded.")
    parser.add_argument("--num_target_layers",
                        default=1, type=int,
                        help="The Number of final layers of BERT to be used in downstream task.")
    parser.add_argument('--lr_bert', default=1e-5, type=float, help='BERT model learning rate.')
    parser.add_argument('--lr_amr', default=1e-4, type=float, help='BERT model learning rate.')
    parser.add_argument('--lr', default=1e-3, type=float, help="Learning rate.")
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        help="random seed for initialization")
    parser.add_argument('--no_pretraining', action='store_true', help='Use BERT pretrained model')
    parser.add_argument("--bert_type_abb", default='uS', type=str,
                        help="Type of BERT model to load. e.g.) uS, uL, cS, cL, and mcS")
    # 1.3 Seq-to-SQL module parameters
    parser.add_argument('--lS', default=2, type=int, help="The number of LSTM layers.")
    parser.add_argument('--dr', default=0.3, type=float, help="Dropout rate.")
    parser.add_argument("--hS", default=100, type=int, help="The dimension of hidden vector in the seq-to-SQL module.")
    # 1.4 Execution-guided decoding beam-size. It is used only in test.py
    parser.add_argument('--EG',
                        default=False,
                        action='store_true',
                        help="If present, Execution guided decoding is used in test.")
    parser.add_argument('--beam_size',
                        type=int,
                        default=4,
                        help="The size of beam for smart decoding")
    parser.add_argument("--subword_type", choices=["none", "char"], default="none",
                        help="Subword feature type.")
    parser.add_argument("--embedding", choices=["bert", "word"], default="bert",
                        help="Emebdding type.")
    parser.add_argument("--encoder", choices=["bert", "lstm", "gru", \
                                              "cnn", "gatedcnn", "attn", \
                                              "rcnn", "crnn", "gpt"], \
                        default="bert", help="Encoder type.")
    parser.add_argument("--config_path", default="./models/bert_base_config.json", help="Path of the config file.")
    parser.add_argument("--vocab_path", type=str, default="./models/google_zh_vocab.txt",
                        help="Path of the vocabulary file.")
    parser.add_argument("--mlp_arc_size", type=int, default=400, help="batch size.")
    parser.add_argument("--mlp_rel_size", type=int, default=100, help="batch size.")
    parser.add_argument("--table_bert_dir", default='/Users/yuchen/Downloads/_models/ptm/0830_ptm_base.bin-20000', type=str, help="table_bert")
    parser.add_argument("--data_dir", default='./data/cbank', type=str, help="table_bert")
    parser.add_argument("--train_name", default='0901_train_cbank.txt',
                        type=str, help="table_bert")
    parser.add_argument("--dev_name", default='0901_dev_cbank.txt',
                        type=str, help="table_bert")
    parser.add_argument("--test_name", default='0901_test_cbank.txt',
                        type=str, help="table_bert")
    parser.add_argument("--table_name", default='cbank_table.json',
                        type=str, help="table_bert")
    parser.add_argument("--table_words", default='cbank_value_name.csv',
                        type=str, help="table_bert")
    # 1.5 auto train args
    parser.add_argument("--bert_path", default='./model/ERNIE', type=str,
                        help='config path to use (e.g. ./conf/config)')
    parser.add_argument("--filename", default='./example/train.zip', type=str)
    parser.add_argument("--output_dir", default='model/tableqa/', type=str)
    parser.add_argument("--job_id", default='nl2sql001', type=str)
    parser.add_argument("--heartbeat_host", default='127.0.0.1', type=str)
    parser.add_argument("--heartbeat_port", default=8880, type=int)
    args = parser.parse_args()
    args.target = "bert"
    args.use_cuda = torch.cuda.is_available()
    # Local `device` shadows the module-level one; the same value is stored on args.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    args.device = device
    # Map short abbreviation to the full Google BERT checkpoint name.
    map_bert_type_abb = {'uS': 'uncased_L-12_H-768_A-12',
                         'uL': 'uncased_L-24_H-1024_A-16',
                         'cS': 'cased_L-12_H-768_A-12',
                         'cL': 'cased_L-24_H-1024_A-16',
                         'mcS': 'multi_cased_L-12_H-768_A-12'}
    args.bert_type = map_bert_type_abb[args.bert_type_abb]
    # print(f"BERT-type: {args.bert_type}")
    # Decide whether to use lower_case.
    if args.bert_type_abb == 'cS' or args.bert_type_abb == 'cL' or args.bert_type_abb == 'mcS':
        args.do_lower_case = False
    else:
        args.do_lower_case = True
    # Seeds for random number generation
    # NOTE(review): `seed` and `np` are expected to come from the star imports
    # above (likely numpy via sqlova.utils) — verify they resolve.
    seed(args.seed)
    python_random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
    # args.toy_model = not torch.cuda.is_available()
    args.toy_model = False
    args.toy_size = 12
    return args
19,340 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
print(device)
def mkdir(path):
    """Create *path* (including any missing parents) if it does not exist.

    The original exists()+makedirs() pair was racy: another process could
    create the directory between the check and the call, raising
    FileExistsError.  ``exist_ok=True`` makes the creation idempotent.
    """
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
        print("make new folder ", path)
19,341 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
def get_opt(args, model, model_bert, fine_tune, total_steps):
    """Build Adam optimizers for the seq-to-SQL head and (optionally) BERT.

    The head optimizer always exists and uses ``args.lr``; the BERT
    optimizer (``args.lr_bert``) is created only when *fine_tune* is truthy.
    *total_steps* is accepted for interface compatibility but unused here.
    Returns ``(opt, opt_bert)`` with ``opt_bert`` possibly ``None``.
    """
    def _trainable(module):
        # Only parameters that require gradients are handed to the optimizer.
        return filter(lambda p: p.requires_grad, module.parameters())

    opt = torch.optim.Adam(_trainable(model), lr=args.lr, weight_decay=0)
    if fine_tune:
        opt_bert = torch.optim.Adam(_trainable(model_bert), lr=args.lr_bert, weight_decay=0)
    else:
        opt_bert = None
    return opt, opt_bert
19,342 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
def get_table_bert(args):
    """Build the table-text pretraining encoder and its tokenizer, then load
    the pre-trained checkpoint named by ``args.table_bert_dir``.

    Returns ``(encoder, tokenizer, bert_config)``; ``bert_config`` is simply
    ``args`` since the config fields live on the argument namespace.
    """
    bert_config = args
    args.num_hidden_layers = args.layers_num
    args.pretrained_model_path = args.table_bert_dir
    uer_tokenizer = BertTokenizer(args)
    table_bert_model = TableTextPretraining(args)
    # Deserialize on CPU when CUDA is unavailable.
    map_location = None if args.use_cuda else 'cpu'
    checkpoint = torch.load(args.pretrained_model_path, map_location=map_location)
    # Drop checkpoint entries whose names the freshly built model does not have.
    known = table_bert_model.state_dict()
    checkpoint = {name: tensor for name, tensor in checkpoint.items() if name in known}
    table_bert_model.load_state_dict(checkpoint, strict=False)
    print("Load pre-trained parameters.")
    return table_bert_model.pre_encoder, uer_tokenizer, bert_config
def get_models(args, trained=False):
    """Construct the seq-to-SQL model plus the table-BERT encoder/tokenizer.

    When ``trained`` is True, previously saved weights are loaded into both
    models.  Returns ``(model, table_bert, tokenizer, bert_config)``.

    NOTE(review): with ``trained=True`` this body reads ``path_model_bert``,
    ``path_model`` and ``path_model_amr``, which are not defined anywhere in
    this function — they must be module-level globals set elsewhere, else
    this branch raises NameError.  TODO confirm.
    """
    # some constants
    #agg_ops = ['', 'MAX', 'MIN', 'COUNT', 'SUM', 'AVG']
    #cond_ops = ['=', '>', '<', 'OP']  # do not know why 'OP' required. Hence,
    # Vocabulary of dependency / aggregation / condition operators used by
    # this task variant (superset of the WikiSQL originals above).
    dep_ops = ['null', 'scol', 'agg', 'wcol', 'val', 'op', 'sort_col', 'sort_op', 'sort_value']
    agg_ops = ["", "AVG", "MAX", "MIN", "COUNT", "SUM", "COMPARE", "GROUP BY", "SAME"]
    cond_ops = [">", "<", "==", "!=", "ASC", "DESC"]
    print(f"Batch_size = {args.bS * args.accumulate_gradients}")
    print(f"BERT parameters:")
    print(f"learning rate: {args.lr_bert}")
    print(f"Fine-tune BERT: {args.fine_tune}")
    # Get BERT
    table_bert, tokenizer, bert_config = get_table_bert(args)
    args.iS = bert_config.hidden_size * args.num_target_layers  # Seq-to-SQL input vector dimenstion
    # Get Seq-to-SQL
    n_cond_ops = len(cond_ops)
    n_agg_ops = len(agg_ops)
    print(f"Seq-to-SQL: the number of final BERT layers to be used: {args.num_target_layers}")
    print(f"Seq-to-SQL: the size of hidden dimension = {args.hS}")
    print(f"Seq-to-SQL: LSTM encoding layer size = {args.lS}")
    print(f"Seq-to-SQL: dropout rate = {args.dr}")
    print(f"Seq-to-SQL: learning rate = {args.lr}")
    model = Seq2SQL_v1(args.iS, args.hS, args.lS, args.dr, n_cond_ops, n_agg_ops)
    # `device` is the module-level global defined near the imports.
    model = model.to(device)
    if trained:
        assert path_model_bert != None
        assert path_model != None
        print(".......")
        print("loading from ", path_model_bert, " and ", path_model, " and ", path_model_amr)
        print(".......")
        if torch.cuda.is_available():
            res = torch.load(path_model_bert)
        else:
            res = torch.load(path_model_bert, map_location='cpu')
        table_bert.load_state_dict(res['model_bert'])
        table_bert.to(device)
        if torch.cuda.is_available():
            res = torch.load(path_model)
        else:
            res = torch.load(path_model, map_location='cpu')
        model.load_state_dict(res['model'])
    return model, table_bert, tokenizer, bert_config
19,343 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
def load_nl2sql_bussiness(path_nl2sql, mode, bussiness_name):
    """Load one split of a business NL2SQL dataset.

    Args:
        path_nl2sql: root directory holding one sub-directory per business.
        mode: split name, e.g. ``'train'`` or ``'test'``.
        bussiness_name: business (dataset) identifier.

    Returns:
        ``(data, table)`` — ``data`` is the list of example dicts read from
        ``uer_<mode>_<bussiness_name>_tok.json`` (one Python/JSON literal per
        line); ``table`` maps each table's ``tablename`` to its schema dict
        from ``<bussiness_name>_table.json``.
    """
    path_dir = path_nl2sql + '/' + bussiness_name + '/'
    # The original also built '<mode>_..._tok.json' first and immediately
    # overwrote it; only the 'uer_'-prefixed file is ever read, so the dead
    # assignment (and the unused `sub_dir` local) are removed.
    path_data = path_dir + 'uer_' + mode + '_' + bussiness_name + '_tok.json'
    path_table = path_dir + bussiness_name + '_table.json'
    print("path_data: ", path_data)
    print("path_table:", path_table)
    data = []
    with open(path_data, encoding='utf-8') as f:
        for line in f:
            # HACK: example lines are Python dict literals (single quotes),
            # not strict JSON, so json.loads() fails.  eval() is kept for
            # compatibility, but it is unsafe on untrusted files — consider
            # ast.literal_eval.
            data.append(eval(line))
    table = {}
    with open(path_table, encoding='utf-8') as f:
        for line in f:
            t1 = json.loads(line.strip())
            table[t1['tablename']] = t1
    return data, table
def get_bussiness_data(path_nl2sql, args):
    """Load the train/test splits for ``args.task`` and wrap them in the
    WikiSQL-style data loaders (the training loader shuffles)."""
    splits = {
        name: load_nl2sql_bussiness(path_nl2sql, name, bussiness_name=args.task)
        for name in ('train', 'test')
    }
    train_data, train_table = splits['train']
    val_data, val_table = splits['test']
    train_loader, dev_loader = get_loader_wikisql(
        train_data, val_data, args.bS, shuffle_train=True)
    return train_data, train_table, val_data, val_table, train_loader, dev_loader
19,344 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
print(device)
def get_yewu_single_data(args):
    """Load train/dev/test example files plus the table schema file from
    ``args.data_dir`` and wrap each split in a DataLoader.

    Returns ``(train_data, val_data, test_data, tables, train_loader,
    dev_loader, test_loader)`` where ``tables`` maps table id to a
    normalized schema dict.

    NOTE(review): all three loaders share ``shuffle_train`` (=True), so dev
    and test are shuffled too — confirm this is intended.  ``temp_func`` is
    an externally-defined collate function.
    """
    train_data = []
    val_data = []
    test_data = []
    tables = {}
    shuffle_train = True
    with open(os.path.join(args.data_dir, args.train_name), encoding='utf-8') as f:
        for idx, line in enumerate(f):
            t1 = json.loads(line)
            train_data.append(t1)
    with open(os.path.join(args.data_dir, args.dev_name), encoding='utf-8') as f:
        for idx, line in enumerate(f):
            t1 = json.loads(line)
            val_data.append(t1)
    with open(os.path.join(args.data_dir, args.test_name), encoding='utf-8') as f:
        for idx, line in enumerate(f):
            t1 = json.loads(line)
            test_data.append(t1)
    with open(os.path.join(args.data_dir, args.table_name), encoding='utf-8') as f:
        for line in f.readlines():
            # NOTE(review): table lines are parsed with eval(), not json.loads
            # — presumably Python dict literals; unsafe on untrusted files.
            table = eval(line.strip())
            # table = json.dumps(line.strip())
            # Normalize id field: prefer 'tablename', fall back to 'id'.
            if 'tablename' in table:
                tid = table['tablename']
            else:
                tid = table['id']
                table['tablename'] = table['id']
            # Normalize column-type field names and lower-case the values.
            # NOTE(review): the else branch assumes 'col_types' exists when
            # 'types' is absent — KeyError if both are missing.
            if 'types' in table:
                table['col_types'] = table['types']
                table['col_types'] = [x.lower() for x in table['col_types']]
                table['types'] = [x.lower() for x in table['types']]
            else:
                table['col_types'] = [x.lower() for x in table['col_types']]
                table['types'] = [x.lower() for x in table['col_types']]
            # Mirror 'header' <-> 'headers' so both spellings are available.
            if 'header' in table:
                table['headers'] = table['header']
            if 'headers' in table:
                table['header'] = table['headers']
            # Default per-column units to the string 'Null' when absent.
            if 'unit' in table:
                pass
            else:
                table['unit'] = ['Null'] * len(table['headers'])
            tables[tid] = table
    print('lengths:', len(train_data), len(val_data), len(test_data))
    train_loader = torch.utils.data.DataLoader(
        batch_size=args.bS,
        dataset=train_data,
        shuffle=shuffle_train,
        num_workers=1,
        collate_fn=temp_func  # now dictionary values are not merged!
    )
    dev_loader = torch.utils.data.DataLoader(
        batch_size=args.bS,
        dataset=val_data,
        shuffle=shuffle_train,
        num_workers=1,
        collate_fn=temp_func  # now dictionary values are not merged!
    )
    test_loader = torch.utils.data.DataLoader(
        batch_size=args.bS,
        dataset=test_data,
        shuffle=shuffle_train,
        num_workers=1,
        collate_fn=temp_func  # now dictionary values are not merged!
    )
    return train_data, val_data, test_data, tables, train_loader, dev_loader, test_loader
19,345 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
print(device)
def loadTableWords(args):
    """Load the table value/name normalization sheet if present.

    Reads ``<args.data_dir>/<args.table_words>`` as a TSV and strips all
    spaces from the normalized-value column ('归一化列值').  Returns the
    DataFrame, or ``None`` when the file does not exist.
    """
    value_name_path = os.path.join(args.data_dir, args.table_words)
    if not os.path.exists(value_name_path):
        return None
    star_words = pd.read_table(value_name_path, header=0)
    # BUG FIX: the original used chained indexing
    # (star_words.loc[idx]['归一化列值'] = ...), which assigns into a temporary
    # copy and leaves the DataFrame unchanged in modern pandas.  A single
    # vectorized .str.replace over the column performs the intended strip.
    star_words['归一化列值'] = (
        star_words['归一化列值'].astype(str).str.replace(' ', '', regex=False)
    )
    return star_words
def train(train_loader, train_table, model, model_bert, opt, bert_config, tokenizer, epoch,
          task, max_seq_length, num_target_layers, accumulate_gradients=1, start_time=None, heartbeat_hook=None,
          callconfig=None, check_grad=True, st_pos=0, opt_bert=None, path_db=None, dset_name='train'):
    """Run one pass over *train_loader*: optimize when ``dset_name == 'train'``,
    otherwise evaluate under ``torch.no_grad()``.

    Returns ``(acc, aux_out, sql_acc)`` where ``acc`` is
    [ave_loss, acc_sc, acc_scco, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi,
    acc_wv, acc_lx] averaged over the examples seen, ``aux_out`` is a
    placeholder ``1``, and ``sql_acc`` is the business SQL match rate
    (non-zero only in the eval branch).

    NOTE(review): `start_time`, `heartbeat_hook`, `callconfig`, `check_grad`,
    `task` and `path_db` are accepted but unused in this body; the eval
    branch reads the module-level `args` global via loadTableWords(args) —
    TODO confirm.
    """
    if dset_name == 'train':
        model.train()
        model_bert.train()
    else:
        model.eval()
        model_bert.eval()
    # Running loss accumulators (per sub-task), later divided by `cnt`.
    amr_loss = 0
    ave_loss = 0
    slen_loss = 0
    sc_loss = 0
    scco_loss = 0
    sa_loss = 0
    wn_loss = 0
    wc_loss = 0
    wo_loss = 0
    wvi_loss = 0
    # Per-sub-task correct-prediction counters.
    cnt = 0  # count the # of examples
    cnt_sc = 0  # count the # of correct predictions of select column
    cnt_scco = 0
    cnt_sa = 0  # of selectd aggregation
    cnt_wn = 0  # of where number
    cnt_wc = 0  # of where column
    cnt_wo = 0  # of where operator
    cnt_wv = 0  # of where-value
    cnt_wvi = 0  # of where-value index (on question tokens)
    cnt_lx = 0  # of logical form acc
    cnt_lx_r = 0
    cnt_x = 0  # of execution acc
    right_sql_cnt = 0
    sql_acc = 0.0
    # Engine for SQL querying.
    # engine = DBEngine(os.path.join(path_db, f"{dset_name}.db"))
    #engine = DBEngine(path_db)
    # print(train_table[0])
    if dset_name == 'train':
        # ---- training path: forward, loss, backward with optional gradient
        # accumulation, then accuracy bookkeeping ----
        epoch_start_time = time.time()
        for iB, t in enumerate(train_loader):
            torch.cuda.empty_cache()
            if iB % 100 == 0:
                print(iB, "/", len(train_loader), "\tUsed time:", time.time() - epoch_start_time, "\tloss:", ave_loss/(iB+0.00001),)
                sys.stdout.flush()
            cnt += len(t)
            if cnt < st_pos:
                continue
            # Get fields
            nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds = get_fields(t, train_table, no_hs_t=True, no_sql_t=True)
            g_sc, g_sa, g_wn, g_wc, g_wo, g_wv, g_cond_conn_op, g_slen, idxs = get_g(sql_i)
            # g_wvi = get_g_wvi_corenlp(t, idxs)
            wemb_n, wemb_h, l_n, l_hpu, l_hs = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds, max_seq_length,
                                                             num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
            g_wvi_corenlp = get_g_wvi_corenlp(t, idxs)
            try:
                g_wvi = g_wvi_corenlp
            except:
                print('????wvi')
                # Exception happens when where-condition is not found in nlu_tt.
                # In this case, that train example is not used.
                # During test, that example considered as wrongly answered.
                # e.g. train: 32.
                continue
            # Optional per-example knowledge features; zero-padded when absent.
            knowledge = []
            for k in t:
                if "bertindex_knowledge" in k:
                    knowledge.append(k["bertindex_knowledge"])
                else:
                    knowledge.append(max(l_n)*[0])
            knowledge_header = []
            for k in t:
                if "header_knowledge" in k:
                    knowledge_header.append(k["header_knowledge"])
                else:
                    knowledge_header.append(max(l_hs) * [0])
            # score
            s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, s_cco, s_slen = model(wemb_n, l_n, wemb_h, l_hpu, l_hs,
                                                                      g_sc=g_sc, g_sa=g_sa, g_wn=g_wn, g_wc=g_wc,
                                                                      g_wvi=g_wvi,
                                                                      g_cond_conn_op=g_cond_conn_op, g_slen=g_slen,
                                                                      knowledge = knowledge, knowledge_header = knowledge_header)
            loss, loss_slen, loss_sc, loss_scco, loss_sa, loss_wn, loss_wc, loss_wo, loss_wvi = Loss_sw_se(s_sc, s_cco,
                                                                                                           s_sa, s_wn,
                                                                                                           s_wc, s_wo,
                                                                                                           s_wv, s_slen,
                                                                                                           g_sc, g_sa,
                                                                                                           g_wn, g_wc,
                                                                                                           g_wo, g_wvi,
                                                                                                           g_cond_conn_op,
                                                                                                           g_slen)
            loss_all = loss
            if dset_name == 'dev':
                pass
            else:
                # Calculate gradient
                if iB % accumulate_gradients == 0: # mode
                    # at start, perform zero_grad
                    opt.zero_grad()
                    if opt_bert:
                        opt_bert.zero_grad()
                    loss_all.backward()
                    if accumulate_gradients == 1:
                        opt.step()
                        if opt_bert:
                            opt_bert.step()
                elif iB % accumulate_gradients == (accumulate_gradients - 1):
                    # at the final, take step with accumulated graident
                    loss_all.backward()
                    opt.step()
                    if opt_bert:
                        opt_bert.step()
                else:
                    # at intermediate stage, just accumulates the gradients
                    loss_all.backward()
            # Prediction
            pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, pr_slen = pred_sw_se(s_sc, s_cco, s_sa, s_wn, s_wc,
                                                                                     s_wo, s_wv, s_slen)
            pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu)
            # print("agg:", pr_sa, ",cond_conn_op:", pr_scco, ",sel:", pr_sc, ",conds:" ,pr_wc, pr_wo, pr_wv_str)
            # print("@@@@@@@@@@@@@@")
            pr_sql_i = generate_sql_i(pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu, t, train_table)
            for k in range(len(sql_i)):
                # NOTE(review): the bare except below counts comparison
                # failures as correct (cnt_lx_r += 1) — verify intent.
                try:
                    if (np.sort(np.asarray(sql_i[k]['conds']), axis=0) == np.sort(np.asarray(pr_sql_i[k]['conds']),
                                                                                  axis=0)).all() and \
                            (sql_i[k]['sel'] == np.asarray(pr_sql_i[k]['sel'])).all() and \
                            (sql_i[k]['agg'] == np.asarray(pr_sql_i[k]['agg'])).all() and \
                            (sql_i[k]['cond_conn_op'] == pr_sql_i[k]['cond_conn_op']):
                        cnt_lx_r += 1
                    else:
                        pass
                except:
                    cnt_lx_r += 1
                # NOTE(review): '(x == y) is False' tests identity with the
                # False singleton; for array comparisons this is never True.
                if (pr_wc[k] == g_wc[k]) is False:
                    pass
            # Cacluate accuracy
            cnt_sc1_list, cnt_scco1_list, cnt_sa1_list, cnt_wn1_list, \
            cnt_wc1_list, cnt_wo1_list, \
            cnt_wvi1_list, cnt_wv1_list = get_cnt_sw_list(g_sc, g_cond_conn_op, g_sa, g_wn, g_wc, g_wo, g_wvi, g_slen,
                                                          pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,
                                                          sql_i, pr_sql_i,
                                                          mode='train')
            cnt_lx1_list = get_cnt_lx_list(cnt_sc1_list, cnt_scco1_list, cnt_sa1_list, cnt_wn1_list, cnt_wc1_list,
                                           cnt_wo1_list, cnt_wv1_list)
            # statistics
            ave_loss += loss.item()
            slen_loss += loss_slen.item()
            sc_loss += loss_sc.item()
            sa_loss += loss_sa.item()
            scco_loss += loss_scco.item()
            wn_loss += loss_wn.item()
            wc_loss += loss_wc.item()
            wo_loss += loss_wo.item()
            wvi_loss += loss_wvi.item()
            # count
            cnt_sc += sum(cnt_sc1_list)
            cnt_scco += sum(cnt_scco1_list)
            cnt_sa += sum(cnt_sa1_list)
            cnt_wn += sum(cnt_wn1_list)
            cnt_wc += sum(cnt_wc1_list)
            cnt_wo += sum(cnt_wo1_list)
            cnt_wvi += sum(cnt_wvi1_list)
            cnt_wv += sum(cnt_wv1_list)
            cnt_lx += sum(cnt_lx1_list)
            # cnt_x += sum(cnt_x1_list)
    else:
        # ---- evaluation path: same forward/loss computation without
        # gradients, plus business-level SQL comparison via comp_sql ----
        with torch.no_grad():
            # NOTE(review): `args` here is a module-level global, not a
            # parameter of this function.
            table_words = loadTableWords(args)
            for iB, t in enumerate(train_loader):
                torch.cuda.empty_cache()
                cnt += len(t)
                if cnt < st_pos:
                    continue
                # nlu, nlu_t, sql_i, tb, hs_t = get_fields_info(t, train_table)
                # nlu  : natural language utterance
                # nlu_t: tokenized nlu
                # sql_i: canonical form of SQL query
                # sql_q: full SQL query text. Not used.
                # sql_t: tokenized SQL query
                # tb   : table
                # hs_t : tokenized headers. Not used.
                nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hds = get_fields(t, train_table, no_hs_t=True, no_sql_t=True)
                g_sc, g_sa, g_wn, g_wc, g_wo, g_wv, g_cond_conn_op, g_slen, idxs = get_g(sql_i)
                # get ground truth where-value index under CoreNLP tokenization scheme. It's done already on trainset.
                # g_wvi = get_g_wvi_corenlp(t, idxs)
                # wemb_n, wemb_h, l_n, l_hpu, l_hs = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hs_t, max_seq_length,
                #                                                  num_out_layers_n=num_target_layers, num_out_layers_h=num_target_layers)
                wemb_n, wemb_h, l_n, l_hpu, l_hs = get_wemb_bert(bert_config, model_bert, tokenizer, nlu_t, hds,
                                                                 max_seq_length,
                                                                 num_out_layers_n=num_target_layers,
                                                                 num_out_layers_h=num_target_layers)
                g_wvi_corenlp = get_g_wvi_corenlp(t, idxs)
                try:
                    g_wvi = g_wvi_corenlp
                except:
                    # Exception happens when where-condition is not found in nlu_tt.
                    # In this case, that train example is not used.
                    # During test, that example considered as wrongly answered.
                    # e.g. train: 32.
                    print('wvi???')
                    continue
                knowledge = []
                for k in t:
                    if "bertindex_knowledge" in k:
                        knowledge.append(k["bertindex_knowledge"])
                    else:
                        knowledge.append(max(l_n)*[0])
                knowledge_header = []
                for k in t:
                    if "header_knowledge" in k:
                        knowledge_header.append(k["header_knowledge"])
                    else:
                        knowledge_header.append(max(l_hs) * [0])
                # score
                s_sc, s_sa, s_wn, s_wc, s_wo, s_wv, s_cco, s_slen = model(wemb_n, l_n, wemb_h, l_hpu, l_hs,
                                                                          g_sc=g_sc, g_sa=g_sa, g_wn=g_wn, g_wc=g_wc,
                                                                          g_wvi=g_wvi,
                                                                          g_cond_conn_op=g_cond_conn_op, g_slen=g_slen,
                                                                          knowledge = knowledge, knowledge_header = knowledge_header)
                loss, loss_slen, loss_sc, loss_scco, loss_sa, loss_wn, loss_wc, loss_wo, loss_wvi = Loss_sw_se(s_sc,
                                                                                                               s_cco,
                                                                                                               s_sa,
                                                                                                               s_wn,
                                                                                                               s_wc,
                                                                                                               s_wo,
                                                                                                               s_wv,
                                                                                                               s_slen,
                                                                                                               g_sc,
                                                                                                               g_sa,
                                                                                                               g_wn,
                                                                                                               g_wc,
                                                                                                               g_wo,
                                                                                                               g_wvi,
                                                                                                               g_cond_conn_op,
                                                                                                               g_slen)
                # Prediction
                pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi, pr_slen = pred_sw_se(s_sc, s_cco, s_sa, s_wn, s_wc,
                                                                                         s_wo, s_wv, s_slen)
                pr_wv_str, pr_wv_str_wp = convert_pr_wvi_to_string(pr_wvi, nlu_t, nlu)
                # print("agg:", pr_sa, ",cond_conn_op:", pr_scco, ",sel:", pr_sc, ",conds:" ,pr_wc, pr_wo, pr_wv_str)
                # print("@@@@@@@@@@@@@@")
                pr_sql_i = generate_sql_i(pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wv_str, nlu, t, train_table)
                for k in range(len(sql_i)):
                    # Business-level semantic comparison of conditions and
                    # selects (tolerant to column-name normalization).
                    cond_com_flag = comp_sql.com_conds(sql_i[k], pr_sql_i[k], tb[k], table_words)
                    sel_com_flag = comp_sql.com_sels_with_split(g_sc[k], g_sa[k], pr_sql_i[k], tb[k], table_words)
                    if cond_com_flag and sel_com_flag:
                        right_sql_cnt += 1
                    # NOTE(review): bare except counts failures as correct.
                    try:
                        if (np.sort(np.asarray(sql_i[k]['conds']), axis=0) == np.sort(
                                np.asarray(pr_sql_i[k]['conds']), axis=0)).all() and \
                                (sql_i[k]['sel'] == np.asarray(pr_sql_i[k]['sel'])).all() and \
                                (sql_i[k]['agg'] == np.asarray(pr_sql_i[k]['agg'])).all() and \
                                (sql_i[k]['cond_conn_op'] == pr_sql_i[k]['cond_conn_op']):
                            cnt_lx_r += 1
                        else:
                            pass
                    except:
                        cnt_lx_r += 1
                    # NOTE(review): 'x == y is False' chains as
                    # 'x == y and y is False' — almost certainly not intended.
                    if pr_wc[k] == g_wc[k] is False:
                        pass
                # Cacluate accuracy
                cnt_sc1_list, cnt_scco1_list, cnt_sa1_list, cnt_wn1_list, \
                cnt_wc1_list, cnt_wo1_list, \
                cnt_wvi1_list, cnt_wv1_list = get_cnt_sw_list(g_sc, g_cond_conn_op, g_sa, g_wn, g_wc, g_wo, g_wvi,
                                                              g_slen,
                                                              pr_sc, pr_scco, pr_sa, pr_wn, pr_wc, pr_wo, pr_wvi,
                                                              sql_i, pr_sql_i,
                                                              mode='train')
                cnt_lx1_list = get_cnt_lx_list(cnt_sc1_list, cnt_scco1_list, cnt_sa1_list, cnt_wn1_list, cnt_wc1_list,
                                               cnt_wo1_list, cnt_wv1_list)
                # statistics
                ave_loss += loss.item()
                slen_loss += loss_slen.item()
                sc_loss += loss_sc.item()
                sa_loss += loss_sa.item()
                scco_loss += loss_scco.item()
                wn_loss += loss_wn.item()
                wc_loss += loss_wc.item()
                wo_loss += loss_wo.item()
                wvi_loss += loss_wvi.item()
                # count
                cnt_sc += sum(cnt_sc1_list)
                cnt_scco += sum(cnt_scco1_list)
                cnt_sa += sum(cnt_sa1_list)
                cnt_wn += sum(cnt_wn1_list)
                cnt_wc += sum(cnt_wc1_list)
                cnt_wo += sum(cnt_wo1_list)
                cnt_wvi += sum(cnt_wvi1_list)
                cnt_wv += sum(cnt_wv1_list)
                cnt_lx += sum(cnt_lx1_list)
                # cnt_x += sum(cnt_x1_list)
                # break
            sql_acc = right_sql_cnt/cnt
            print('sql_acc:', right_sql_cnt/cnt)
    # Normalize all accumulators by the number of examples seen.
    amr_loss /= cnt
    ave_loss /= cnt
    slen_loss /= cnt
    sc_loss /= cnt
    sa_loss /= cnt
    scco_loss /= cnt
    wn_loss /= cnt
    wc_loss /= cnt
    wo_loss /= cnt
    wvi_loss /= cnt
    acc_sc = cnt_sc / cnt
    acc_scco = cnt_scco / cnt
    acc_sa = cnt_sa / cnt
    acc_wn = cnt_wn / cnt
    acc_wc = cnt_wc / cnt
    acc_wo = cnt_wo / cnt
    acc_wvi = cnt_wvi / cnt
    acc_wv = cnt_wv / cnt
    acc_lx = cnt_lx / cnt
    acc_lx_r = cnt_lx_r / cnt
    print(
        'Epoch {}, slen_loss = {}, sc_loss = {}, sa_loss = {}, scco_loss = {}, wn_loss = {}, wc_loss = {}, wo_loss = {}, wvi_loss = {}'.format(
            epoch, slen_loss, sc_loss, sa_loss, scco_loss, wn_loss, wc_loss, wo_loss, wvi_loss))
    # print('cnt_lx_r = {}'.format(cnt_lx_r))
    # acc_x = cnt_x / cnt
    # acc = [ave_loss, acc_sc, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx, acc_x]
    acc = [ave_loss, acc_sc, acc_scco, acc_sa, acc_wn, acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx]
    aux_out = 1
    return acc, aux_out, sql_acc
19,346 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
def save_model(args, model, model_amr, model_bert):
    """Checkpoint the three sub-models into ``args.output_dir`` as
    ``model_best.pt``, ``model_amr_best.pt`` and ``model_bert_best.pt``."""
    checkpoints = (
        ('model_best.pt', {'model': model.state_dict()}),
        ('model_amr_best.pt', {'model_amr': model_amr.model.state_dict()}),
        ('model_bert_best.pt', {'model_bert': model_bert.state_dict()}),
    )
    for filename, payload in checkpoints:
        torch.save(payload, os.path.join(args.output_dir, filename))
19,347 | import os, sys, argparse, re, json
import time
import torch
import random as python_random
from uer.utils.tokenizer import *
from uer.utils.vocab import Vocab
from sqlova.utils.utils_wikisql import *
from sqlova.model.nl2sql.wikisql_models import *
from tableModel import TableTextPretraining
import comp_sql
import pandas as pd
print(device)
def print_result(epoch, acc, dname):
    """Pretty-print one epoch's 10-element accuracy vector for split *dname*.

    *acc* is [ave_loss, acc_sc, acc_scco, acc_sa, acc_wn, acc_wc, acc_wo,
    acc_wvi, acc_wv, acc_lx] as produced by train().
    """
    (ave_loss, acc_sc, acc_scco, acc_sa, acc_wn,
     acc_wc, acc_wo, acc_wvi, acc_wv, acc_lx) = acc
    print(f'{dname} results ------------')
    # The backslash keeps the original single-line output (including the
    # literal indentation of the continuation line) byte-identical.
    print(
        f" Epoch: {epoch}, ave loss: {ave_loss}, acc_sc: {acc_sc:.3f}, acc_scco: {acc_scco:.3f}, acc_sa: {acc_sa:.3f}, acc_wn: {acc_wn:.3f}, \
        acc_wc: {acc_wc:.3f}, acc_wo: {acc_wo:.3f}, acc_wvi: {acc_wvi:.3f}, acc_wv: {acc_wv:.3f}, acc_lx: {acc_lx:.3f}"
    )
19,348 | import os, json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
def Loss_slen(s_slen, g_slen):
    """Cross-entropy over the predicted number of SELECT columns."""
    gold = torch.tensor(g_slen).to(device)
    return F.cross_entropy(s_slen, gold)
def Loss_sc_multi(s_wc, g_wc):
    """KL divergence between the predicted select-column distribution and a
    target that splits probability mass evenly over each example's gold
    columns."""
    bS, max_h_len = s_wc.shape
    target = torch.zeros([bS, max_h_len]).to(device)
    for row, gold_cols in enumerate(g_wc):
        for col in gold_cols:
            target[row, col] = 1.0 / len(gold_cols)
    log_probs = F.log_softmax(s_wc, dim=1)
    return F.kl_div(log_probs, target)
def Loss_scco(s_cco, g_cond_conn_op):
    """Cross-entropy over the WHERE-clause connector operator."""
    gold = torch.tensor(g_cond_conn_op).to(device)
    return F.cross_entropy(s_cco, gold)
def Loss_sa_multi(s_sa, g_sslen, g_sa):
    """Sum over the batch of cross-entropies for the aggregation operator of
    each selected column; examples with zero selected columns contribute 0."""
    loss = 0
    for row, n_sel in enumerate(g_sslen):
        if n_sel == 0:
            continue
        gold = [int(a) for a in g_sa[row]]
        gold_t = torch.autograd.Variable(torch.tensor(gold).to(device))
        loss += F.cross_entropy(s_sa[row][:n_sel], gold_t)
    return loss
def Loss_wn(s_wn, g_wn):
    """Cross-entropy over the predicted number of WHERE conditions."""
    gold = torch.tensor(g_wn).to(device)
    return F.cross_entropy(s_wn, gold)
def Loss_wc(s_wc, g_wc):
    """Sum over the batch of cross-entropies for where-column prediction.

    ``s_wc`` is [bS, max_h_len, 4]; only the first ``len(g_wc1)`` condition
    slots of each example are scored against its gold columns.
    """
    loss = 0
    for row, gold_cols in enumerate(g_wc):
        n_conds = len(gold_cols)
        # [max_h_len, n_conds] -> [n_conds, max_h_len] for cross_entropy
        scores = s_wc[row, :, :n_conds].permute(1, 0)
        gold_t = torch.autograd.Variable(torch.tensor(gold_cols).to(device))
        loss += F.cross_entropy(scores, gold_t)
    return loss
def Loss_wo(s_wo, g_wn, g_wo):
    """Sum over the batch of cross-entropies for the comparison operator of
    each WHERE condition; examples with no conditions contribute 0."""
    loss = 0
    for row, n_conds in enumerate(g_wn):
        if n_conds == 0:
            continue
        gold_t = torch.autograd.Variable(torch.tensor(g_wo[row]).to(device))
        loss += F.cross_entropy(s_wo[row][:n_conds], gold_t)
    return loss
def Loss_wv_se(s_wv, g_wn, g_wvi):
    """Start/end span loss for WHERE-condition values.

    s_wv : [bS, 4, mL, 2] — per condition slot, start (…, 0) and end (…, 1)
           logits over the question tokens (4 = max # of conditions, 2
           stands for start & end logits).
    g_wn : [bS] number of gold conditions per example.
    g_wvi: per example, a list of (start, end) token-index pairs,
           e.g. [[1, 3, 2], [4, 3]] (when B=2, wn(b=1)=3, wn(b=2)=2).
    """
    loss = 0
    for row, spans in enumerate(g_wvi):
        n_conds = g_wn[row]
        if n_conds == 0:
            continue
        spans_t = torch.tensor(spans).to(device)
        starts = spans_t[:, 0]
        ends = spans_t[:, 1]
        # loss from the start positions of the first n_conds condition slots
        loss += F.cross_entropy(s_wv[row, :n_conds, :, 0], starts)
        # loss from the end positions
        loss += F.cross_entropy(s_wv[row, :n_conds, :, 1], ends)
    return loss
The provided code snippet includes necessary dependencies for implementing the `Loss_sw_se` function. Write a Python function `def Loss_sw_se(s_sc, s_cco, s_sa, s_wn, s_wc, s_wo, s_wv, s_slen, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, g_cond_conn_op, g_slen)` to solve the following problem:
:param s_wv: score [ B, n_conds, T, score] :param g_wn: [ B ] :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]] :return:
Here is the function:
def Loss_sw_se(s_sc, s_cco, s_sa, s_wn, s_wc, s_wo, s_wv, s_slen, g_sc, g_sa, g_wn, g_wc, g_wo, g_wvi, g_cond_conn_op,
               g_slen):
    """
    Combined loss over all seq-to-SQL sub-tasks.

    :param s_wv: score [ B, n_conds, T, score]
    :param g_wn: [ B ]
    :param g_wvi: [B, conds, pnt], e.g. [[[0, 6, 7, 8, 15], [0, 1, 2, 3, 4, 15]], [[0, 1, 2, 3, 16], [0, 7, 8, 9, 16]]]
    :return: (total, slen, sc, scco, sa, wn, wc, wo, wvi) losses.
    """
    # Compute each component ONCE.  The original evaluated every sub-loss
    # twice — once for the running total and again inside the return tuple —
    # doubling both the forward compute and the autograd graph.
    loss_slen = Loss_slen(s_slen, g_slen)
    loss_sc = Loss_sc_multi(s_sc, g_sc)
    loss_scco = Loss_scco(s_cco, g_cond_conn_op)
    loss_sa = Loss_sa_multi(s_sa, g_slen, g_sa)
    loss_wn = Loss_wn(s_wn, g_wn)
    loss_wc = Loss_wc(s_wc, g_wc)
    loss_wo = Loss_wo(s_wo, g_wn, g_wo)
    loss_wvi = Loss_wv_se(s_wv, g_wn, g_wvi)
    loss = (loss_slen + loss_sc + loss_scco + loss_sa
            + loss_wn + loss_wc + loss_wo + loss_wvi)
    return loss, loss_slen, loss_sc, loss_scco, loss_sa, loss_wn, loss_wc, loss_wo, loss_wvi
19,349 | import os, json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
def Loss_sc(s_sc, g_sc):
    """Plain cross-entropy over the single select-column prediction."""
    gold = torch.tensor(g_sc).to(device)
    return F.cross_entropy(s_sc, gold)
19,350 | import os, json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
def Loss_sa(s_sa, g_sa):
    """Plain cross-entropy over the single aggregation-operator prediction."""
    gold = torch.tensor(g_sa).to(device)
    return F.cross_entropy(s_sa, gold)
19,351 | import os, json
from copy import deepcopy
from matplotlib.pylab import *
import torch
import torch.nn as nn
import torch.nn.functional as F
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
from sqlova.utils.utils import topk_multi_dim
from sqlova.utils.utils_wikisql import *
The provided code snippet includes necessary dependencies for implementing the `Loss_s2s` function. Write a Python function `def Loss_s2s(score, g_pnt_idxs)` to solve the following problem:
score = [B, T, max_seq_length]
Here is the function:
def Loss_s2s(score, g_pnt_idxs):
    """
    score = [B, T, max_seq_length]

    Teacher-forced pointer loss for the WHERE string decoder: position t is
    scored against the gold pointer at t+1 (hence the [1:] shift), over the
    first len(gold)-1 steps.
    """
    loss = 0
    for row, gold in enumerate(g_pnt_idxs):
        last = len(gold) - 1
        shifted = torch.tensor(gold[1:]).to(device)  # +1 shift
        loss += F.cross_entropy(score[row, :last], shifted)
    return loss
19,352 | import os, sys, json
from matplotlib.pylab import *
def get_qas(path_q, tid):
    """Collect all question records of table `tid` from a jsonl question file.

    Each kept record carries the question text, a '<tid>-<n>' id (n counts
    matches in file order), SQuAD-style answers from get_squad_style_ans,
    and the raw sql dict under 'c_answers'.
    """
    qas = []
    qnum = -1
    with open(path_q, 'r') as f_q:
        for line in f_q:
            q1 = json.loads(line)
            if q1['table_id'] != tid:
                continue
            qnum += 1
            nlu = q1['question']
            sql = q1['sql']
            qas.append({
                'question': nlu,
                'id': f'{tid}-{qnum}',
                'answers': get_squad_style_ans(nlu, sql),
                'c_answers': sql,
            })
    return qas
def get_tbl_context(t1):
    """Build a flat text context for a table from its header tokens.

    The join scheme (single space) can be changed here if needed.
    """
    return ' '.join(t1['header'])
def generate_wikisql_bert(path_wikisql, dset_type):
    """Convert a WikiSQL split into a SQuAD/BERT-style json file.

    Reads <dset_type>.tables.jsonl, attaches that table's questions (via
    get_qas) and header/row metadata to each paragraph, and writes
    <dset_type>_bert.json next to the inputs.
    """
    path_q = os.path.join(path_wikisql, f'{dset_type}.jsonl')
    path_tbl = os.path.join(path_wikisql, f'{dset_type}.tables.jsonl')

    paragraphs = []  # one entry per table
    with open(path_tbl, 'r') as f_tbl:
        for t_line in f_tbl:
            t1 = json.loads(t_line)
            tid = t1['id']
            paragraphs.append({
                'qas': get_qas(path_q, tid),
                'tid': tid,
                'context': get_tbl_context(t1),
                # 'context_page_title': t1['page_title']  # not always present
                'context_headers': t1['header'],
                'context_headers_type': t1['types'],
                'context_contents': t1['rows'],
            })

    wikisql = {'version': "v1.1"}
    wikisql['data'] = [{'paragraphs': paragraphs, 'title': 'wikisql'}]

    out_path = os.path.join(path_wikisql, f'{dset_type}_bert.json')
    with open(out_path, 'w', encoding='utf-8') as fnew:
        fnew.write(json.dumps(wikisql, ensure_ascii=False) + '\n')
19,353 | import os
from matplotlib.pylab import *
The provided code snippet includes necessary dependencies for implementing the `ensure_dir` function. Write a Python function `def ensure_dir(my_path)` to solve the following problem:
Generate directory if not exists
Here is the function:
def ensure_dir(my_path):
    """Create directory `my_path` (including parents) if it does not exist.

    Idempotent: calling it on an existing directory is a no-op.
    """
    # exist_ok avoids the check-then-create race (TOCTOU) of the original
    # os.path.exists() guard when multiple processes start concurrently.
    os.makedirs(my_path, exist_ok=True)
19,354 | import os
from matplotlib.pylab import *
def _topk_idxs_1(tensor1, n_topk):
    """Top-k multi-dim indices of one tensor as a list of [d0, d1, ...] lists."""
    values_1d, idxs_1d = tensor1.reshape(-1).topk(k=n_topk)
    # .cpu() before .numpy(): the original non-batch branch omitted it and
    # would crash on CUDA tensors; on CPU tensors it is a no-op.
    idxs_list = unravel_index(idxs_1d.cpu().numpy(), tensor1.shape)
    # idxs_list is one array per dimension; transpose to per-beam index lists.
    return [[dim_idxs[i_beam] for dim_idxs in idxs_list] for i_beam in range(n_topk)]


def topk_multi_dim(tensor, n_topk=1, batch_exist=True):
    """Return top-k indices over all (non-batch) dimensions of `tensor`.

    With batch_exist=True, returns a list (one per batch element) of
    n_topk index lists; otherwise a single list of n_topk index lists.
    The original duplicated the whole index-reconstruction loop in both
    branches; deduplicated into _topk_idxs_1.
    """
    if batch_exist:
        return [_topk_idxs_1(tensor1, n_topk) for tensor1 in tensor]
    return _topk_idxs_1(tensor, n_topk)
19,355 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def load_wikisql_data(path_wikisql, mode='train', toy_model=False, toy_size=10, no_hs_tok=False, aug=False):
    """Load one WikiSQL split and its table file.

    Returns (data, table): `data` is the list of example dicts, `table`
    maps table id -> table dict. With toy_model=True only a small prefix
    of each file is read (note the original's asymmetric cut-offs: >= for
    data, > for tables — preserved).
    """
    if aug:
        mode = f"aug.{mode}"
        print('Augmented data is loaded!')

    path_sql = os.path.join(path_wikisql, mode + '_tok.jsonl')
    table_file = (mode + '.tables.jsonl') if no_hs_tok else (mode + '_tok.tables.jsonl')
    path_table = os.path.join(path_wikisql, table_file)

    data = []
    with open(path_sql) as f:
        for idx, line in enumerate(f):
            if toy_model and idx >= toy_size:
                break
            data.append(json.loads(line.strip()))

    table = {}
    with open(path_table) as f:
        for idx, line in enumerate(f):
            if toy_model and idx > toy_size:
                break
            entry = json.loads(line.strip())
            table[entry['id']] = entry

    return data, table
def load_w2i_wemb(path_wikisql, bert=False):
    """Load the pre-made TAPI subset: word-to-index map and embedding matrix."""
    suffix = '_bert' if bert else ''
    with open(os.path.join(path_wikisql, f'w2i{suffix}.json'), 'r') as f_w2i:
        w2i = json.load(f_w2i)
    wemb = load(os.path.join(path_wikisql, f'wemb{suffix}.npy'), )
    return w2i, wemb
def load_wikisql(path_wikisql, toy_model, toy_size, bert=False, no_w2i=False, no_hs_tok=False, aug=False):
    """Load WikiSQL train/dev data+tables and, unless no_w2i, the embeddings."""
    train_data, train_table = load_wikisql_data(
        path_wikisql, mode='train', toy_model=toy_model, toy_size=toy_size,
        no_hs_tok=no_hs_tok, aug=aug)
    dev_data, dev_table = load_wikisql_data(
        path_wikisql, mode='dev', toy_model=toy_model, toy_size=toy_size,
        no_hs_tok=no_hs_tok)
    # word vectors are optional
    w2i, wemb = (None, None) if no_w2i else load_w2i_wemb(path_wikisql, bert)
    return train_data, train_table, dev_data, dev_table, w2i, wemb
19,356 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def load_nl2sql_data(path_nl2sql, mode='train', toy_model=False, toy_size=10, no_hs_tok=False, aug=False, path_sql1=None):
    """Load one NL2SQL split (examples + tables) from its split sub-directory.

    path_sql1, when given, overrides the computed example-file path.
    train/val splits read the *_wvi.json files (pre-computed where-value
    indices); other splits read plain <mode>.json.
    """
    if aug:
        mode = f"aug.{mode}"
        print('Augmented data is loaded!')

    sub_dir = mode  # each split lives in a directory named after it
    if path_sql1 is not None:
        path_sql = path_sql1
    elif mode in ('train', 'val'):
        path_sql = os.path.join(path_nl2sql, sub_dir, mode + '_wvi.json')
    else:
        path_sql = os.path.join(path_nl2sql, sub_dir, mode + '.json')
    path_table = os.path.join(path_nl2sql, sub_dir, mode + '.tables.json')

    data = []
    with open(path_sql, encoding='utf-8') as f:
        for idx, line in enumerate(f):
            if toy_model and idx >= toy_size:
                break
            data.append(json.loads(line))

    table = {}
    with open(path_table, encoding='utf-8') as f:
        for idx, line in enumerate(f):
            if toy_model and idx > toy_size:
                break
            entry = json.loads(line.strip())
            table[entry['id']] = entry

    return data, table
def load_w2i_wemb(path_wikisql, bert=False):
    """Load the pre-built TAPI subset (word-to-index dict + embedding matrix)."""
    w2i_name = 'w2i_bert.json' if bert else 'w2i.json'
    wemb_name = 'wemb_bert.npy' if bert else 'wemb.npy'
    with open(os.path.join(path_wikisql, w2i_name), 'r') as f_w2i:
        w2i = json.load(f_w2i)
    wemb = load(os.path.join(path_wikisql, wemb_name), )
    return w2i, wemb
def load_nl2sql(path_nl2sql, toy_model, toy_size, bert=False, no_w2i=True, no_hs_tok=False, aug=False):
    """Load NL2SQL train/val splits and (optionally) the word embeddings."""
    train_data, train_table = load_nl2sql_data(path_nl2sql, mode='train', toy_model=toy_model,
                                               toy_size=toy_size, no_hs_tok=no_hs_tok, aug=aug)
    dev_data, dev_table = load_nl2sql_data(path_nl2sql, mode='val', toy_model=toy_model,
                                           toy_size=toy_size, no_hs_tok=no_hs_tok)
    # debug aid: show one parsed table entry
    for table_entry in train_table.values():
        print(table_entry)
        break
    if no_w2i:
        w2i, wemb = None, None
    else:
        w2i, wemb = load_w2i_wemb(path_nl2sql, bert)
    return train_data, train_table, dev_data, dev_table, w2i, wemb
19,357 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def temp_func(x):
    """Identity collate function: hand each batch back as the raw sample list."""
    return x


def get_loader_wikisql(data_train, data_dev, bS, shuffle_train=True, shuffle_dev=False):
    """Build train/dev DataLoaders that skip the default collation.

    Using temp_func as collate_fn keeps every batch as a plain list of
    examples, so dictionary values are not merged.
    """
    def _loader(dataset, shuffle):
        return torch.utils.data.DataLoader(
            batch_size=bS,
            dataset=dataset,
            shuffle=shuffle,
            num_workers=1,
            collate_fn=temp_func,
        )

    return _loader(data_train, shuffle_train), _loader(data_dev, shuffle_dev)
19,358 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_fields_info(t1s, tables, train=True):
    """Gather per-example fields for a batch.

    Returns (nlu, nlu_t, sql_i, tb, hs_t); sql_i and tb stay empty when
    train=False (no gold sql / table lookup at inference).
    """
    nlu = [t1['question'] for t1 in t1s]
    nlu_t = [t1['question_tok'] for t1 in t1s]
    hs_t = [t1['header_tok'] for t1 in t1s]
    if train:
        sql_i = [t1['sql'] for t1 in t1s]
        tb = [tables[t1['table_id']] for t1 in t1s]
    else:
        sql_i, tb = [], []
    return nlu, nlu_t, sql_i, tb, hs_t
19,359 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_fields_1(t1, tables, no_hs_t=False, no_sql_t=True, testt=False):
    """Extract the fields of one example.

    Returns (nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1).
    With testt=True the gold sql fields are None; no_sql_t suppresses the
    tokenized query; no_hs_t suppresses the tokenized headers.
    """
    nlu1 = t1['question']
    nlu_t1 = t1['question_tok']
    tid1 = t1['table_id']
    if testt:
        sql_i1 = None
        sql_q1 = None
    else:
        sql_i1 = t1['sql']
        sql_q1 = t1['sql']
    sql_t1 = None if no_sql_t else t1['query_tok']
    tb1 = tables[tid1]
    hs_t1 = [] if no_hs_t else tb1['header_tok']
    hs1 = tb1['header']
    return nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1


def get_fields(t1s, tables, no_hs_t=False, no_sql_t=False, testt=False):
    """Batch version of get_fields_1 (note: tid is collected but not returned,
    matching the original interface).

    The original wrapped the get_fields_1 call in an if/else whose branches
    were byte-identical — the dead conditional is removed here.
    """
    nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hs = [], [], [], [], [], [], [], []
    for t1 in t1s:
        nlu1, nlu_t1, tid1, sql_i1, sql_q1, sql_t1, tb1, hs_t1, hs1 = get_fields_1(
            t1, tables, no_hs_t, no_sql_t, testt=testt)
        nlu.append(nlu1)
        nlu_t.append(nlu_t1)
        sql_i.append(sql_i1)
        sql_q.append(sql_q1)
        sql_t.append(sql_t1)
        tb.append(tb1)
        hs_t.append(hs_t1)
        hs.append(hs1)
    return nlu, nlu_t, sql_i, sql_q, sql_t, tb, hs_t, hs
19,360 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def words_to_idx(words, w2i, no_BE=False):
    """Map a batch of token lists to a zero-padded index tensor.

    Input: e.g. [['I', 'am', 'hero'], ['You', 'are', 'genius']].
    Returns (w2i_l, l): w2i_l is a [B, max_len] long tensor of word indices
    (zero-padded; unknown words are treated as <UNK>), l holds each
    sequence length. no_BE is forwarded to word_to_idx1 (presumably it
    suppresses <BEG>/<END> markers — TODO confirm).
    """
    bS = len(words)
    l = torch.zeros(bS, dtype=torch.long).to(device)  # per-example lengths
    per_example_idxs = []
    for i, words1 in enumerate(words):
        idxs1, l1 = word_to_idx1(words1, w2i, no_BE)
        per_example_idxs.append(idxs1)
        l[i] = l1
    # zero-padded index matrix sized by the longest sequence
    w2i_l = torch.zeros([bS, int(max(l))], dtype=torch.long).to(device)
    for b in range(bS):
        w2i_l[b, :l[b]] = torch.LongTensor(per_example_idxs[b]).to(device)
    return w2i_l, l
The provided code snippet includes necessary dependencies for implementing the `hs_to_idx` function. Write a Python function `def hs_to_idx(hs_t, w2i, no_BE=False)` to solve the following problem:
Zero-padded when a word is not available (treated as <UNK>). Treat each "header tokens" group as if its tokens were NL-utterance tokens.
Here is the function:
def hs_to_idx(hs_t, w2i, no_BE=False):
    """Index header tokens by flattening all headers into pseudo-utterances.

    Each header's token list is treated like an NL-utterance; missing words
    are zero-padded (treated as <UNK>). Returns (w2i_hpu, l_hpu, l_hs)
    where l_hs[b] is the number of headers of example b.
    """
    hpu_t = []   # flattened header pseudo-utterances, across the batch
    l_hs = []    # header count per example, to undo the flattening later
    for hs_t1 in hs_t:
        hpu_t.extend(hs_t1)
        l_hs.append(len(hs_t1))
    w2i_hpu, l_hpu = words_to_idx(hpu_t, w2i, no_BE=no_BE)
    return w2i_hpu, l_hpu, l_hs
19,361 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def encode(lstm, wemb_l, l, return_hidden=False, hc0=None, last_only=False, U=None, V=None, ctx=None, l_hs=None):
    """Run `lstm` over padded embeddings wemb_l of shape [batch_size, max token length, dim_emb].

    l: sequence lengths per batch element (list/array-like).
    hc0: optional initial (h, c) state, permuted here to match length sorting.
    last_only: return only each sequence's final output (unsqueezed to
        [B, 1, dim]); when ctx is given, instead compute a ctx-conditioned
        attention pooling using projections U (for ctx) and V (for outputs),
        grouped by l_hs.
    return_hidden: also return the permuted-back hidden and cell states.
    """
    bS, mL, eS = wemb_l.shape
    # sort before packking
    # pack_padded_sequence needs sequences in decreasing-length order;
    # perm_idx_inv restores the original batch order afterwards.
    l = array(l)
    perm_idx = argsort(-l)
    perm_idx_inv = generate_perm_inv(perm_idx)
    # pack sequence
    packed_wemb_l = nn.utils.rnn.pack_padded_sequence(wemb_l[perm_idx, :, :],
                                                      l[perm_idx],
                                                      batch_first=True)
    # Time to encode
    if hc0 is not None:
        # hidden states are [layers*dirs, B, dim]: permute on the batch axis
        hc0 = (hc0[0][:, perm_idx], hc0[1][:, perm_idx])
    # ipdb.set_trace()
    packed_wemb_l = packed_wemb_l.float() # I don't know why..
    packed_wenc, hc_out = lstm(packed_wemb_l, hc0)
    hout, cout = hc_out
    # unpack
    wenc, _l = nn.utils.rnn.pad_packed_sequence(packed_wenc, batch_first=True)
    if last_only:
        if ctx is None:
            # Take only final outputs for each columns.
            wenc = wenc[tuple(range(bS)), l[perm_idx] - 1]  # [batch_size, dim_emb]
            wenc.unsqueeze_(1)  # [batch_size, 1, dim_emb]
        else:
            ctx = ctx.unsqueeze(1)
            # [batch_size, 1, dim_emb] -> [batch_size, 1, hS]
            wenc_u = U(ctx)
            # [batch_size, seq_len, dim_emb] -> [batch_size, seq_len, hS]
            wenc_v = V(wenc)
            start = 0
            # [batch_size, 1, dim_emb]
            wenc2 = torch.zeros(wenc.shape[0], 1, wenc.shape[2])
            # NOTE(review): the loop below slices wenc by l_hs-sized groups
            # (headers of one example) while indexing ctx/wenc_u per example b
            # — assumes ctx has one row per example and wenc one row per
            # header; confirm against encode_hpu's call site.
            for b in range(ctx.shape[0]):
                # [1, hS] * [group, seq_len, hS] -> element-wise attention logits
                attn = torch.mul(wenc_u[b], wenc_v[start:start + l_hs[b]])
                # attn, _ = nn.utils.rnn.pad_packed_sequence(attn, batch_first=True)
                # softmax over the time axis after summing feature dims
                attn = F.softmax(attn.sum(2), dim=1)
                wenc1 = torch.bmm(attn.unsqueeze(1), wenc[start:start + l_hs[b]])
                wenc1 += ctx[b]  # residual connection with the context
                wenc2[start:start + l_hs[b]] = wenc1
                start += l_hs[b]
            wenc = wenc2
    # restore the original (pre-sort) batch order
    wenc = wenc[perm_idx_inv]
    if return_hidden:
        # hout.shape = [number_of_directoin * num_of_layer, seq_len(=batch size), dim * number_of_direction ] w/ batch_first.. w/o batch_first? I need to see.
        hout = hout[:, perm_idx_inv].to(device)
        cout = cout[:, perm_idx_inv].to(device)  # Is this correct operation?
        return wenc, hout, cout
    else:
        return wenc
def encode_hpu(lstm, wemb_hpu, l_hpu, l_hs, U=None, V=None, ctx=None):
    """Encode flattened header pseudo-utterances and regroup them per example.

    Returns wenc_hs of shape [B, max(l_hs), hS], where row b holds the
    l_hs[b] header encodings of example b (zero-padded past l_hs[b]).
    U/V/ctx are forwarded to encode's context-attention pooling.
    """
    wenc_hpu, hout, cout = encode(
        lstm, wemb_hpu, l_hpu,
        return_hidden=True, hc0=None, last_only=True,
        U=U, V=V, ctx=ctx, l_hs=l_hs)
    wenc_hpu = wenc_hpu.squeeze(1)
    hS = wenc_hpu.size(-1)

    # Re-pack according to batch: [B, max_len_headers_all, dim_lstm]
    wenc_hs = wenc_hpu.new_zeros(len(l_hs), max(l_hs), hS).to(device)
    st = 0
    for i, n_headers in enumerate(l_hs):
        wenc_hs[i, :n_headers] = wenc_hpu[st:st + n_headers]
        st += n_headers
    return wenc_hs
19,362 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_wc1(conds):
    """Return the where-column index of every [wc, wo, wv] condition triple."""
    return [int(cond1[0]) for cond1 in conds]
def get_wo1(conds):
    """Return the where-operator of every [wc, wo, wv] condition triple."""
    return [int(cond1[1]) for cond1 in conds]
def get_wv1(conds):
    """Return the where-value (as str) of every [wc, wo, wv] condition triple."""
    return [str(cond1[2]) for cond1 in conds]
The provided code snippet includes necessary dependencies for implementing the `get_g` function. Write a Python function `def get_g(sql_i)` to solve the following problem:
for backward compatibility, separated with get_g
Here is the function:
def get_g(sql_i):
    """ for backward compatibility, separated with get_g

    Extract gold labels from a batch of sql dicts. NOTE: mutates each sql
    dict in place (sorts 'sel'/'agg', reorders 'conds' by column index).
    Returns (g_sc, g_sa, g_wn, g_wc, g_wo, g_wv, g_cond_conn_op, g_slen, idxs)
    where idxs[b] is the permutation applied to example b's conditions.
    """
    g_sc, g_sa, g_wn, g_wc, g_wo, g_wv = [], [], [], [], [], []
    g_cond_conn_op, g_slen, idxs = [], [], []
    for psql_i1 in sql_i:
        # sort select columns (and their aggregations) by column index
        psql_i1["sel"] = np.asarray(psql_i1["sel"])
        sel_order = np.argsort(psql_i1["sel"])
        g_sc.append(list(psql_i1["sel"][sel_order]))
        g_sa.append(list(np.asarray(psql_i1["agg"])[sel_order]))
        g_slen.append(len(psql_i1["sel"]))
        psql_i1["sel"] = np.sort(psql_i1["sel"])
        psql_i1["agg"] = np.sort(psql_i1["agg"])
        assert len(psql_i1["sel"]) == len(psql_i1["agg"])

        g_cond_conn_op.append(psql_i1["cond_conn_op"])

        # sort where-conditions by their column index
        conds = np.asarray(psql_i1['conds'])
        cond_order = np.argsort([int(x) for x in conds[:, 0]])
        idxs.append(cond_order)
        psql_i1['conds'] = conds[cond_order]
        # NOTE(review): the original guarded the lines below with
        # `if not len(psql_i1["agg"]) < 0:` — always true since len() >= 0 —
        # so its raising else-branch was dead code; the guard is removed.
        g_wn.append(len(conds))
        g_wc.append(get_wc1(list(conds[cond_order])))
        g_wo.append(get_wo1(list(conds[cond_order])))
        g_wv.append(get_wv1(list(conds[cond_order])))
    return g_sc, g_sa, g_wn, g_wc, g_wo, g_wv, g_cond_conn_op, g_slen, idxs
19,363 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def get_g_wvi_corenlp(t, idxs):
    """Reorder each example's gold where-value [start, end] token index pairs.

    idxs[b] is the permutation (from condition sorting) to apply to
    example b's 'wvi_corenlp' list. Returns a list of lists of [st, ed].
    """
    g_wvi = []
    for b, t1 in enumerate(t):
        reordered = np.asarray(t1['wvi_corenlp'])[idxs[b]]
        g_wvi.append([[st_idx, ed_idx] for st_idx, ed_idx in reordered])
    return g_wvi
19,364 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def generate_w2i_wemb_table(tables, wv, idx_w2i, n_total, w2i, wemb):
    """Register every header token of every table in the GloVe subset.

    Delegates to update_w2i_wemb per token; wv/w2i/wemb act as shared
    accumulators. Numeric tokens get no special treatment. Slash-containing
    headers (e.g. 'state/territory') are kept as single tokens, matching
    the WikiSQL queries that use them verbatim.
    """
    for table_contents in tables.values():
        # header_tok: e.g. [['state/territory'], ['current', 'slogan'], ...]
        for header_tokens in table_contents['header_tok']:
            for token in header_tokens:
                idx_w2i, n_total = update_w2i_wemb(token, wv, idx_w2i, n_total, w2i, wemb)
    return idx_w2i, n_total
def generate_w2i_wemb(train_data, wv, idx_w2i, n_total, w2i, wemb):
    """Register every question token of the dataset in the GloVe subset.

    Delegates to update_w2i_wemb per token; note the extra n_total
    increment per token, mirroring the original bookkeeping. '?' is
    already removed from question_tok upstream.
    """
    for t1 in train_data:
        for word in t1['question_tok']:
            idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb)
            n_total += 1
    return idx_w2i, n_total
def make_w2i_wemb(args, path_save_w2i_wemb, wv, data_train, data_dev, data_test, table_train, table_dev, table_test):
    """Build and save the word-to-index dict and embedding matrix over all splits.

    Writes w2i.json and wemb.npy into path_save_w2i_wemb and returns
    (w2i, wemb). Indices 0-2 are reserved for <UNK>/<BEG>/<END>.
    """
    w2i = {'<UNK>': 0, '<BEG>': 1, '<END>': 2}  # reserved special tokens
    idx_w2i = 2
    n_total = 3
    wemb = [np.zeros(300, dtype=np.float32) for _ in range(3)]  # 300-dim vectors

    # accumulate over questions and table headers of every split
    for data, tables in ((data_train, table_train),
                         (data_dev, table_dev),
                         (data_test, table_test)):
        idx_w2i, n_total = generate_w2i_wemb(data, wv, idx_w2i, n_total, w2i, wemb)
        idx_w2i, n_total = generate_w2i_wemb_table(tables, wv, idx_w2i, n_total, w2i, wemb)

    wemb = np.stack(wemb, axis=0)
    with open(os.path.join(path_save_w2i_wemb, 'w2i.json'), 'w') as f_w2i:
        json.dump(w2i, f_w2i)
    np.save(os.path.join(path_save_w2i_wemb, 'wemb.npy'), wemb)
    return w2i, wemb
19,365 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb):
    """Register `word` in the embedding subset (same approach as SQLNet's code).

    If the word has a vector in wv and is not yet in w2i, it gets the next
    index and its vector is appended to wemb. n_total counts every call.
    Returns the updated (idx_w2i, n_total); w2i and wemb are mutated in place.
    """
    is_new = (word in wv) and (word not in w2i)
    if is_new:
        idx_w2i += 1
        w2i[word] = idx_w2i
        wemb.append(wv[word])
    n_total += 1
    return idx_w2i, n_total
The provided code snippet includes necessary dependencies for implementing the `generate_w2i_wemb_e2k_headers` function. Write a Python function `def generate_w2i_wemb_e2k_headers(e2k_dicts, wv, idx_w2i, n_total, w2i, wemb)` to solve the following problem:
Generate a subset of TAPI from an English-to-Korean dict of table headers etc. update_w2i_wemb uses wv, w2i, wemb, idx_w2i as global variables. To do: 1. What should we do with numerics? The current version does not treat them specially, but this would be modified later so that we can use tags.
Here is the function:
def generate_w2i_wemb_e2k_headers(e2k_dicts, wv, idx_w2i, n_total, w2i, wemb):
    """Register all translated header tokens in the TAPI embedding subset.

    e2k_dicts maps table name -> English-to-Korean header dict; every token
    of every translated header goes through update_w2i_wemb. Numeric tokens
    get no special treatment yet (tags may be used later). Note the extra
    n_total increment per token, as in generate_w2i_wemb.
    """
    for e2k_dict in e2k_dicts.values():
        for word_tokens in e2k_dict.values():
            for word in word_tokens:
                idx_w2i, n_total = update_w2i_wemb(word, wv, idx_w2i, n_total, w2i, wemb)
                n_total += 1
    return idx_w2i, n_total
19,366 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def tokenize_nlu1(tokenizer, nlu1):
    """Tokenize one natural-language utterance with the given tokenizer."""
    return tokenizer.tokenize(nlu1)
19,367 | import os, json
import random as rd
from copy import deepcopy
import difflib
import re
import torch
import torchvision.datasets as dsets
import torch.nn as nn
import torch.nn.functional as F
from matplotlib.pylab import *
from torch.autograd import Variable
from .utils import generate_perm_inv
from .utils import json_default_type_checker
def tokenize_hds1(tokenizer, hds1):
    """Tokenize every header string of a table; returns one token list per header.

    NOTE(review): the original body built hds_all_tok but fell off the end
    without a return statement (so it always returned None) — the return
    is added here.
    """
    hds_all_tok = []
    for hds11 in hds1:
        sub_tok = tokenizer.tokenize(hds11)
        hds_all_tok.append(sub_tok)
    return hds_all_tok
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.